1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 QLogic Corporation. All rights reserved. 24 */ 25 26 #include <qlge.h> 27 #include <sys/atomic.h> 28 #include <sys/strsubr.h> 29 #include <sys/pattr.h> 30 #include <netinet/in.h> 31 #include <netinet/ip.h> 32 #include <netinet/ip6.h> 33 #include <netinet/tcp.h> 34 #include <netinet/udp.h> 35 #include <inet/ip.h> 36 37 38 39 /* 40 * Local variables 41 */ 42 static struct ether_addr ql_ether_broadcast_addr = 43 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 44 static char version[] = "QLogic GLDv3 Driver " VERSIONSTR; 45 46 /* 47 * Local function prototypes 48 */ 49 static void ql_free_resources(dev_info_t *, qlge_t *); 50 static void ql_fini_kstats(qlge_t *); 51 static uint32_t ql_get_link_state(qlge_t *); 52 static void ql_read_conf(qlge_t *); 53 static int ql_alloc_phys(dev_info_t *, ddi_dma_handle_t *, 54 ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *, 55 size_t, size_t, caddr_t *, ddi_dma_cookie_t *); 56 static void ql_free_phys(ddi_dma_handle_t *, ddi_acc_handle_t *); 57 static int ql_set_routing_reg(qlge_t *, uint32_t, uint32_t, int); 58 static int ql_route_initialize(qlge_t *); 59 static int 
ql_attach(dev_info_t *, ddi_attach_cmd_t);
static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
static int ql_bringdown_adapter(qlge_t *);
static int ql_bringup_adapter(qlge_t *);
static int ql_asic_reset(qlge_t *);
static void ql_wake_mpi_reset_soft_intr(qlge_t *);
static void ql_stop_timer(qlge_t *qlge);

/*
 * TX dma mapping handlers allow multiple scatter-gather lists
 */
ddi_dma_attr_t tx_mapping_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_MAX_TX_DMA_HANDLES,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};

/*
 * Receive buffers and Request/Response queues do not allow scatter-gather lists
 */
ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	1,				/* s/g list length, i.e no sg list */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};

/*
 * DMA access attribute structure.
 */
/* device register access from host */
ddi_device_acc_attr_t ql_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,		/* chip registers are little-endian */
	DDI_STRICTORDER_ACC
};

/* host ring descriptors */
ddi_device_acc_attr_t ql_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

/* host ring buffer */
ddi_device_acc_attr_t ql_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Hash key table for Receive Side Scaling (RSS) support
 */
const uint8_t key_data[] = {
	0x23, 0x64, 0xa1, 0xaa, 0x37, 0xc0, 0xed, 0x05, 0x2b, 0x36,
	0x50, 0x5c, 0x45, 0x1e, 0x7e, 0xc8, 0x5d, 0x2a, 0x54, 0x2f,
	0xe4, 0x3d, 0x0f, 0xbb, 0x91, 0xd9, 0x25, 0x60, 0xd4, 0xf8,
	0x12, 0xa0, 0x59, 0x4b, 0x9e, 0x8a, 0x51, 0xda, 0xcd, 0x49};

/*
 * Shadow Registers:
 * Outbound queues have a consumer index that is maintained by the chip.
 * Inbound queues have a producer index that is maintained by the chip.
 * For lower overhead, these registers are "shadowed" to host memory
 * which allows the device driver to track the queue progress without
 * PCI reads. When an entry is placed on an inbound queue, the chip will
 * update the relevant index register and then copy the value to the
 * shadow register in host memory.
 */

/* Read a 32-bit shadow register (chip-updated index in host memory). */
static inline unsigned int
ql_read_sh_reg(const volatile void *addr)
{
	return (*(volatile uint32_t *)addr);
}

/*
 * Read 32 bit atomically
 */
uint32_t
ql_atomic_read_32(volatile uint32_t *target)
{
	/*
	 * atomic_add_32_nv returns the new value after the add,
	 * we are adding 0 so we should get the original value
	 */
	return (atomic_add_32_nv(target, 0));
}

/*
 * Set 32 bit atomically
 */
void
ql_atomic_set_32(volatile uint32_t *target, uint32_t newval)
{
	(void) atomic_swap_32(target, newval);
}


/*
 * Setup device PCI configuration registers.
 * Kernel context.
 */
static void
ql_pci_config(qlge_t *qlge)
{
	uint16_t w;

	/* cache vendor/device IDs for later chip-revision checks */
	qlge->vendor_id = (uint16_t)pci_config_get16(qlge->pci_handle,
	    PCI_CONF_VENID);
	qlge->device_id = (uint16_t)pci_config_get16(qlge->pci_handle,
	    PCI_CONF_DEVID);

	/*
	 * we want to respect framework's setting of PCI
	 * configuration space command register and also
	 * want to make sure that all bits of interest to us
	 * are properly set in PCI Command register(0x04).
	 * PCI_COMM_IO		0x1	I/O access enable
	 * PCI_COMM_MAE		0x2	Memory access enable
	 * PCI_COMM_ME		0x4	bus master enable
	 * PCI_COMM_MEMWR_INVAL	0x10	memory write and invalidate enable.
	 */
	w = (uint16_t)pci_config_get16(qlge->pci_handle, PCI_CONF_COMM);
	/* I/O space access is deliberately turned off; memory space is used */
	w = (uint16_t)(w & (~PCI_COMM_IO));
	w = (uint16_t)(w | PCI_COMM_MAE | PCI_COMM_ME |
	    /* PCI_COMM_MEMWR_INVAL | */
	    PCI_COMM_PARITY_DETECT | PCI_COMM_SERR_ENABLE);

	pci_config_put16(qlge->pci_handle, PCI_CONF_COMM, w);

	ql_dump_pci_config(qlge);
}

/*
 * This routine performs the necessary steps to set GLD mac information
 * such as Function number, xgmac mask and shift bits
 */
static int
ql_set_mac_info(qlge_t *qlge)
{
	uint32_t value;
	int rval = DDI_SUCCESS;
	uint32_t fn0_net, fn1_net;

	/* set default value */
	qlge->fn0_net = FN0_NET;
	qlge->fn1_net = FN1_NET;

	if (ql_read_processor_data(qlge, MPI_REG, &value) != DDI_SUCCESS) {
		/* non-fatal: fall back to the defaults set above */
		cmn_err(CE_WARN, "%s(%d) read MPI register failed",
		    __func__, qlge->instance);
	} else {
		/* NIC function numbers are packed into the MPI register */
		fn0_net = (value >> 1) & 0x07;
		fn1_net = (value >> 5) & 0x07;
		if ((fn0_net > 4) || (fn1_net > 4) || (fn0_net == fn1_net)) {
			cmn_err(CE_WARN, "%s(%d) bad mpi register value %x, \n"
			    "nic0 function number %d,"
			    "nic1 function number %d "
			    "use default\n",
			    __func__, qlge->instance, value, fn0_net, fn1_net);
		} else {
			qlge->fn0_net = fn0_net;
			qlge->fn1_net = fn1_net;
		}
	}

	/* Get the function number that the driver is associated with */
	value = ql_read_reg(qlge, REG_STATUS);
	qlge->func_number = (uint8_t)((value >> 6) & 0x03);
	QL_PRINT(DBG_INIT, ("status register is:%x, func_number: %d\n",
	    value, qlge->func_number));

	/* The driver is loaded on a non-NIC function? */
	if ((qlge->func_number != qlge->fn0_net) &&
	    (qlge->func_number != qlge->fn1_net)) {
		cmn_err(CE_WARN,
		    "Invalid function number = 0x%x\n", qlge->func_number);
		return (DDI_FAILURE);
	}
	/* network port 0? */
	if (qlge->func_number == qlge->fn0_net) {
		qlge->xgmac_sem_mask = QL_PORT0_XGMAC_SEM_MASK;
		qlge->xgmac_sem_bits = QL_PORT0_XGMAC_SEM_BITS;
	} else {
		qlge->xgmac_sem_mask = QL_PORT1_XGMAC_SEM_MASK;
		qlge->xgmac_sem_bits = QL_PORT1_XGMAC_SEM_BITS;
	}

	return (rval);

}

/*
 * write to doorbell register
 */
void
ql_write_doorbell_reg(qlge_t *qlge, uint32_t *addr, uint32_t data)
{
	ddi_put32(qlge->dev_doorbell_reg_handle, addr, data);
}

/*
 * read from doorbell register
 */
uint32_t
ql_read_doorbell_reg(qlge_t *qlge, uint32_t *addr)
{
	uint32_t ret;

	ret = ddi_get32(qlge->dev_doorbell_reg_handle, addr);

	return (ret);
}

/*
 * This function waits for a specific bit to come ready
 * in a given register. It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 *
 * Returns DDI_SUCCESS when 'bit' is set, DDI_FAILURE on timeout
 * or when 'err_bit' is reported by the chip.
 */
static int
ql_wait_reg_rdy(qlge_t *qlge, uint32_t reg, uint32_t bit, uint32_t err_bit)
{
	uint32_t temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read_reg(qlge, reg);

		/* check for errors */
		if ((temp & err_bit) != 0) {
			break;
		} else if ((temp & bit) != 0)
			return (DDI_SUCCESS);
		qlge_delay(UDELAY_DELAY);
		count--;
	}
	cmn_err(CE_WARN,
	    "Waiting for reg %x to come ready failed.", reg);
	return (DDI_FAILURE);
}

/*
 * The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int
ql_wait_cfg(qlge_t *qlge, uint32_t bit)
{
	int count = UDELAY_COUNT;
	uint32_t temp;

	while (count) {
		temp = ql_read_reg(qlge, REG_CONFIGURATION);
		/* stop polling if the chip reports a configuration error */
		if ((temp & CFG_LE) != 0) {
			break;
		}
		/* operation is done when the chip clears 'bit' */
		if ((temp & bit) == 0)
			return (DDI_SUCCESS);
		qlge_delay(UDELAY_DELAY);
		count--;
	}
	cmn_err(CE_WARN,
	    "Waiting for cfg register bit %x failed.", bit);
	return (DDI_FAILURE);
}


/*
 * Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
static int
ql_write_cfg(qlge_t *qlge, uint32_t bit, uint64_t phy_addr, uint16_t q_id)
{
	int status = DDI_SUCCESS;
	uint32_t mask;
	uint32_t value;

	/* serialize ICB downloads against firmware and the FC driver */
	status = ql_sem_spinlock(qlge, SEM_ICB_MASK);
	if (status != DDI_SUCCESS) {
		goto exit;
	}
	status = ql_wait_cfg(qlge, bit);
	if (status != DDI_SUCCESS) {
		goto exit;
	}

	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_LOWER, LS_64BITS(phy_addr));
	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_UPPER, MS_64BITS(phy_addr));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write_reg(qlge, REG_CONFIGURATION, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qlge, bit);
	ql_sem_unlock(qlge, SEM_ICB_MASK); /* does flush too */

exit:
	return (status);
}

/*
 * Initialize adapter instance: defaults, .conf overrides, PCI config,
 * chip-specific capability flags and the factory MAC address from flash.
 */
static int
ql_init_instance(qlge_t *qlge)
{
	int i;

	/* Default value */
	qlge->mac_flags = QL_MAC_INIT;
	qlge->mtu = ETHERMTU;		/* set normal size as default */
	qlge->page_size = VM_PAGE_SIZE;	/* default page size */
	/* Set up the default ring sizes. */
	qlge->tx_ring_size = NUM_TX_RING_ENTRIES;
	qlge->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT;
	qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT;
	qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT;
	qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT;
	qlge->payload_copy_thresh = DFLT_PAYLOAD_COPY_THRESH;
	qlge->ql_dbgprnt = 0;
#if QL_DEBUG
	qlge->ql_dbgprnt = QL_DEBUG;
#endif /* QL_DEBUG */

	/*
	 * TODO: Should be obtained from configuration or based off
	 * number of active cpus SJP 4th Mar. 09
	 */
	qlge->tx_ring_count = 1;
	qlge->rss_ring_count = 4;
	qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;

	for (i = 0; i < MAX_RX_RINGS; i++) {
		qlge->rx_polls[i] = 0;
		qlge->rx_interrupts[i] = 0;
	}

	/*
	 * Set up the operating parameters.
	 */
	qlge->multicast_list_count = 0;

	/*
	 * Set up the max number of unicast list
	 */
	qlge->unicst_total = MAX_UNICAST_LIST_SIZE;
	qlge->unicst_avail = MAX_UNICAST_LIST_SIZE;

	/*
	 * read user defined properties in .conf file
	 */
	ql_read_conf(qlge); /* mtu, pause, LSO etc */

	QL_PRINT(DBG_INIT, ("mtu is %d \n", qlge->mtu));

	/* choose Memory Space mapping and get Vendor Id, Device ID etc */
	ql_pci_config(qlge);
	qlge->ip_hdr_offset = 0;

	if (qlge->device_id == 0x8000) {
		/* Schultz card */
		qlge->cfg_flags |= CFG_CHIP_8100;
		/* enable just ipv4 chksum offload for Schultz */
		qlge->cfg_flags |= CFG_CKSUM_FULL_IPv4;
		/*
		 * Schultz firmware does not do pseudo IP header checksum
		 * calculation, needed to be done by driver
		 */
		qlge->cfg_flags |= CFG_HW_UNABLE_PSEUDO_HDR_CKSUM;
		if (qlge->lso_enable)
			qlge->cfg_flags |= CFG_LSO;
		qlge->cfg_flags |= CFG_SUPPORT_SCATTER_GATHER;
		/* Schultz must split packet header */
		qlge->cfg_flags |= CFG_ENABLE_SPLIT_HEADER;
		qlge->max_read_mbx = 5;
		qlge->ip_hdr_offset = 2;
	}

	/* Set Function Number and some of the iocb mac information */
	if (ql_set_mac_info(qlge) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Read network settings from NVRAM */
	/* After nvram is read successfully, update dev_addr */
	if (ql_get_flash_params(qlge) == DDI_SUCCESS) {
		QL_PRINT(DBG_INIT, ("mac%d address is \n", qlge->func_number));
		for (i = 0; i < ETHERADDRL; i++) {
			qlge->dev_addr.ether_addr_octet[i] =
			    qlge->nic_config.factory_MAC[i];
		}
	} else {
		cmn_err(CE_WARN, "%s(%d): Failed to read flash memory",
		    __func__, qlge->instance);
		return (DDI_FAILURE);
	}

	/* slot 0 of the unicast list always holds the primary MAC */
	bcopy(qlge->dev_addr.ether_addr_octet,
	    qlge->unicst_addr[0].addr.ether_addr_octet,
	    ETHERADDRL);
	QL_DUMP(DBG_INIT, "\t flash mac address dump:\n",
	    &qlge->dev_addr.ether_addr_octet[0], 8, ETHERADDRL);

	qlge->port_link_state = LS_DOWN;

	return (DDI_SUCCESS);
}


/*
 * This hardware semaphore provides the mechanism for exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int
ql_sem_trylock(qlge_t *qlge, uint32_t sem_mask)
{
	uint32_t sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		cmn_err(CE_WARN, "Bad Semaphore mask!.");
		return (DDI_FAILURE);
	}

	/*
	 * Write the set request then read back: if the bit reads back
	 * set, the semaphore was acquired.  Note the inverted return
	 * convention - 0 means acquired, non-zero means busy.
	 */
	ql_write_reg(qlge, REG_SEMAPHORE, sem_bits | sem_mask);
	return (!(ql_read_reg(qlge, REG_SEMAPHORE) & sem_bits));
}

/*
 * Lock a specific bit of Semaphore register to gain
 * access to a particular shared register
 */
int
ql_sem_spinlock(qlge_t *qlge, uint32_t sem_mask)
{
	unsigned int wait_count = 30;

	while (wait_count) {
		/* ql_sem_trylock() returns 0 when the lock was acquired */
		if (!ql_sem_trylock(qlge, sem_mask))
			return (DDI_SUCCESS);
		qlge_delay(100);
		wait_count--;
	}
	cmn_err(CE_WARN, "%s(%d) sem_mask 0x%x lock timeout ",
	    __func__, qlge->instance, sem_mask);
	return (DDI_FAILURE);
}

/*
 * Unlock a specific bit of Semaphore register to release
 * access to a particular shared register
 */
void
ql_sem_unlock(qlge_t *qlge, uint32_t sem_mask)
{
	ql_write_reg(qlge, REG_SEMAPHORE, sem_mask);
	(void) ql_read_reg(qlge, REG_SEMAPHORE);	/* flush */
}

/*
 * Get property value from configuration file.
 *
 * string = property string pointer.
 *
 * Returns:
 * 0xFFFFFFFF = no property else property value.
576 */ 577 static uint32_t 578 ql_get_prop(qlge_t *qlge, char *string) 579 { 580 char buf[256]; 581 uint32_t data; 582 583 /* Get adapter instance parameter. */ 584 (void) sprintf(buf, "hba%d-%s", qlge->instance, string); 585 data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0, buf, 586 (int)0xffffffff); 587 588 /* Adapter instance parameter found? */ 589 if (data == 0xffffffff) { 590 /* No, get default parameter. */ 591 data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0, 592 string, (int)0xffffffff); 593 } 594 595 return (data); 596 } 597 598 /* 599 * Read user setting from configuration file. 600 */ 601 static void 602 ql_read_conf(qlge_t *qlge) 603 { 604 uint32_t data; 605 606 /* clear configuration flags */ 607 qlge->cfg_flags = 0; 608 609 /* Get default rx_copy enable/disable. */ 610 if ((data = ql_get_prop(qlge, "force-rx-copy")) == 0xffffffff || 611 data == 0) { 612 qlge->cfg_flags &= ~CFG_RX_COPY_MODE; 613 qlge->rx_copy = B_FALSE; 614 QL_PRINT(DBG_INIT, ("rx copy mode disabled\n")); 615 } else if (data == 1) { 616 qlge->cfg_flags |= CFG_RX_COPY_MODE; 617 qlge->rx_copy = B_TRUE; 618 QL_PRINT(DBG_INIT, ("rx copy mode enabled\n")); 619 } 620 621 /* Get mtu packet size. */ 622 data = ql_get_prop(qlge, "mtu"); 623 if ((data == ETHERMTU) || (data == JUMBO_MTU)) { 624 if (qlge->mtu != data) { 625 qlge->mtu = data; 626 cmn_err(CE_NOTE, "new mtu is %d\n", qlge->mtu); 627 } 628 } 629 630 /* Get pause mode, default is Per Priority mode. */ 631 qlge->pause = PAUSE_MODE_PER_PRIORITY; 632 data = ql_get_prop(qlge, "pause"); 633 if (data <= PAUSE_MODE_PER_PRIORITY) { 634 if (qlge->pause != data) { 635 qlge->pause = data; 636 cmn_err(CE_NOTE, "new pause mode %d\n", qlge->pause); 637 } 638 } 639 640 /* Get tx_max_coalesced_frames. 
 */
	qlge->tx_max_coalesced_frames = 5;
	data = ql_get_prop(qlge, "tx_max_coalesced_frames");
	/* if data is valid */
	if ((data != 0xffffffff) && data) {
		if (qlge->tx_max_coalesced_frames != data) {
			qlge->tx_max_coalesced_frames = (uint16_t)data;
		}
	}

	/* Get split header payload_copy_thresh. */
	qlge->payload_copy_thresh = 6;
	data = ql_get_prop(qlge, "payload_copy_thresh");
	/* if data is valid */
	if ((data != 0xffffffff) && (data != 0)) {
		if (qlge->payload_copy_thresh != data) {
			qlge->payload_copy_thresh = data;
		}
	}

	/* large send offload (LSO) capability. */
	qlge->lso_enable = 1;
	data = ql_get_prop(qlge, "lso_enable");
	/* if data is valid */
	if (data != 0xffffffff) {
		if (qlge->lso_enable != data) {
			qlge->lso_enable = (uint16_t)data;
		}
	}
}

/*
 * Enable global interrupt
 */
static void
ql_enable_global_interrupt(qlge_t *qlge)
{
	/* upper 16 bits are the write mask, lower 16 the new value */
	ql_write_reg(qlge, REG_INTERRUPT_ENABLE,
	    (INTR_EN_EI << 16) | INTR_EN_EI);
	qlge->flags |= INTERRUPTS_ENABLED;
}

/*
 * Disable global interrupt
 */
static void
ql_disable_global_interrupt(qlge_t *qlge)
{
	/* mask bit set, value bit clear -> global enable turned off */
	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, (INTR_EN_EI << 16));
	qlge->flags &= ~INTERRUPTS_ENABLED;
}

/*
 * Enable one ring interrupt
 */
void
ql_enable_completion_interrupt(qlge_t *qlge, uint32_t intr)
{
	struct intr_ctx *ctx = qlge->intr_ctx + intr;

	QL_PRINT(DBG_INTR, ("%s(%d): To enable intr %d, irq_cnt %d \n",
	    __func__, qlge->instance, intr, ctx->irq_cnt));

	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
		/*
		 * Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
		return;
	}

	/* only touch hardware when the disable count drops to zero */
	if (!atomic_dec_32_nv(&ctx->irq_cnt)) {
		mutex_enter(&qlge->hw_mutex);
		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
		mutex_exit(&qlge->hw_mutex);
		QL_PRINT(DBG_INTR,
		    ("%s(%d): write %x to intr enable register \n",
		    __func__, qlge->instance, ctx->intr_en_mask));
	}
}

/*
 * ql_forced_disable_completion_interrupt
 * Used by call from OS, may be called without
 * a pending interrupt so force the disable
 */
uint32_t
ql_forced_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
{
	uint32_t var = 0;
	struct intr_ctx *ctx = qlge->intr_ctx + intr;

	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
	    __func__, qlge->instance, intr, ctx->irq_cnt));

	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
		/* read status back to flush the posted write */
		var = ql_read_reg(qlge, REG_STATUS);
		return (var);
	}

	mutex_enter(&qlge->hw_mutex);
	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
	var = ql_read_reg(qlge, REG_STATUS);
	mutex_exit(&qlge->hw_mutex);

	return (var);
}

/*
 * Disable a completion interrupt
 */
void
ql_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
{
	struct intr_ctx *ctx;

	ctx = qlge->intr_ctx + intr;
	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
	    __func__, qlge->instance, intr, ctx->irq_cnt));
	/*
	 * HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && (intr != 0))
		return;

	/* first disable request writes hardware; later ones just count */
	if (ql_atomic_read_32(&ctx->irq_cnt) == 0) {
		mutex_enter(&qlge->hw_mutex);
		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
		mutex_exit(&qlge->hw_mutex);
	}
	atomic_inc_32(&ctx->irq_cnt);
}

/*
 * Enable all completion interrupts
 */
static void
ql_enable_all_completion_interrupts(qlge_t *qlge)
{
	int i;
	uint32_t value = 1;

	for (i = 0; i < qlge->intr_cnt; i++) {
		/*
		 * Set the count to 1 for Legacy / MSI interrupts or for the
		 * default interrupt (0)
		 */
		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0) {
			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
		}
		ql_enable_completion_interrupt(qlge, i);
	}
}

/*
 * Disable all completion interrupts
 */
static void
ql_disable_all_completion_interrupts(qlge_t *qlge)
{
	int i;
	uint32_t value = 0;

	for (i = 0; i < qlge->intr_cnt; i++) {

		/*
		 * Set the count to 0 for Legacy / MSI interrupts or for the
		 * default interrupt (0)
		 */
		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0)
			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);

		ql_disable_completion_interrupt(qlge, i);
	}
}

/*
 * Update small buffer queue producer index
 */
static void
ql_update_sbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
{
	/* Update the buffer producer index */
	QL_PRINT(DBG_RX, ("sbq: updating prod idx = %d.\n",
	    rx_ring->sbq_prod_idx));
	ql_write_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg,
	    rx_ring->sbq_prod_idx);
}

/*
 * Update large buffer queue producer index
 */
static void
ql_update_lbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
{
	/* Update the buffer producer index */
	QL_PRINT(DBG_RX, ("lbq: updating prod idx = %d.\n",
	    rx_ring->lbq_prod_idx));
	ql_write_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg,
	    rx_ring->lbq_prod_idx);
}

/*
 * Adds a small buffer descriptor to end of its in use list,
 * assumes sbq_lock is already taken
 */
static void
ql_add_sbuf_to_in_use_list(struct rx_ring *rx_ring,
    struct bq_desc *sbq_desc)
{
	uint32_t inuse_idx = rx_ring->sbq_use_tail;

	rx_ring->sbuf_in_use[inuse_idx] = sbq_desc;
	inuse_idx++;
	/* the in-use list is a circular array; wrap the tail index */
	if (inuse_idx >= rx_ring->sbq_len)
		inuse_idx = 0;
	rx_ring->sbq_use_tail = inuse_idx;
	atomic_inc_32(&rx_ring->sbuf_in_use_count);
	ASSERT(rx_ring->sbuf_in_use_count <= rx_ring->sbq_len);
}

/*
 * Get a small buffer descriptor from its in use list
 */
static struct bq_desc *
ql_get_sbuf_from_in_use_list(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = NULL;
	uint32_t inuse_idx;

	/* Pick from head of in use list */
	inuse_idx = rx_ring->sbq_use_head;
	sbq_desc = rx_ring->sbuf_in_use[inuse_idx];
	rx_ring->sbuf_in_use[inuse_idx] = NULL;

	if (sbq_desc != NULL) {
		inuse_idx++;
		if (inuse_idx >= rx_ring->sbq_len)
			inuse_idx = 0;
		rx_ring->sbq_use_head = inuse_idx;
		atomic_dec_32(&rx_ring->sbuf_in_use_count);
		/* buffer is about to be indicated upstream */
		atomic_inc_32(&rx_ring->rx_indicate);
		sbq_desc->upl_inuse = 1;
		/* if mp is NULL */
		if (sbq_desc->mp == NULL) {
			/* try to remap mp again */
			sbq_desc->mp =
			    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
			    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
		}
	}

	return (sbq_desc);
}

/*
 * Add a small buffer descriptor to its free list
 */
static void
ql_add_sbuf_to_free_list(struct rx_ring *rx_ring,
    struct bq_desc *sbq_desc)
{
	uint32_t free_idx;

	/* Add to the end of free list */
	free_idx = rx_ring->sbq_free_tail;
	rx_ring->sbuf_free[free_idx] = sbq_desc;
	ASSERT(rx_ring->sbuf_free_count <= rx_ring->sbq_len);
	free_idx++;
	if (free_idx >= rx_ring->sbq_len)
		free_idx = 0;
	rx_ring->sbq_free_tail = free_idx;
	atomic_inc_32(&rx_ring->sbuf_free_count);
}

/*
 * Get a small buffer descriptor from its free list
 */
static struct bq_desc *
ql_get_sbuf_from_free_list(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc;
	uint32_t free_idx;

	free_idx = rx_ring->sbq_free_head;
	/* Pick from top of free list */
	sbq_desc = rx_ring->sbuf_free[free_idx];
	rx_ring->sbuf_free[free_idx] = NULL;
	if (sbq_desc != NULL) {
		free_idx++;
		if (free_idx >= rx_ring->sbq_len)
			free_idx = 0;
		rx_ring->sbq_free_head = free_idx;
		atomic_dec_32(&rx_ring->sbuf_free_count);
		ASSERT(rx_ring->sbuf_free_count != 0);
	}
	return (sbq_desc);
}

/*
 * Add a large buffer descriptor to its in use list
 */
static void
ql_add_lbuf_to_in_use_list(struct rx_ring *rx_ring,
    struct bq_desc *lbq_desc)
{
	uint32_t inuse_idx;

	inuse_idx = rx_ring->lbq_use_tail;

	rx_ring->lbuf_in_use[inuse_idx] = lbq_desc;
	inuse_idx++;
	if (inuse_idx >= rx_ring->lbq_len)
		inuse_idx = 0;
	rx_ring->lbq_use_tail = inuse_idx;
	atomic_inc_32(&rx_ring->lbuf_in_use_count);
}

/*
 * Get a large buffer descriptor from in use list
 */
static struct bq_desc *
ql_get_lbuf_from_in_use_list(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;
	uint32_t inuse_idx;

	/* Pick from head of in use list */
	inuse_idx = rx_ring->lbq_use_head;
	lbq_desc = rx_ring->lbuf_in_use[inuse_idx];
	rx_ring->lbuf_in_use[inuse_idx] = NULL;

	if (lbq_desc != NULL) {
		inuse_idx++;
		if (inuse_idx >= rx_ring->lbq_len)
			inuse_idx = 0;
		rx_ring->lbq_use_head = inuse_idx;
		atomic_dec_32(&rx_ring->lbuf_in_use_count);
		/* buffer is about to be indicated upstream */
		atomic_inc_32(&rx_ring->rx_indicate);
		lbq_desc->upl_inuse = 1;

		/* if mp is NULL */
		if (lbq_desc->mp == NULL) {
			/* try to remap mp again */
			lbq_desc->mp =
			    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
			    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
		}
	}
	return (lbq_desc);
}

/*
 * Add a large buffer descriptor to free list
 */
static void
ql_add_lbuf_to_free_list(struct rx_ring *rx_ring,
    struct bq_desc *lbq_desc)
{
	uint32_t free_idx;

	/* Add to the end of free list */
	free_idx = rx_ring->lbq_free_tail;
	rx_ring->lbuf_free[free_idx] = lbq_desc;
	free_idx++;
	if (free_idx >= rx_ring->lbq_len)
		free_idx = 0;
	rx_ring->lbq_free_tail = free_idx;
	atomic_inc_32(&rx_ring->lbuf_free_count);
	ASSERT(rx_ring->lbuf_free_count <= rx_ring->lbq_len);
}

/*
 * Get a large buffer descriptor from its free list
 */
static struct bq_desc *
ql_get_lbuf_from_free_list(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;
	uint32_t free_idx;

	free_idx = rx_ring->lbq_free_head;
	/* Pick from head of free list */
	lbq_desc = rx_ring->lbuf_free[free_idx];
	rx_ring->lbuf_free[free_idx] = NULL;

	if (lbq_desc != NULL) {
		free_idx++;
		if (free_idx >= rx_ring->lbq_len)
			free_idx = 0;
		rx_ring->lbq_free_head = free_idx;
		atomic_dec_32(&rx_ring->lbuf_free_count);
		ASSERT(rx_ring->lbuf_free_count != 0);
	}
	return (lbq_desc);
}

/*
 * Add a small buffer descriptor to free list
 */
static void
ql_refill_sbuf_free_list(struct bq_desc *sbq_desc, boolean_t alloc_memory)
{
	struct rx_ring *rx_ring = sbq_desc->rx_ring;
	uint64_t *sbq_entry;
	qlge_t *qlge = (qlge_t *)rx_ring->qlge;
	/*
	 * Sync access
	 */
	mutex_enter(&rx_ring->sbq_lock);

	sbq_desc->upl_inuse = 0;

	/*
	 * If we are freeing the buffers as a result of adapter unload, get out
	 */
	if ((sbq_desc->free_buf != NULL) ||
	    (qlge->mac_flags == QL_MAC_DETACH)) {
		if (sbq_desc->free_buf == NULL)
			atomic_dec_32(&rx_ring->rx_indicate);
		mutex_exit(&rx_ring->sbq_lock);
		return;
	}
#ifdef QLGE_LOAD_UNLOAD
	if (rx_ring->rx_indicate == 0)
		cmn_err(CE_WARN, "sbq: indicate wrong");
#endif
#ifdef QLGE_TRACK_BUFFER_USAGE
	uint32_t sb_consumer_idx;
	uint32_t sb_producer_idx;
	uint32_t num_free_buffers;
	uint32_t temp;

	/* doorbell read: low 16 bits producer idx, high 16 consumer idx */
	temp = ql_read_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg);
	sb_producer_idx = temp & 0x0000ffff;
	sb_consumer_idx = (temp >> 16);

	if (sb_consumer_idx > sb_producer_idx)
		num_free_buffers = NUM_SMALL_BUFFERS -
		    (sb_consumer_idx - sb_producer_idx);
	else
		num_free_buffers = sb_producer_idx - sb_consumer_idx;

	/* track the low-water mark of free small buffers */
	if (num_free_buffers < qlge->rx_sb_low_count[rx_ring->cq_id])
		qlge->rx_sb_low_count[rx_ring->cq_id] = num_free_buffers;

#endif

	ASSERT(sbq_desc->mp == NULL);

#ifdef QLGE_LOAD_UNLOAD
	if (rx_ring->rx_indicate > 0xFF000000)
		cmn_err(CE_WARN, "sbq: indicate(%d) wrong: %d mac_flags %d,"
		    " sbq_desc index %d.",
		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
		    sbq_desc->index);
#endif
	if (alloc_memory) {
		sbq_desc->mp =
		    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
		    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
		if (sbq_desc->mp == NULL) {
			rx_ring->rx_failed_sbq_allocs++;
		}
	}

	/* Got the packet from the stack decrement rx_indicate count */
	atomic_dec_32(&rx_ring->rx_indicate);

	ql_add_sbuf_to_free_list(rx_ring, sbq_desc);

	/* Rearm if possible */
	if ((rx_ring->sbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
	    (qlge->mac_flags == QL_MAC_STARTED)) {
		sbq_entry = rx_ring->sbq_dma.vaddr;
		sbq_entry += rx_ring->sbq_prod_idx;

		while (rx_ring->sbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
			/* Get first one from free list */
			sbq_desc = ql_get_sbuf_from_free_list(rx_ring);

			*sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
			sbq_entry++;
			rx_ring->sbq_prod_idx++;
			if (rx_ring->sbq_prod_idx >= rx_ring->sbq_len) {
				rx_ring->sbq_prod_idx = 0;
				sbq_entry = rx_ring->sbq_dma.vaddr;
			}
			/* Add to end of in use list */
			ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
		}

		/* Update small buffer queue producer index */
		ql_update_sbq_prod_idx(qlge, rx_ring);
	}

	mutex_exit(&rx_ring->sbq_lock);
	QL_PRINT(DBG_RX_RING, ("%s(%d) exited, sbuf_free_count %d\n",
	    __func__, qlge->instance, rx_ring->sbuf_free_count));
}

/*
 * rx recycle call back function
 */
static void
ql_release_to_sbuf_free_list(caddr_t p)
{
	struct bq_desc *sbq_desc = (struct bq_desc *)(void *)p;

	if (sbq_desc == NULL)
		return;
	ql_refill_sbuf_free_list(sbq_desc, B_TRUE);
}

/*
 * Add a large buffer descriptor to free list
 */
static void
ql_refill_lbuf_free_list(struct bq_desc *lbq_desc, boolean_t alloc_memory)
{
	struct rx_ring *rx_ring = lbq_desc->rx_ring;
	uint64_t *lbq_entry;
	qlge_t *qlge = rx_ring->qlge;

	/* Sync access */
	mutex_enter(&rx_ring->lbq_lock);

	lbq_desc->upl_inuse = 0;
	/*
	 * If we are freeing the buffers as a result of adapter unload, get out
	 */
	if ((lbq_desc->free_buf != NULL) ||
	    (qlge->mac_flags == QL_MAC_DETACH)) {
		if (lbq_desc->free_buf == NULL)
			atomic_dec_32(&rx_ring->rx_indicate);
		mutex_exit(&rx_ring->lbq_lock);
		return;
	}
#ifdef QLGE_LOAD_UNLOAD
	if (rx_ring->rx_indicate == 0)
		cmn_err(CE_WARN, "lbq: indicate wrong");
#endif
#ifdef QLGE_TRACK_BUFFER_USAGE
	uint32_t lb_consumer_idx;
	uint32_t lb_producer_idx;
	uint32_t num_free_buffers;
	uint32_t temp;

	/* doorbell read: low 16 bits producer idx, high 16 consumer idx */
	temp = ql_read_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg);

	lb_producer_idx = temp & 0x0000ffff;
	lb_consumer_idx = (temp >> 16);

	if (lb_consumer_idx > lb_producer_idx)
		num_free_buffers = NUM_LARGE_BUFFERS -
		    (lb_consumer_idx - lb_producer_idx);
	else
		num_free_buffers = lb_producer_idx - lb_consumer_idx;

	/* track the low-water mark of free large buffers */
	if (num_free_buffers < qlge->rx_lb_low_count[rx_ring->cq_id]) {
		qlge->rx_lb_low_count[rx_ring->cq_id] = num_free_buffers;
	}
#endif

	ASSERT(lbq_desc->mp == NULL);
#ifdef QLGE_LOAD_UNLOAD
	if (rx_ring->rx_indicate > 0xFF000000)
		cmn_err(CE_WARN, "lbq: indicate(%d) wrong: %d mac_flags %d,"
		    "lbq_desc index %d",
		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
		    lbq_desc->index);
#endif
	if (alloc_memory) {
		lbq_desc->mp =
		    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
		    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
		if (lbq_desc->mp == NULL) {
			rx_ring->rx_failed_lbq_allocs++;
		}
	}

	/* Got the packet from the stack decrement rx_indicate count */
	atomic_dec_32(&rx_ring->rx_indicate);

	ql_add_lbuf_to_free_list(rx_ring, lbq_desc);

	/* Rearm if possible */
	if ((rx_ring->lbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
	    (qlge->mac_flags == QL_MAC_STARTED)) {
		lbq_entry = rx_ring->lbq_dma.vaddr;
		lbq_entry += rx_ring->lbq_prod_idx;
		while (rx_ring->lbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
			/* Get first one from free list */
			lbq_desc = ql_get_lbuf_from_free_list(rx_ring);

			*lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
			lbq_entry++;
			rx_ring->lbq_prod_idx++;
			if (rx_ring->lbq_prod_idx >= rx_ring->lbq_len) {
				rx_ring->lbq_prod_idx = 0;
				lbq_entry = rx_ring->lbq_dma.vaddr;
			}

			/* Add to end of in use list */
			ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
		}

		/* Update large buffer queue producer index */
		ql_update_lbq_prod_idx(rx_ring->qlge, rx_ring);
1252 } 1253 1254 mutex_exit(&rx_ring->lbq_lock); 1255 QL_PRINT(DBG_RX_RING, ("%s exitd, lbuf_free_count %d\n", 1256 __func__, rx_ring->lbuf_free_count)); 1257 } 1258 /* 1259 * rx recycle call back function 1260 */ 1261 static void 1262 ql_release_to_lbuf_free_list(caddr_t p) 1263 { 1264 struct bq_desc *lbq_desc = (struct bq_desc *)(void *)p; 1265 1266 if (lbq_desc == NULL) 1267 return; 1268 ql_refill_lbuf_free_list(lbq_desc, B_TRUE); 1269 } 1270 1271 /* 1272 * free small buffer queue buffers 1273 */ 1274 static void 1275 ql_free_sbq_buffers(struct rx_ring *rx_ring) 1276 { 1277 struct bq_desc *sbq_desc; 1278 uint32_t i; 1279 uint32_t j = rx_ring->sbq_free_head; 1280 int force_cnt = 0; 1281 1282 for (i = 0; i < rx_ring->sbuf_free_count; i++) { 1283 sbq_desc = rx_ring->sbuf_free[j]; 1284 sbq_desc->free_buf = 1; 1285 j++; 1286 if (j >= rx_ring->sbq_len) { 1287 j = 0; 1288 } 1289 if (sbq_desc->mp != NULL) { 1290 freemsg(sbq_desc->mp); 1291 sbq_desc->mp = NULL; 1292 } 1293 } 1294 rx_ring->sbuf_free_count = 0; 1295 1296 j = rx_ring->sbq_use_head; 1297 for (i = 0; i < rx_ring->sbuf_in_use_count; i++) { 1298 sbq_desc = rx_ring->sbuf_in_use[j]; 1299 sbq_desc->free_buf = 1; 1300 j++; 1301 if (j >= rx_ring->sbq_len) { 1302 j = 0; 1303 } 1304 if (sbq_desc->mp != NULL) { 1305 freemsg(sbq_desc->mp); 1306 sbq_desc->mp = NULL; 1307 } 1308 } 1309 rx_ring->sbuf_in_use_count = 0; 1310 1311 sbq_desc = &rx_ring->sbq_desc[0]; 1312 for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) { 1313 /* 1314 * Set flag so that the callback does not allocate a new buffer 1315 */ 1316 sbq_desc->free_buf = 1; 1317 if (sbq_desc->upl_inuse != 0) { 1318 force_cnt++; 1319 } 1320 if (sbq_desc->bd_dma.dma_handle != NULL) { 1321 ql_free_phys(&sbq_desc->bd_dma.dma_handle, 1322 &sbq_desc->bd_dma.acc_handle); 1323 sbq_desc->bd_dma.dma_handle = NULL; 1324 sbq_desc->bd_dma.acc_handle = NULL; 1325 } 1326 } 1327 #ifdef QLGE_LOAD_UNLOAD 1328 cmn_err(CE_NOTE, "sbq: free %d inuse %d force %d\n", 1329 
rx_ring->sbuf_free_count, rx_ring->sbuf_in_use_count, force_cnt); 1330 #endif 1331 if (rx_ring->sbuf_in_use != NULL) { 1332 kmem_free(rx_ring->sbuf_in_use, (rx_ring->sbq_len * 1333 sizeof (struct bq_desc *))); 1334 rx_ring->sbuf_in_use = NULL; 1335 } 1336 1337 if (rx_ring->sbuf_free != NULL) { 1338 kmem_free(rx_ring->sbuf_free, (rx_ring->sbq_len * 1339 sizeof (struct bq_desc *))); 1340 rx_ring->sbuf_free = NULL; 1341 } 1342 } 1343 1344 /* Allocate small buffers */ 1345 static int 1346 ql_alloc_sbufs(qlge_t *qlge, struct rx_ring *rx_ring) 1347 { 1348 struct bq_desc *sbq_desc; 1349 int i; 1350 ddi_dma_cookie_t dma_cookie; 1351 1352 rx_ring->sbuf_free = kmem_zalloc(rx_ring->sbq_len * 1353 sizeof (struct bq_desc *), KM_NOSLEEP); 1354 if (rx_ring->sbuf_free == NULL) { 1355 cmn_err(CE_WARN, 1356 "!%s: sbuf_free_list alloc: failed", 1357 __func__); 1358 rx_ring->sbuf_free_count = 0; 1359 goto alloc_sbuf_err; 1360 } 1361 1362 rx_ring->sbuf_in_use = kmem_zalloc(rx_ring->sbq_len * 1363 sizeof (struct bq_desc *), KM_NOSLEEP); 1364 if (rx_ring->sbuf_in_use == NULL) { 1365 cmn_err(CE_WARN, 1366 "!%s: sbuf_inuse_list alloc: failed", 1367 __func__); 1368 rx_ring->sbuf_in_use_count = 0; 1369 goto alloc_sbuf_err; 1370 } 1371 rx_ring->sbq_use_head = 0; 1372 rx_ring->sbq_use_tail = 0; 1373 rx_ring->sbq_free_head = 0; 1374 rx_ring->sbq_free_tail = 0; 1375 sbq_desc = &rx_ring->sbq_desc[0]; 1376 1377 for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) { 1378 /* Allocate buffer */ 1379 if (ql_alloc_phys(qlge->dip, &sbq_desc->bd_dma.dma_handle, 1380 &ql_buf_acc_attr, 1381 DDI_DMA_READ | DDI_DMA_STREAMING, 1382 &sbq_desc->bd_dma.acc_handle, 1383 (size_t)rx_ring->sbq_buf_size, /* mem size */ 1384 (size_t)0, /* default alignment */ 1385 (caddr_t *)&sbq_desc->bd_dma.vaddr, 1386 &dma_cookie) != 0) { 1387 cmn_err(CE_WARN, 1388 "!%s: ddi_dma_alloc_handle: failed", 1389 __func__); 1390 goto alloc_sbuf_err; 1391 } 1392 1393 /* Set context for Return buffer callback */ 1394 sbq_desc->bd_dma.dma_addr 
= dma_cookie.dmac_laddress; 1395 sbq_desc->rx_recycle.free_func = ql_release_to_sbuf_free_list; 1396 sbq_desc->rx_recycle.free_arg = (caddr_t)sbq_desc; 1397 sbq_desc->rx_ring = rx_ring; 1398 sbq_desc->upl_inuse = 0; 1399 sbq_desc->free_buf = 0; 1400 1401 sbq_desc->mp = 1402 desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr), 1403 rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle); 1404 if (sbq_desc->mp == NULL) { 1405 cmn_err(CE_WARN, "%s: desballoc() failed", __func__); 1406 goto alloc_sbuf_err; 1407 } 1408 ql_add_sbuf_to_free_list(rx_ring, sbq_desc); 1409 } 1410 1411 return (DDI_SUCCESS); 1412 1413 alloc_sbuf_err: 1414 ql_free_sbq_buffers(rx_ring); 1415 return (DDI_FAILURE); 1416 } 1417 1418 static void 1419 ql_free_lbq_buffers(struct rx_ring *rx_ring) 1420 { 1421 struct bq_desc *lbq_desc; 1422 uint32_t i, j; 1423 int force_cnt = 0; 1424 1425 j = rx_ring->lbq_free_head; 1426 for (i = 0; i < rx_ring->lbuf_free_count; i++) { 1427 lbq_desc = rx_ring->lbuf_free[j]; 1428 lbq_desc->free_buf = 1; 1429 j++; 1430 if (j >= rx_ring->lbq_len) 1431 j = 0; 1432 if (lbq_desc->mp != NULL) { 1433 freemsg(lbq_desc->mp); 1434 lbq_desc->mp = NULL; 1435 } 1436 } 1437 rx_ring->lbuf_free_count = 0; 1438 1439 j = rx_ring->lbq_use_head; 1440 for (i = 0; i < rx_ring->lbuf_in_use_count; i++) { 1441 lbq_desc = rx_ring->lbuf_in_use[j]; 1442 lbq_desc->free_buf = 1; 1443 j++; 1444 if (j >= rx_ring->lbq_len) { 1445 j = 0; 1446 } 1447 if (lbq_desc->mp != NULL) { 1448 freemsg(lbq_desc->mp); 1449 lbq_desc->mp = NULL; 1450 } 1451 } 1452 rx_ring->lbuf_in_use_count = 0; 1453 1454 lbq_desc = &rx_ring->lbq_desc[0]; 1455 for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) { 1456 /* Set flag so that callback will not allocate a new buffer */ 1457 lbq_desc->free_buf = 1; 1458 if (lbq_desc->upl_inuse != 0) { 1459 force_cnt++; 1460 } 1461 if (lbq_desc->bd_dma.dma_handle != NULL) { 1462 ql_free_phys(&lbq_desc->bd_dma.dma_handle, 1463 &lbq_desc->bd_dma.acc_handle); 1464 lbq_desc->bd_dma.dma_handle = NULL; 1465 
lbq_desc->bd_dma.acc_handle = NULL; 1466 } 1467 } 1468 #ifdef QLGE_LOAD_UNLOAD 1469 if (force_cnt) { 1470 cmn_err(CE_WARN, "lbq: free %d inuse %d force %d", 1471 rx_ring->lbuf_free_count, rx_ring->lbuf_in_use_count, 1472 force_cnt); 1473 } 1474 #endif 1475 if (rx_ring->lbuf_in_use != NULL) { 1476 kmem_free(rx_ring->lbuf_in_use, (rx_ring->lbq_len * 1477 sizeof (struct bq_desc *))); 1478 rx_ring->lbuf_in_use = NULL; 1479 } 1480 1481 if (rx_ring->lbuf_free != NULL) { 1482 kmem_free(rx_ring->lbuf_free, (rx_ring->lbq_len * 1483 sizeof (struct bq_desc *))); 1484 rx_ring->lbuf_free = NULL; 1485 } 1486 } 1487 1488 /* Allocate large buffers */ 1489 static int 1490 ql_alloc_lbufs(qlge_t *qlge, struct rx_ring *rx_ring) 1491 { 1492 struct bq_desc *lbq_desc; 1493 ddi_dma_cookie_t dma_cookie; 1494 int i; 1495 uint32_t lbq_buf_size; 1496 1497 rx_ring->lbuf_free = kmem_zalloc(rx_ring->lbq_len * 1498 sizeof (struct bq_desc *), KM_NOSLEEP); 1499 if (rx_ring->lbuf_free == NULL) { 1500 cmn_err(CE_WARN, 1501 "!%s: lbuf_free_list alloc: failed", 1502 __func__); 1503 rx_ring->lbuf_free_count = 0; 1504 goto alloc_lbuf_err; 1505 } 1506 1507 rx_ring->lbuf_in_use = kmem_zalloc(rx_ring->lbq_len * 1508 sizeof (struct bq_desc *), KM_NOSLEEP); 1509 1510 if (rx_ring->lbuf_in_use == NULL) { 1511 cmn_err(CE_WARN, 1512 "!%s: lbuf_inuse_list alloc: failed", 1513 __func__); 1514 rx_ring->lbuf_in_use_count = 0; 1515 goto alloc_lbuf_err; 1516 } 1517 rx_ring->lbq_use_head = 0; 1518 rx_ring->lbq_use_tail = 0; 1519 rx_ring->lbq_free_head = 0; 1520 rx_ring->lbq_free_tail = 0; 1521 1522 lbq_buf_size = (qlge->mtu == ETHERMTU) ? 
1523 NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE; 1524 1525 lbq_desc = &rx_ring->lbq_desc[0]; 1526 for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) { 1527 rx_ring->lbq_buf_size = lbq_buf_size; 1528 /* Allocate buffer */ 1529 if (ql_alloc_phys(qlge->dip, &lbq_desc->bd_dma.dma_handle, 1530 &ql_buf_acc_attr, 1531 DDI_DMA_READ | DDI_DMA_STREAMING, 1532 &lbq_desc->bd_dma.acc_handle, 1533 (size_t)rx_ring->lbq_buf_size, /* mem size */ 1534 (size_t)0, /* default alignment */ 1535 (caddr_t *)&lbq_desc->bd_dma.vaddr, 1536 &dma_cookie) != 0) { 1537 cmn_err(CE_WARN, 1538 "!%s: ddi_dma_alloc_handle: failed", 1539 __func__); 1540 goto alloc_lbuf_err; 1541 } 1542 1543 /* Set context for Return buffer callback */ 1544 lbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress; 1545 lbq_desc->rx_recycle.free_func = ql_release_to_lbuf_free_list; 1546 lbq_desc->rx_recycle.free_arg = (caddr_t)lbq_desc; 1547 lbq_desc->rx_ring = rx_ring; 1548 lbq_desc->upl_inuse = 0; 1549 lbq_desc->free_buf = 0; 1550 1551 lbq_desc->mp = 1552 desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr), 1553 rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle); 1554 if (lbq_desc->mp == NULL) { 1555 cmn_err(CE_WARN, "%s: desballoc() failed", __func__); 1556 goto alloc_lbuf_err; 1557 } 1558 ql_add_lbuf_to_free_list(rx_ring, lbq_desc); 1559 } /* For all large buffers */ 1560 1561 return (DDI_SUCCESS); 1562 1563 alloc_lbuf_err: 1564 ql_free_lbq_buffers(rx_ring); 1565 return (DDI_FAILURE); 1566 } 1567 1568 /* 1569 * Free rx buffers 1570 */ 1571 static void 1572 ql_free_rx_buffers(qlge_t *qlge) 1573 { 1574 int i; 1575 struct rx_ring *rx_ring; 1576 1577 for (i = 0; i < qlge->rx_ring_count; i++) { 1578 rx_ring = &qlge->rx_ring[i]; 1579 if (rx_ring->type != TX_Q) { 1580 ql_free_lbq_buffers(rx_ring); 1581 ql_free_sbq_buffers(rx_ring); 1582 } 1583 } 1584 } 1585 1586 /* 1587 * Allocate rx buffers 1588 */ 1589 static int 1590 ql_alloc_rx_buffers(qlge_t *qlge) 1591 { 1592 struct rx_ring *rx_ring; 1593 int i; 1594 1595 for (i = 0; i < 
qlge->rx_ring_count; i++) { 1596 rx_ring = &qlge->rx_ring[i]; 1597 if (rx_ring->type != TX_Q) { 1598 if (ql_alloc_sbufs(qlge, rx_ring) != DDI_SUCCESS) 1599 goto alloc_err; 1600 if (ql_alloc_lbufs(qlge, rx_ring) != DDI_SUCCESS) 1601 goto alloc_err; 1602 } 1603 } 1604 #ifdef QLGE_TRACK_BUFFER_USAGE 1605 for (i = 0; i < qlge->rx_ring_count; i++) { 1606 if (qlge->rx_ring[i].type == RX_Q) { 1607 qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS; 1608 qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS; 1609 } 1610 qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES; 1611 } 1612 #endif 1613 return (DDI_SUCCESS); 1614 1615 alloc_err: 1616 1617 return (DDI_FAILURE); 1618 } 1619 1620 /* 1621 * Initialize large buffer queue ring 1622 */ 1623 static void 1624 ql_init_lbq_ring(struct rx_ring *rx_ring) 1625 { 1626 uint16_t i; 1627 struct bq_desc *lbq_desc; 1628 1629 bzero(rx_ring->lbq_desc, rx_ring->lbq_len * sizeof (struct bq_desc)); 1630 for (i = 0; i < rx_ring->lbq_len; i++) { 1631 lbq_desc = &rx_ring->lbq_desc[i]; 1632 lbq_desc->index = i; 1633 } 1634 } 1635 1636 /* 1637 * Initialize small buffer queue ring 1638 */ 1639 static void 1640 ql_init_sbq_ring(struct rx_ring *rx_ring) 1641 { 1642 uint16_t i; 1643 struct bq_desc *sbq_desc; 1644 1645 bzero(rx_ring->sbq_desc, rx_ring->sbq_len * sizeof (struct bq_desc)); 1646 for (i = 0; i < rx_ring->sbq_len; i++) { 1647 sbq_desc = &rx_ring->sbq_desc[i]; 1648 sbq_desc->index = i; 1649 } 1650 } 1651 1652 /* 1653 * Calculate the pseudo-header checksum if hardware can not do 1654 */ 1655 static void 1656 ql_pseudo_cksum(uint8_t *buf) 1657 { 1658 uint32_t cksum; 1659 uint16_t iphl; 1660 uint16_t proto; 1661 1662 iphl = (uint16_t)(4 * (buf[0] & 0xF)); 1663 cksum = (((uint16_t)buf[2])<<8) + buf[3] - iphl; 1664 cksum += proto = buf[9]; 1665 cksum += (((uint16_t)buf[12])<<8) + buf[13]; 1666 cksum += (((uint16_t)buf[14])<<8) + buf[15]; 1667 cksum += (((uint16_t)buf[16])<<8) + buf[17]; 1668 cksum += (((uint16_t)buf[18])<<8) + buf[19]; 1669 cksum = (cksum>>16) + 
(cksum & 0xFFFF); 1670 cksum = (cksum>>16) + (cksum & 0xFFFF); 1671 1672 /* 1673 * Point it to the TCP/UDP header, and 1674 * update the checksum field. 1675 */ 1676 buf += iphl + ((proto == IPPROTO_TCP) ? 1677 TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET); 1678 1679 *(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum); 1680 1681 } 1682 1683 /* 1684 * Transmit an incoming packet. 1685 */ 1686 mblk_t * 1687 ql_ring_tx(void *arg, mblk_t *mp) 1688 { 1689 struct tx_ring *tx_ring = (struct tx_ring *)arg; 1690 qlge_t *qlge = tx_ring->qlge; 1691 mblk_t *next; 1692 int rval; 1693 uint32_t tx_count = 0; 1694 1695 if (qlge->port_link_state == LS_DOWN) { 1696 /* can not send message while link is down */ 1697 mblk_t *tp; 1698 cmn_err(CE_WARN, "tx failed due to link down"); 1699 1700 while (mp != NULL) { 1701 tp = mp->b_next; 1702 mp->b_next = NULL; 1703 freemsg(mp); 1704 mp = tp; 1705 } 1706 goto exit; 1707 } 1708 1709 mutex_enter(&tx_ring->tx_lock); 1710 /* if mac is not started, driver is not ready, can not send */ 1711 if (tx_ring->mac_flags != QL_MAC_STARTED) { 1712 cmn_err(CE_WARN, "%s(%d)ring not started, mode %d " 1713 " return packets", 1714 __func__, qlge->instance, tx_ring->mac_flags); 1715 mutex_exit(&tx_ring->tx_lock); 1716 goto exit; 1717 } 1718 1719 /* we must try to send all */ 1720 while (mp != NULL) { 1721 /* 1722 * if number of available slots is less than a threshold, 1723 * then quit 1724 */ 1725 if (tx_ring->tx_free_count <= TX_STOP_THRESHOLD) { 1726 tx_ring->queue_stopped = 1; 1727 rval = DDI_FAILURE; 1728 #ifdef QLGE_LOAD_UNLOAD 1729 cmn_err(CE_WARN, "%s(%d) no resources", 1730 __func__, qlge->instance); 1731 #endif 1732 tx_ring->defer++; 1733 /* 1734 * If we return the buffer back we are expected to call 1735 * mac_tx_ring_update() when resources are available 1736 */ 1737 break; 1738 } 1739 1740 next = mp->b_next; 1741 mp->b_next = NULL; 1742 1743 rval = ql_send_common(tx_ring, mp); 1744 1745 if (rval != DDI_SUCCESS) { 1746 mp->b_next = next; 1747 
break; 1748 } 1749 tx_count++; 1750 mp = next; 1751 } 1752 1753 /* 1754 * After all msg blocks are mapped or copied to tx buffer, 1755 * trigger the hardware to send! 1756 */ 1757 if (tx_count > 0) { 1758 ql_write_doorbell_reg(tx_ring->qlge, tx_ring->prod_idx_db_reg, 1759 tx_ring->prod_idx); 1760 } 1761 1762 mutex_exit(&tx_ring->tx_lock); 1763 exit: 1764 return (mp); 1765 } 1766 1767 1768 /* 1769 * This function builds an mblk list for the given inbound 1770 * completion. 1771 */ 1772 1773 static mblk_t * 1774 ql_build_rx_mp(qlge_t *qlge, struct rx_ring *rx_ring, 1775 struct ib_mac_iocb_rsp *ib_mac_rsp) 1776 { 1777 mblk_t *mp = NULL; 1778 mblk_t *mp1 = NULL; /* packet header */ 1779 mblk_t *mp2 = NULL; /* packet content */ 1780 struct bq_desc *lbq_desc; 1781 struct bq_desc *sbq_desc; 1782 uint32_t err_flag = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK); 1783 uint32_t payload_len = le32_to_cpu(ib_mac_rsp->data_len); 1784 uint32_t header_len = le32_to_cpu(ib_mac_rsp->hdr_len); 1785 uint32_t pkt_len = payload_len + header_len; 1786 uint32_t done; 1787 uint64_t *curr_ial_ptr; 1788 uint32_t ial_data_addr_low; 1789 uint32_t actual_data_addr_low; 1790 mblk_t *mp_ial = NULL; /* ial chained packets */ 1791 uint32_t size; 1792 1793 /* 1794 * Check if error flags are set 1795 */ 1796 if (err_flag != 0) { 1797 if ((err_flag & IB_MAC_IOCB_RSP_ERR_OVERSIZE) != 0) 1798 rx_ring->frame_too_long++; 1799 if ((err_flag & IB_MAC_IOCB_RSP_ERR_UNDERSIZE) != 0) 1800 rx_ring->frame_too_short++; 1801 if ((err_flag & IB_MAC_IOCB_RSP_ERR_CRC) != 0) 1802 rx_ring->fcs_err++; 1803 #ifdef QLGE_LOAD_UNLOAD 1804 cmn_err(CE_WARN, "bad packet, type 0x%x", err_flag); 1805 #endif 1806 QL_DUMP(DBG_RX, "qlge_ring_rx: bad response iocb dump\n", 1807 (uint8_t *)ib_mac_rsp, 8, 1808 (size_t)sizeof (struct ib_mac_iocb_rsp)); 1809 } 1810 1811 /* header should not be in large buffer */ 1812 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL) { 1813 cmn_err(CE_WARN, "header in large buffer or invalid!"); 1814 
err_flag |= 1; 1815 } 1816 /* 1817 * Handle the header buffer if present. 1818 * packet header must be valid and saved in one small buffer 1819 * broadcast/multicast packets' headers not splitted 1820 */ 1821 if ((ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) && 1822 (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) { 1823 QL_PRINT(DBG_RX, ("Header of %d bytes in small buffer.\n", 1824 header_len)); 1825 /* Sync access */ 1826 sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring); 1827 1828 ASSERT(sbq_desc != NULL); 1829 1830 /* 1831 * Validate addresses from the ASIC with the 1832 * expected sbuf address 1833 */ 1834 if (cpu_to_le64(sbq_desc->bd_dma.dma_addr) 1835 != ib_mac_rsp->hdr_addr) { 1836 /* Small buffer address mismatch */ 1837 cmn_err(CE_WARN, "%s(%d) ring%d packet saved" 1838 " in wrong small buffer", 1839 __func__, qlge->instance, rx_ring->cq_id); 1840 goto fetal_error; 1841 } 1842 /* get this packet */ 1843 mp1 = sbq_desc->mp; 1844 if ((err_flag != 0)|| (mp1 == NULL)) { 1845 /* failed on this packet, put it back for re-arming */ 1846 #ifdef QLGE_LOAD_UNLOAD 1847 cmn_err(CE_WARN, "get header from small buffer fail"); 1848 #endif 1849 ql_refill_sbuf_free_list(sbq_desc, B_FALSE); 1850 mp1 = NULL; 1851 } else { 1852 /* Flush DMA'd data */ 1853 (void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle, 1854 0, header_len, DDI_DMA_SYNC_FORKERNEL); 1855 1856 if ((qlge->ip_hdr_offset != 0)&& 1857 (header_len < SMALL_BUFFER_SIZE)) { 1858 /* 1859 * copy entire header to a 2 bytes boundary 1860 * address for 8100 adapters so that the IP 1861 * header can be on a 4 byte boundary address 1862 */ 1863 bcopy(mp1->b_rptr, 1864 (mp1->b_rptr + SMALL_BUFFER_SIZE + 1865 qlge->ip_hdr_offset), 1866 header_len); 1867 mp1->b_rptr += SMALL_BUFFER_SIZE + 1868 qlge->ip_hdr_offset; 1869 } 1870 1871 /* 1872 * Adjust the mp payload_len to match 1873 * the packet header payload_len 1874 */ 1875 mp1->b_wptr = mp1->b_rptr + header_len; 1876 mp1->b_next = mp1->b_cont = NULL; 1877 QL_DUMP(DBG_RX, "\t RX packet 
header dump:\n", 1878 (uint8_t *)mp1->b_rptr, 8, header_len); 1879 } 1880 } 1881 1882 /* 1883 * packet data or whole packet can be in small or one or 1884 * several large buffer(s) 1885 */ 1886 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) { 1887 /* 1888 * The data is in a single small buffer. 1889 */ 1890 sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring); 1891 1892 ASSERT(sbq_desc != NULL); 1893 1894 QL_PRINT(DBG_RX, 1895 ("%d bytes in a single small buffer, sbq_desc = %p, " 1896 "sbq_desc->bd_dma.dma_addr = %x," 1897 " ib_mac_rsp->data_addr = %x, mp = %p\n", 1898 payload_len, sbq_desc, sbq_desc->bd_dma.dma_addr, 1899 ib_mac_rsp->data_addr, sbq_desc->mp)); 1900 1901 /* 1902 * Validate addresses from the ASIC with the 1903 * expected sbuf address 1904 */ 1905 if (cpu_to_le64(sbq_desc->bd_dma.dma_addr) 1906 != ib_mac_rsp->data_addr) { 1907 /* Small buffer address mismatch */ 1908 cmn_err(CE_WARN, "%s(%d) ring%d packet saved" 1909 " in wrong small buffer", 1910 __func__, qlge->instance, rx_ring->cq_id); 1911 goto fetal_error; 1912 } 1913 /* get this packet */ 1914 mp2 = sbq_desc->mp; 1915 if ((err_flag != 0) || (mp2 == NULL)) { 1916 #ifdef QLGE_LOAD_UNLOAD 1917 /* failed on this packet, put it back for re-arming */ 1918 cmn_err(CE_WARN, "ignore bad data from small buffer"); 1919 #endif 1920 ql_refill_sbuf_free_list(sbq_desc, B_FALSE); 1921 mp2 = NULL; 1922 } else { 1923 /* Adjust the buffer length to match the payload_len */ 1924 mp2->b_wptr = mp2->b_rptr + payload_len; 1925 mp2->b_next = mp2->b_cont = NULL; 1926 /* Flush DMA'd data */ 1927 (void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle, 1928 0, payload_len, DDI_DMA_SYNC_FORKERNEL); 1929 QL_DUMP(DBG_RX, "\t RX packet payload dump:\n", 1930 (uint8_t *)mp2->b_rptr, 8, payload_len); 1931 /* 1932 * if payload is too small , copy to 1933 * the end of packet header 1934 */ 1935 if ((mp1 != NULL) && 1936 (payload_len <= qlge->payload_copy_thresh) && 1937 (pkt_len < 1938 (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) { 1939 
bcopy(mp2->b_rptr, mp1->b_wptr, payload_len); 1940 mp1->b_wptr += payload_len; 1941 freemsg(mp2); 1942 mp2 = NULL; 1943 } 1944 } 1945 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) { 1946 /* 1947 * The data is in a single large buffer. 1948 */ 1949 lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring); 1950 1951 QL_PRINT(DBG_RX, 1952 ("%d bytes in a single large buffer, lbq_desc = %p, " 1953 "lbq_desc->bd_dma.dma_addr = %x," 1954 " ib_mac_rsp->data_addr = %x, mp = %p\n", 1955 payload_len, lbq_desc, lbq_desc->bd_dma.dma_addr, 1956 ib_mac_rsp->data_addr, lbq_desc->mp)); 1957 1958 ASSERT(lbq_desc != NULL); 1959 1960 /* 1961 * Validate addresses from the ASIC with 1962 * the expected lbuf address 1963 */ 1964 if (cpu_to_le64(lbq_desc->bd_dma.dma_addr) 1965 != ib_mac_rsp->data_addr) { 1966 /* Large buffer address mismatch */ 1967 cmn_err(CE_WARN, "%s(%d) ring%d packet saved" 1968 " in wrong large buffer", 1969 __func__, qlge->instance, rx_ring->cq_id); 1970 goto fetal_error; 1971 } 1972 mp2 = lbq_desc->mp; 1973 if ((err_flag != 0) || (mp2 == NULL)) { 1974 #ifdef QLGE_LOAD_UNLOAD 1975 cmn_err(CE_WARN, "ignore bad data from large buffer"); 1976 #endif 1977 /* failed on this packet, put it back for re-arming */ 1978 ql_refill_lbuf_free_list(lbq_desc, B_FALSE); 1979 mp2 = NULL; 1980 } else { 1981 /* 1982 * Adjust the buffer length to match 1983 * the packet payload_len 1984 */ 1985 mp2->b_wptr = mp2->b_rptr + payload_len; 1986 mp2->b_next = mp2->b_cont = NULL; 1987 /* Flush DMA'd data */ 1988 (void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle, 1989 0, payload_len, DDI_DMA_SYNC_FORKERNEL); 1990 QL_DUMP(DBG_RX, "\t RX packet payload dump:\n", 1991 (uint8_t *)mp2->b_rptr, 8, payload_len); 1992 /* 1993 * if payload is too small , copy to 1994 * the end of packet header 1995 */ 1996 if ((mp1 != NULL) && 1997 (payload_len <= qlge->payload_copy_thresh) && 1998 (pkt_len< 1999 (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) { 2000 bcopy(mp2->b_rptr, mp1->b_wptr, payload_len); 2001 
mp1->b_wptr += payload_len; 2002 freemsg(mp2); 2003 mp2 = NULL; 2004 } 2005 } 2006 } else if (payload_len) { 2007 /* 2008 * payload available but not in sml nor lrg buffer, 2009 * so, it is saved in IAL 2010 */ 2011 #ifdef QLGE_LOAD_UNLOAD 2012 cmn_err(CE_NOTE, "packet chained in IAL \n"); 2013 #endif 2014 /* lrg buf addresses are saved in one small buffer */ 2015 sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring); 2016 curr_ial_ptr = (uint64_t *)sbq_desc->bd_dma.vaddr; 2017 done = 0; 2018 while (!done) { 2019 ial_data_addr_low = 2020 (uint32_t)(le64_to_cpu(*curr_ial_ptr) & 2021 0xFFFFFFFE); 2022 /* check if this is the last packet fragment */ 2023 done = (uint32_t)(le64_to_cpu(*curr_ial_ptr) & 1); 2024 curr_ial_ptr++; 2025 /* 2026 * The data is in one or several large buffer(s). 2027 */ 2028 lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring); 2029 actual_data_addr_low = 2030 (uint32_t)(lbq_desc->bd_dma.dma_addr & 2031 0xFFFFFFFE); 2032 if (ial_data_addr_low != actual_data_addr_low) { 2033 cmn_err(CE_WARN, 2034 "packet saved in wrong ial lrg buffer" 2035 " expected %x, actual %lx", 2036 ial_data_addr_low, 2037 (uintptr_t)lbq_desc->bd_dma.dma_addr); 2038 goto fetal_error; 2039 } 2040 2041 if (mp_ial == NULL) { 2042 mp_ial = mp2 = lbq_desc->mp; 2043 } else { 2044 mp2->b_cont = lbq_desc->mp; 2045 mp2 = lbq_desc->mp; 2046 } 2047 mp2->b_next = NULL; 2048 mp2->b_cont = NULL; 2049 size = (payload_len < rx_ring->lbq_buf_size)? 
2050 payload_len : rx_ring->lbq_buf_size; 2051 mp2->b_wptr = mp2->b_rptr + size; 2052 /* Flush DMA'd data */ 2053 (void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle, 2054 0, size, DDI_DMA_SYNC_FORKERNEL); 2055 payload_len -= size; 2056 QL_DUMP(DBG_RX, "\t Mac data dump:\n", 2057 (uint8_t *)mp2->b_rptr, 8, size); 2058 } 2059 mp2 = mp_ial; 2060 freemsg(sbq_desc->mp); 2061 } 2062 /* 2063 * some packets' hdr not split, then send mp2 upstream, otherwise, 2064 * concatenate message block mp2 to the tail of message header, mp1 2065 */ 2066 if (!err_flag) { 2067 if (mp1) { 2068 if (mp2) { 2069 QL_PRINT(DBG_RX, ("packet in mp1 and mp2\n")); 2070 linkb(mp1, mp2); /* mp1->b_cont = mp2; */ 2071 mp = mp1; 2072 } else { 2073 QL_PRINT(DBG_RX, ("packet in mp1 only\n")); 2074 mp = mp1; 2075 } 2076 } else if (mp2) { 2077 QL_PRINT(DBG_RX, ("packet in mp2 only\n")); 2078 mp = mp2; 2079 } 2080 } 2081 return (mp); 2082 2083 fetal_error: 2084 /* Fetal Error! */ 2085 *mp->b_wptr = 0; 2086 return (mp); 2087 2088 } 2089 2090 /* 2091 * Bump completion queue consumer index. 2092 */ 2093 static void 2094 ql_update_cq(struct rx_ring *rx_ring) 2095 { 2096 rx_ring->cnsmr_idx++; 2097 rx_ring->curr_entry++; 2098 if (rx_ring->cnsmr_idx >= rx_ring->cq_len) { 2099 rx_ring->cnsmr_idx = 0; 2100 rx_ring->curr_entry = rx_ring->cq_dma.vaddr; 2101 } 2102 } 2103 2104 /* 2105 * Update completion queue consumer index. 2106 */ 2107 static void 2108 ql_write_cq_idx(struct rx_ring *rx_ring) 2109 { 2110 qlge_t *qlge = rx_ring->qlge; 2111 2112 ql_write_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg, 2113 rx_ring->cnsmr_idx); 2114 } 2115 2116 /* 2117 * Processes a SYS-Chip Event Notification Completion Event. 2118 * The incoming notification event that describes a link up/down 2119 * or some sorts of error happens. 
2120 */ 2121 static void 2122 ql_process_chip_ae_intr(qlge_t *qlge, 2123 struct ib_sys_event_iocb_rsp *ib_sys_event_rsp_ptr) 2124 { 2125 uint8_t eventType = ib_sys_event_rsp_ptr->event_type; 2126 uint32_t soft_req = 0; 2127 2128 switch (eventType) { 2129 case SYS_EVENT_PORT_LINK_UP: /* 0x0h */ 2130 QL_PRINT(DBG_MBX, ("Port Link Up\n")); 2131 break; 2132 2133 case SYS_EVENT_PORT_LINK_DOWN: /* 0x1h */ 2134 QL_PRINT(DBG_MBX, ("Port Link Down\n")); 2135 break; 2136 2137 case SYS_EVENT_MULTIPLE_CAM_HITS : /* 0x6h */ 2138 cmn_err(CE_WARN, "A multiple CAM hits look up error " 2139 "occurred"); 2140 soft_req |= NEED_HW_RESET; 2141 break; 2142 2143 case SYS_EVENT_SOFT_ECC_ERR: /* 0x7h */ 2144 cmn_err(CE_WARN, "Soft ECC error detected"); 2145 soft_req |= NEED_HW_RESET; 2146 break; 2147 2148 case SYS_EVENT_MGMT_FATAL_ERR: /* 0x8h */ 2149 cmn_err(CE_WARN, "Management (MPI) Processor fatal" 2150 " error occured"); 2151 soft_req |= NEED_MPI_RESET; 2152 break; 2153 2154 case SYS_EVENT_MAC_INTERRUPT: /* 0x9h */ 2155 QL_PRINT(DBG_MBX, ("MAC Interrupt")); 2156 break; 2157 2158 case SYS_EVENT_PCI_ERR_READING_SML_LRG_BUF: /* 0x40h */ 2159 cmn_err(CE_WARN, "PCI Error reading small/large " 2160 "buffers occured"); 2161 soft_req |= NEED_HW_RESET; 2162 break; 2163 2164 default: 2165 QL_PRINT(DBG_RX, ("%s(%d) unknown Sys Event: " 2166 "type 0x%x occured", 2167 __func__, qlge->instance, eventType)); 2168 break; 2169 } 2170 2171 if ((soft_req & NEED_MPI_RESET) != 0) { 2172 ql_wake_mpi_reset_soft_intr(qlge); 2173 } else if ((soft_req & NEED_HW_RESET) != 0) { 2174 ql_wake_asic_reset_soft_intr(qlge); 2175 } 2176 } 2177 2178 /* 2179 * set received packet checksum flag 2180 */ 2181 void 2182 ql_set_rx_cksum(mblk_t *mp, struct ib_mac_iocb_rsp *net_rsp) 2183 { 2184 uint32_t flags; 2185 2186 /* Not TCP or UDP packet? 
nothing more to do */ 2187 if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) == 0) && 2188 ((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) == 0)) 2189 return; 2190 2191 /* No CKO support for IPv6 */ 2192 if ((net_rsp->flags3 & IB_MAC_IOCB_RSP_V6) != 0) 2193 return; 2194 2195 /* 2196 * If checksum error, don't set flags; stack will calculate 2197 * checksum, detect the error and update statistics 2198 */ 2199 if (((net_rsp->flags1 & IB_MAC_IOCB_RSP_TE) != 0) || 2200 ((net_rsp->flags1 & IB_MAC_IOCB_RSP_IE) != 0)) 2201 return; 2202 2203 /* TCP or UDP packet and checksum valid */ 2204 if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) != 0) && 2205 ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) { 2206 flags = HCK_FULLCKSUM_OK; 2207 mac_hcksum_set(mp, 0, 0, 0, 0, flags); 2208 } 2209 if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) != 0) && 2210 ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) { 2211 flags = HCK_FULLCKSUM_OK; 2212 mac_hcksum_set(mp, 0, 0, 0, 0, flags); 2213 } 2214 } 2215 2216 /* 2217 * This function goes through h/w descriptor in one specified rx ring, 2218 * receives the data if the descriptor status shows the data is ready. 2219 * It returns a chain of mblks containing the received data, to be 2220 * passed up to mac_rx_ring(). 
2221 */ 2222 mblk_t * 2223 ql_ring_rx(struct rx_ring *rx_ring, int poll_bytes) 2224 { 2225 qlge_t *qlge = rx_ring->qlge; 2226 uint32_t prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); 2227 struct ib_mac_iocb_rsp *net_rsp; 2228 mblk_t *mp; 2229 mblk_t *mblk_head; 2230 mblk_t **mblk_tail; 2231 uint32_t received_bytes = 0; 2232 boolean_t done = B_FALSE; 2233 uint32_t length; 2234 2235 #ifdef QLGE_TRACK_BUFFER_USAGE 2236 uint32_t consumer_idx; 2237 uint32_t producer_idx; 2238 uint32_t num_free_entries; 2239 uint32_t temp; 2240 2241 temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg); 2242 consumer_idx = temp & 0x0000ffff; 2243 producer_idx = (temp >> 16); 2244 2245 if (consumer_idx > producer_idx) 2246 num_free_entries = (consumer_idx - producer_idx); 2247 else 2248 num_free_entries = NUM_RX_RING_ENTRIES - ( 2249 producer_idx - consumer_idx); 2250 2251 if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id]) 2252 qlge->cq_low_count[rx_ring->cq_id] = num_free_entries; 2253 2254 #endif 2255 mblk_head = NULL; 2256 mblk_tail = &mblk_head; 2257 2258 while (!done && (prod != rx_ring->cnsmr_idx)) { 2259 QL_PRINT(DBG_RX, 2260 ("%s cq_id = %d, prod = %d, cnsmr = %d.\n", 2261 __func__, rx_ring->cq_id, prod, rx_ring->cnsmr_idx)); 2262 2263 net_rsp = (struct ib_mac_iocb_rsp *)rx_ring->curr_entry; 2264 (void) ddi_dma_sync(rx_ring->cq_dma.dma_handle, 2265 (off_t)((uintptr_t)net_rsp - 2266 (uintptr_t)rx_ring->cq_dma.vaddr), 2267 (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL); 2268 QL_DUMP(DBG_RX, "qlge_ring_rx: rx completion iocb\n", 2269 rx_ring->curr_entry, 8, (size_t)sizeof (*net_rsp)); 2270 2271 switch (net_rsp->opcode) { 2272 2273 case OPCODE_IB_MAC_IOCB: 2274 /* Adding length of pkt header and payload */ 2275 length = le32_to_cpu(net_rsp->data_len) + 2276 le32_to_cpu(net_rsp->hdr_len); 2277 if ((poll_bytes != QLGE_POLL_ALL) && 2278 ((received_bytes + length) > poll_bytes)) { 2279 done = B_TRUE; 2280 continue; 2281 } 2282 received_bytes += length; 2283 2284 mp = 
ql_build_rx_mp(qlge, rx_ring, net_rsp); 2285 if (mp != NULL) { 2286 if (rx_ring->mac_flags != QL_MAC_STARTED) { 2287 /* 2288 * Increment number of packets we have 2289 * indicated to the stack, should be 2290 * decremented when we get it back 2291 * or when freemsg is called 2292 */ 2293 ASSERT(rx_ring->rx_indicate 2294 <= rx_ring->cq_len); 2295 #ifdef QLGE_LOAD_UNLOAD 2296 cmn_err(CE_WARN, "%s do not send to OS," 2297 " mac_flags %d, indicate %d", 2298 __func__, rx_ring->mac_flags, 2299 rx_ring->rx_indicate); 2300 #endif 2301 QL_PRINT(DBG_RX, 2302 ("cq_id = %d, packet " 2303 "dropped, mac not " 2304 "enabled.\n", 2305 rx_ring->cq_id)); 2306 rx_ring->rx_pkt_dropped_mac_unenabled++; 2307 2308 /* rx_lock is expected to be held */ 2309 mutex_exit(&rx_ring->rx_lock); 2310 freemsg(mp); 2311 mutex_enter(&rx_ring->rx_lock); 2312 mp = NULL; 2313 } 2314 2315 if (mp != NULL) { 2316 /* 2317 * IP full packet has been 2318 * successfully verified by 2319 * H/W and is correct 2320 */ 2321 ql_set_rx_cksum(mp, net_rsp); 2322 2323 rx_ring->rx_packets++; 2324 rx_ring->rx_bytes += length; 2325 *mblk_tail = mp; 2326 mblk_tail = &mp->b_next; 2327 } 2328 } else { 2329 QL_PRINT(DBG_RX, 2330 ("cq_id = %d, packet dropped\n", 2331 rx_ring->cq_id)); 2332 rx_ring->rx_packets_dropped_no_buffer++; 2333 } 2334 break; 2335 2336 case OPCODE_IB_SYS_EVENT_IOCB: 2337 ql_process_chip_ae_intr(qlge, 2338 (struct ib_sys_event_iocb_rsp *) 2339 net_rsp); 2340 break; 2341 2342 default: 2343 cmn_err(CE_WARN, 2344 "%s Ring(%d)Hit default case, not handled!" 
2345 " dropping the packet, " 2346 "opcode = %x.", __func__, rx_ring->cq_id, 2347 net_rsp->opcode); 2348 break; 2349 } 2350 /* increment cnsmr_idx and curr_entry */ 2351 ql_update_cq(rx_ring); 2352 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); 2353 2354 } 2355 /* update cnsmr_idx */ 2356 ql_write_cq_idx(rx_ring); 2357 /* do not enable interrupt for polling mode */ 2358 if (poll_bytes == QLGE_POLL_ALL) 2359 ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq); 2360 return (mblk_head); 2361 } 2362 2363 /* Process an outbound completion from an rx ring. */ 2364 static void 2365 ql_process_mac_tx_intr(qlge_t *qlge, struct ob_mac_iocb_rsp *mac_rsp) 2366 { 2367 struct tx_ring *tx_ring; 2368 struct tx_ring_desc *tx_ring_desc; 2369 int j; 2370 2371 tx_ring = &qlge->tx_ring[mac_rsp->txq_idx]; 2372 tx_ring_desc = tx_ring->wq_desc; 2373 tx_ring_desc += mac_rsp->tid; 2374 2375 if (tx_ring_desc->tx_type == USE_DMA) { 2376 QL_PRINT(DBG_TX, ("%s(%d): tx type USE_DMA\n", 2377 __func__, qlge->instance)); 2378 2379 /* 2380 * Release the DMA resource that is used for 2381 * DMA binding. 
2382 */ 2383 for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) { 2384 (void) ddi_dma_unbind_handle( 2385 tx_ring_desc->tx_dma_handle[j]); 2386 } 2387 2388 tx_ring_desc->tx_dma_handle_used = 0; 2389 /* 2390 * Free the mblk after sending completed 2391 */ 2392 if (tx_ring_desc->mp != NULL) { 2393 freemsg(tx_ring_desc->mp); 2394 tx_ring_desc->mp = NULL; 2395 } 2396 } 2397 2398 tx_ring->obytes += tx_ring_desc->tx_bytes; 2399 tx_ring->opackets++; 2400 2401 if (mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | OB_MAC_IOCB_RSP_S | 2402 OB_MAC_IOCB_RSP_L | OB_MAC_IOCB_RSP_B)) { 2403 tx_ring->errxmt++; 2404 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) { 2405 /* EMPTY */ 2406 QL_PRINT(DBG_TX, 2407 ("Total descriptor length did not match " 2408 "transfer length.\n")); 2409 } 2410 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) { 2411 /* EMPTY */ 2412 QL_PRINT(DBG_TX, 2413 ("Frame too short to be legal, not sent.\n")); 2414 } 2415 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) { 2416 /* EMPTY */ 2417 QL_PRINT(DBG_TX, 2418 ("Frame too long, but sent anyway.\n")); 2419 } 2420 if (mac_rsp->flags3 & OB_MAC_IOCB_RSP_B) { 2421 /* EMPTY */ 2422 QL_PRINT(DBG_TX, 2423 ("PCI backplane error. 
Frame not sent.\n")); 2424 } 2425 } 2426 atomic_inc_32(&tx_ring->tx_free_count); 2427 } 2428 2429 /* 2430 * clean up tx completion iocbs 2431 */ 2432 static int 2433 ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) 2434 { 2435 qlge_t *qlge = rx_ring->qlge; 2436 uint32_t prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); 2437 struct ob_mac_iocb_rsp *net_rsp = NULL; 2438 int count = 0; 2439 struct tx_ring *tx_ring; 2440 boolean_t resume_tx = B_FALSE; 2441 2442 mutex_enter(&rx_ring->rx_lock); 2443 #ifdef QLGE_TRACK_BUFFER_USAGE 2444 { 2445 uint32_t consumer_idx; 2446 uint32_t producer_idx; 2447 uint32_t num_free_entries; 2448 uint32_t temp; 2449 2450 temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg); 2451 consumer_idx = temp & 0x0000ffff; 2452 producer_idx = (temp >> 16); 2453 2454 if (consumer_idx > producer_idx) 2455 num_free_entries = (consumer_idx - producer_idx); 2456 else 2457 num_free_entries = NUM_RX_RING_ENTRIES - 2458 (producer_idx - consumer_idx); 2459 2460 if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id]) 2461 qlge->cq_low_count[rx_ring->cq_id] = num_free_entries; 2462 2463 } 2464 #endif 2465 /* While there are entries in the completion queue. 
*/ 2466 while (prod != rx_ring->cnsmr_idx) { 2467 2468 QL_PRINT(DBG_RX, 2469 ("%s cq_id = %d, prod = %d, cnsmr = %d.\n", __func__, 2470 rx_ring->cq_id, prod, rx_ring->cnsmr_idx)); 2471 2472 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; 2473 (void) ddi_dma_sync(rx_ring->cq_dma.dma_handle, 2474 (off_t)((uintptr_t)net_rsp - 2475 (uintptr_t)rx_ring->cq_dma.vaddr), 2476 (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL); 2477 2478 QL_DUMP(DBG_RX, "ql_clean_outbound_rx_ring: " 2479 "response packet data\n", 2480 rx_ring->curr_entry, 8, 2481 (size_t)sizeof (*net_rsp)); 2482 2483 switch (net_rsp->opcode) { 2484 2485 case OPCODE_OB_MAC_OFFLOAD_IOCB: 2486 case OPCODE_OB_MAC_IOCB: 2487 ql_process_mac_tx_intr(qlge, net_rsp); 2488 break; 2489 2490 default: 2491 cmn_err(CE_WARN, 2492 "%s Hit default case, not handled! " 2493 "dropping the packet," 2494 " opcode = %x.", 2495 __func__, net_rsp->opcode); 2496 break; 2497 } 2498 count++; 2499 ql_update_cq(rx_ring); 2500 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); 2501 } 2502 ql_write_cq_idx(rx_ring); 2503 2504 mutex_exit(&rx_ring->rx_lock); 2505 2506 tx_ring = &qlge->tx_ring[net_rsp->txq_idx]; 2507 2508 mutex_enter(&tx_ring->tx_lock); 2509 2510 if (tx_ring->queue_stopped && 2511 (tx_ring->tx_free_count > TX_RESUME_THRESHOLD)) { 2512 /* 2513 * The queue got stopped because the tx_ring was full. 2514 * Wake it up, because it's now at least 25% empty. 
2515 */ 2516 tx_ring->queue_stopped = 0; 2517 resume_tx = B_TRUE; 2518 } 2519 2520 mutex_exit(&tx_ring->tx_lock); 2521 /* Don't hold the lock during OS callback */ 2522 if (resume_tx) 2523 RESUME_TX(tx_ring); 2524 return (count); 2525 } 2526 2527 /* 2528 * reset asic when error happens 2529 */ 2530 /* ARGSUSED */ 2531 static uint_t 2532 ql_asic_reset_work(caddr_t arg1, caddr_t arg2) 2533 { 2534 qlge_t *qlge = (qlge_t *)((void *)arg1); 2535 int status; 2536 2537 mutex_enter(&qlge->gen_mutex); 2538 status = ql_bringdown_adapter(qlge); 2539 if (status != DDI_SUCCESS) 2540 goto error; 2541 2542 status = ql_bringup_adapter(qlge); 2543 if (status != DDI_SUCCESS) 2544 goto error; 2545 mutex_exit(&qlge->gen_mutex); 2546 return (DDI_INTR_CLAIMED); 2547 2548 error: 2549 mutex_exit(&qlge->gen_mutex); 2550 cmn_err(CE_WARN, 2551 "qlge up/down cycle failed, closing device"); 2552 return (DDI_INTR_CLAIMED); 2553 } 2554 2555 /* 2556 * Reset MPI 2557 */ 2558 /* ARGSUSED */ 2559 static uint_t 2560 ql_mpi_reset_work(caddr_t arg1, caddr_t arg2) 2561 { 2562 qlge_t *qlge = (qlge_t *)((void *)arg1); 2563 2564 (void) ql_reset_mpi_risc(qlge); 2565 return (DDI_INTR_CLAIMED); 2566 } 2567 2568 /* 2569 * Process MPI mailbox messages 2570 */ 2571 /* ARGSUSED */ 2572 static uint_t 2573 ql_mpi_event_work(caddr_t arg1, caddr_t arg2) 2574 { 2575 qlge_t *qlge = (qlge_t *)((void *)arg1); 2576 2577 ql_do_mpi_intr(qlge); 2578 return (DDI_INTR_CLAIMED); 2579 } 2580 2581 /* Fire up a handler to reset the MPI processor. 
 */
void
ql_wake_asic_reset_soft_intr(qlge_t *qlge)
{
	/* presumably bound to ql_asic_reset_work() -- confirm at setup */
	(void) ddi_intr_trigger_softint(qlge->asic_reset_intr_hdl, NULL);
}

/* Trigger the MPI-reset soft interrupt (handler registered elsewhere). */
static void
ql_wake_mpi_reset_soft_intr(qlge_t *qlge)
{
	(void) ddi_intr_trigger_softint(qlge->mpi_reset_intr_hdl, NULL);
}

/* Trigger the MPI-event soft interrupt (handler registered elsewhere). */
static void
ql_wake_mpi_event_soft_intr(qlge_t *qlge)
{
	(void) ddi_intr_trigger_softint(qlge->mpi_event_intr_hdl, NULL);
}

/*
 * This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple interrupt vector environment.
 * In MSI/Legacy environment it also process the rest of
 * the rx_rings.
 *
 * Returns DDI_INTR_CLAIMED when any work was found (or when the
 * interrupt is already masked), DDI_INTR_UNCLAIMED otherwise.
 */
/* ARGSUSED */
static uint_t
ql_isr(caddr_t arg1, caddr_t arg2)
{
	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
	qlge_t *qlge = rx_ring->qlge;
	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
	uint32_t var, prod;
	int i;
	int work_done = 0;

	mblk_t *mp;

	_NOTE(ARGUNUSED(arg2));

	++qlge->rx_interrupts[rx_ring->cq_id];

	/*
	 * irq_cnt != 0 means completion interrupts are currently masked;
	 * read the status registers (values intentionally discarded --
	 * the reads appear to acknowledge/latch status) and claim.
	 * The 0xfeed000x writes to REG_RSVD7 look like diagnostic
	 * breadcrumbs -- confirm against firmware documentation.
	 */
	if (ql_atomic_read_32(&qlge->intr_ctx[0].irq_cnt)) {
		ql_write_reg(qlge, REG_RSVD7, 0xfeed0002);
		var = ql_read_reg(qlge, REG_ERROR_STATUS);
		var = ql_read_reg(qlge, REG_STATUS);
		var = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
		return (DDI_INTR_CLAIMED);
	}

	ql_disable_completion_interrupt(qlge, intr_ctx->intr);

	/*
	 * Check the default queue and wake handler if active.
	 */
	rx_ring = &qlge->rx_ring[0];
	prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	QL_PRINT(DBG_INTR, ("rx-ring[0] prod index 0x%x, consumer 0x%x ",
	    prod, rx_ring->cnsmr_idx));
	/* check if interrupt is due to incoming packet */
	if (prod != rx_ring->cnsmr_idx) {
		QL_PRINT(DBG_INTR, ("Waking handler for rx_ring[0].\n"));
		ql_disable_completion_interrupt(qlge, intr_ctx->intr);
		mutex_enter(&rx_ring->rx_lock);
		mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
		mutex_exit(&rx_ring->rx_lock);

		if (mp != NULL)
			RX_UPSTREAM(rx_ring, mp);
		work_done++;
	} else {
		/*
		 * If interrupt is not due to incoming packet, read status
		 * register to see if error happens or mailbox interrupt.
		 */
		var = ql_read_reg(qlge, REG_STATUS);
		if ((var & STATUS_FE) != 0) {
			/* Fatal error: schedule a full ASIC reset */
			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);

			cmn_err(CE_WARN, "Got fatal error, STS = %x.", var);
			var = ql_read_reg(qlge, REG_ERROR_STATUS);
			cmn_err(CE_WARN,
			    "Resetting chip. Error Status Register = 0x%x",
			    var);
			ql_wake_asic_reset_soft_intr(qlge);
			return (DDI_INTR_CLAIMED);
		}

		/*
		 * Check MPI processor activity.
		 */
		if ((var & STATUS_PI) != 0) {
			/*
			 * We've got an async event or mailbox completion.
			 * Handle it and clear the source of the interrupt.
			 */
			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);

			QL_PRINT(DBG_INTR, ("Got MPI processor interrupt.\n"));
			ql_disable_completion_interrupt(qlge, intr_ctx->intr);
			ql_wake_mpi_event_soft_intr(qlge);
			work_done++;
		}
	}

	/*
	 * MSI/Legacy: a single vector covers all rings, so also service
	 * the remaining (non-default) rings here.
	 */
	if (qlge->intr_type != DDI_INTR_TYPE_MSIX) {
		/*
		 * Start the DPC for each active queue.
		 */
		for (i = 1; i < qlge->rx_ring_count; i++) {
			rx_ring = &qlge->rx_ring[i];

			if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
			    rx_ring->cnsmr_idx) {
				QL_PRINT(DBG_INTR,
				    ("Waking handler for rx_ring[%d].\n", i));

				ql_disable_completion_interrupt(qlge,
				    rx_ring->irq);
				if (rx_ring->type == TX_Q) {
					/* TX completion ring */
					(void) ql_clean_outbound_rx_ring(
					    rx_ring);
					ql_enable_completion_interrupt(
					    rx_ring->qlge, rx_ring->irq);
				} else {
					mutex_enter(&rx_ring->rx_lock);
					mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
					mutex_exit(&rx_ring->rx_lock);
					if (mp != NULL)
						RX_UPSTREAM(rx_ring, mp);
#ifdef QLGE_LOAD_UNLOAD
					if (rx_ring->mac_flags ==
					    QL_MAC_STOPPED)
						cmn_err(CE_NOTE,
						    "%s rx_indicate(%d) %d\n",
						    __func__, i,
						    rx_ring->rx_indicate);
#endif
				}
				work_done++;
			}
		}
	}

	ql_enable_completion_interrupt(qlge, intr_ctx->intr);

	return (work_done ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
}

/*
 * MSI-X Multiple Vector Interrupt Handler for outbound (TX) completions.
 */
/* ARGSUSED */
static uint_t
ql_msix_tx_isr(caddr_t arg1, caddr_t arg2)
{
	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
	qlge_t *qlge = rx_ring->qlge;
	_NOTE(ARGUNUSED(arg2));

	/* Drain TX completions and re-arm this vector's interrupt */
	++qlge->rx_interrupts[rx_ring->cq_id];
	(void) ql_clean_outbound_rx_ring(rx_ring);
	ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);

	return (DDI_INTR_CLAIMED);
}

/*
 * Poll n_bytes of chained incoming packets
 *
 * GLDv3 polling entry point: returns up to n_bytes of received packets
 * as an mblk chain, or NULL when nothing is pending.  For the default
 * ring (cq_id 0) an empty poll also checks for fatal-error and MPI
 * status, since interrupts are disabled while polling.
 */
mblk_t *
ql_ring_rx_poll(void *arg, int n_bytes)
{
	struct rx_ring *rx_ring = (struct rx_ring *)arg;
	qlge_t *qlge = rx_ring->qlge;
	mblk_t *mp = NULL;
	uint32_t var;

	ASSERT(n_bytes >= 0);
	QL_PRINT(DBG_GLD, ("%s for ring(%d) to read max %d bytes\n",
	    __func__, rx_ring->cq_id, n_bytes));

	++qlge->rx_polls[rx_ring->cq_id];

	if (n_bytes == 0)
		return (mp);
	mutex_enter(&rx_ring->rx_lock);
	mp = ql_ring_rx(rx_ring, n_bytes);
	mutex_exit(&rx_ring->rx_lock);

	if ((rx_ring->cq_id == 0) && (mp == NULL)) {
		var = ql_read_reg(qlge, REG_STATUS);
		/*
		 * Check for fatal error.
		 */
		if ((var & STATUS_FE) != 0) {
			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
			var = ql_read_reg(qlge, REG_ERROR_STATUS);
			cmn_err(CE_WARN, "Got fatal error %x.", var);
			ql_wake_asic_reset_soft_intr(qlge);
		}
		/*
		 * Check MPI processor activity.
		 */
		if ((var & STATUS_PI) != 0) {
			/*
			 * We've got an async event or mailbox completion.
			 * Handle it and clear the source of the interrupt.
			 */
			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
			ql_do_mpi_intr(qlge);
		}
	}

	return (mp);
}

/*
 * MSI-X Multiple Vector Interrupt Handler for inbound (RX) completions.
 */
/* ARGSUSED */
static uint_t
ql_msix_rx_isr(caddr_t arg1, caddr_t arg2)
{
	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
	qlge_t *qlge = rx_ring->qlge;
	mblk_t *mp;
	_NOTE(ARGUNUSED(arg2));

	QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));

	++qlge->rx_interrupts[rx_ring->cq_id];

	/* Drain this ring completely and pass the chain up the stack */
	mutex_enter(&rx_ring->rx_lock);
	mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
	mutex_exit(&rx_ring->rx_lock);

	if (mp != NULL)
		RX_UPSTREAM(rx_ring, mp);

	return (DDI_INTR_CLAIMED);
}


/*
 *
 * Allocate DMA Buffer for ioctl service
 *
 * Sized to hold the larger of the MPI code RAM and MEMC RAM regions,
 * so firmware dump/load ioctls can reuse a single buffer.
 */
static int
ql_alloc_ioctl_dma_buf(qlge_t *qlge)
{
	uint64_t phy_addr;
	uint64_t alloc_size;
	ddi_dma_cookie_t dma_cookie;

	alloc_size = qlge->ioctl_buf_dma_attr.mem_len =
	    max(WCS_MPI_CODE_RAM_LENGTH, MEMC_MPI_RAM_LENGTH);
	if (ql_alloc_phys(qlge->dip, &qlge->ioctl_buf_dma_attr.dma_handle,
	    &ql_buf_acc_attr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &qlge->ioctl_buf_dma_attr.acc_handle,
	    (size_t)alloc_size, /* mem size */
	    (size_t)0, /* alignment */
	    (caddr_t *)&qlge->ioctl_buf_dma_attr.vaddr,
	    &dma_cookie) != 0) {
		cmn_err(CE_WARN, "%s(%d): ioctl DMA allocation failed.",
		    __func__, qlge->instance);
		return (DDI_FAILURE);
	}

	phy_addr = dma_cookie.dmac_laddress;

	/*
	 * NOTE(review): defensive check -- ql_alloc_phys should not
	 * succeed with a NULL vaddr; if this path ever fires, the DMA
	 * handle allocated above is leaked.  Confirm whether it can fire.
	 */
	if (qlge->ioctl_buf_dma_attr.vaddr == NULL) {
		cmn_err(CE_WARN, "%s(%d): failed.", __func__, qlge->instance);
		return (DDI_FAILURE);
	}

	qlge->ioctl_buf_dma_attr.dma_addr = phy_addr;

	QL_PRINT(DBG_MBX, ("%s: ioctl_dma_buf_virt_addr = 0x%lx, "
	    "phy_addr = 0x%lx\n",
	    __func__, qlge->ioctl_buf_dma_attr.vaddr, phy_addr));

	return (DDI_SUCCESS);
}


/*
 * Function to free physical memory.
2871 */ 2872 static void 2873 ql_free_phys(ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *acc_handle) 2874 { 2875 if (dma_handle != NULL) { 2876 (void) ddi_dma_unbind_handle(*dma_handle); 2877 if (acc_handle != NULL) 2878 ddi_dma_mem_free(acc_handle); 2879 ddi_dma_free_handle(dma_handle); 2880 } 2881 } 2882 2883 /* 2884 * Function to free ioctl dma buffer. 2885 */ 2886 static void 2887 ql_free_ioctl_dma_buf(qlge_t *qlge) 2888 { 2889 if (qlge->ioctl_buf_dma_attr.dma_handle != NULL) { 2890 ql_free_phys(&qlge->ioctl_buf_dma_attr.dma_handle, 2891 &qlge->ioctl_buf_dma_attr.acc_handle); 2892 2893 qlge->ioctl_buf_dma_attr.vaddr = NULL; 2894 qlge->ioctl_buf_dma_attr.dma_handle = NULL; 2895 } 2896 } 2897 2898 /* 2899 * Free shadow register space used for request and completion queues 2900 */ 2901 static void 2902 ql_free_shadow_space(qlge_t *qlge) 2903 { 2904 if (qlge->host_copy_shadow_dma_attr.dma_handle != NULL) { 2905 ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle, 2906 &qlge->host_copy_shadow_dma_attr.acc_handle); 2907 bzero(&qlge->host_copy_shadow_dma_attr, 2908 sizeof (qlge->host_copy_shadow_dma_attr)); 2909 } 2910 2911 if (qlge->buf_q_ptr_base_addr_dma_attr.dma_handle != NULL) { 2912 ql_free_phys(&qlge->buf_q_ptr_base_addr_dma_attr.dma_handle, 2913 &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle); 2914 bzero(&qlge->buf_q_ptr_base_addr_dma_attr, 2915 sizeof (qlge->buf_q_ptr_base_addr_dma_attr)); 2916 } 2917 } 2918 2919 /* 2920 * Allocate shadow register space for request and completion queues 2921 */ 2922 static int 2923 ql_alloc_shadow_space(qlge_t *qlge) 2924 { 2925 ddi_dma_cookie_t dma_cookie; 2926 2927 if (ql_alloc_phys(qlge->dip, 2928 &qlge->host_copy_shadow_dma_attr.dma_handle, 2929 &ql_dev_acc_attr, 2930 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2931 &qlge->host_copy_shadow_dma_attr.acc_handle, 2932 (size_t)VM_PAGE_SIZE, /* mem size */ 2933 (size_t)4, /* 4 bytes alignment */ 2934 (caddr_t *)&qlge->host_copy_shadow_dma_attr.vaddr, 2935 &dma_cookie) != 0) { 
2936 bzero(&qlge->host_copy_shadow_dma_attr, 2937 sizeof (qlge->host_copy_shadow_dma_attr)); 2938 2939 cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory for " 2940 "response shadow registers", __func__, qlge->instance); 2941 return (DDI_FAILURE); 2942 } 2943 2944 qlge->host_copy_shadow_dma_attr.dma_addr = dma_cookie.dmac_laddress; 2945 2946 if (ql_alloc_phys(qlge->dip, 2947 &qlge->buf_q_ptr_base_addr_dma_attr.dma_handle, 2948 &ql_desc_acc_attr, 2949 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2950 &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle, 2951 (size_t)VM_PAGE_SIZE, /* mem size */ 2952 (size_t)4, /* 4 bytes alignment */ 2953 (caddr_t *)&qlge->buf_q_ptr_base_addr_dma_attr.vaddr, 2954 &dma_cookie) != 0) { 2955 bzero(&qlge->buf_q_ptr_base_addr_dma_attr, 2956 sizeof (qlge->buf_q_ptr_base_addr_dma_attr)); 2957 2958 cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory " 2959 "for request shadow registers", 2960 __func__, qlge->instance); 2961 goto err_wqp_sh_area; 2962 } 2963 qlge->buf_q_ptr_base_addr_dma_attr.dma_addr = dma_cookie.dmac_laddress; 2964 2965 return (DDI_SUCCESS); 2966 2967 err_wqp_sh_area: 2968 ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle, 2969 &qlge->host_copy_shadow_dma_attr.acc_handle); 2970 bzero(&qlge->host_copy_shadow_dma_attr, 2971 sizeof (qlge->host_copy_shadow_dma_attr)); 2972 2973 return (DDI_FAILURE); 2974 } 2975 2976 /* 2977 * Initialize a tx ring 2978 */ 2979 static void 2980 ql_init_tx_ring(struct tx_ring *tx_ring) 2981 { 2982 int i; 2983 struct ob_mac_iocb_req *mac_iocb_ptr = tx_ring->wq_dma.vaddr; 2984 struct tx_ring_desc *tx_ring_desc = tx_ring->wq_desc; 2985 2986 for (i = 0; i < tx_ring->wq_len; i++) { 2987 tx_ring_desc->index = i; 2988 tx_ring_desc->queue_entry = mac_iocb_ptr; 2989 mac_iocb_ptr++; 2990 tx_ring_desc++; 2991 } 2992 tx_ring->tx_free_count = tx_ring->wq_len; 2993 tx_ring->queue_stopped = 0; 2994 } 2995 2996 /* 2997 * Free one tx ring resources 2998 */ 2999 static void 3000 ql_free_tx_resources(struct tx_ring 
*tx_ring) 3001 { 3002 struct tx_ring_desc *tx_ring_desc; 3003 int i, j; 3004 3005 ql_free_phys(&tx_ring->wq_dma.dma_handle, &tx_ring->wq_dma.acc_handle); 3006 bzero(&tx_ring->wq_dma, sizeof (tx_ring->wq_dma)); 3007 3008 if (tx_ring->wq_desc != NULL) { 3009 tx_ring_desc = tx_ring->wq_desc; 3010 for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) { 3011 for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) { 3012 if (tx_ring_desc->tx_dma_handle[j]) { 3013 /* 3014 * The unbinding will happen in tx 3015 * completion, here we just free the 3016 * handles 3017 */ 3018 ddi_dma_free_handle( 3019 &(tx_ring_desc->tx_dma_handle[j])); 3020 tx_ring_desc->tx_dma_handle[j] = NULL; 3021 } 3022 } 3023 if (tx_ring_desc->oal != NULL) { 3024 tx_ring_desc->oal_dma_addr = 0; 3025 tx_ring_desc->oal = NULL; 3026 tx_ring_desc->copy_buffer = NULL; 3027 tx_ring_desc->copy_buffer_dma_addr = 0; 3028 3029 ql_free_phys(&tx_ring_desc->oal_dma.dma_handle, 3030 &tx_ring_desc->oal_dma.acc_handle); 3031 } 3032 } 3033 kmem_free(tx_ring->wq_desc, 3034 tx_ring->wq_len * sizeof (struct tx_ring_desc)); 3035 tx_ring->wq_desc = NULL; 3036 } 3037 /* free the wqicb struct */ 3038 if (tx_ring->wqicb_dma.dma_handle) { 3039 ql_free_phys(&tx_ring->wqicb_dma.dma_handle, 3040 &tx_ring->wqicb_dma.acc_handle); 3041 bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma)); 3042 } 3043 } 3044 3045 /* 3046 * Allocate work (request) queue memory and transmit 3047 * descriptors for this transmit ring 3048 */ 3049 static int 3050 ql_alloc_tx_resources(qlge_t *qlge, struct tx_ring *tx_ring) 3051 { 3052 ddi_dma_cookie_t dma_cookie; 3053 struct tx_ring_desc *tx_ring_desc; 3054 int i, j; 3055 uint32_t length; 3056 3057 /* allocate dma buffers for obiocbs */ 3058 if (ql_alloc_phys(qlge->dip, &tx_ring->wq_dma.dma_handle, 3059 &ql_desc_acc_attr, 3060 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3061 &tx_ring->wq_dma.acc_handle, 3062 (size_t)tx_ring->wq_size, /* mem size */ 3063 (size_t)128, /* alignment:128 bytes boundary */ 3064 (caddr_t 
*)&tx_ring->wq_dma.vaddr, 3065 &dma_cookie) != 0) { 3066 bzero(&tx_ring->wq_dma, sizeof (&tx_ring->wq_dma)); 3067 cmn_err(CE_WARN, "%s(%d): reqQ allocation failed.", 3068 __func__, qlge->instance); 3069 return (DDI_FAILURE); 3070 } 3071 tx_ring->wq_dma.dma_addr = dma_cookie.dmac_laddress; 3072 3073 tx_ring->wq_desc = 3074 kmem_zalloc(tx_ring->wq_len * sizeof (struct tx_ring_desc), 3075 KM_NOSLEEP); 3076 if (tx_ring->wq_desc == NULL) { 3077 goto err; 3078 } else { 3079 tx_ring_desc = tx_ring->wq_desc; 3080 /* 3081 * Allocate a large enough structure to hold the following 3082 * 1. oal buffer MAX_SGELEMENTS * sizeof (oal_entry) bytes 3083 * 2. copy buffer of QL_MAX_COPY_LENGTH bytes 3084 */ 3085 for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) { 3086 length = (sizeof (struct oal_entry) * MAX_SG_ELEMENTS) 3087 + QL_MAX_COPY_LENGTH; 3088 3089 if (ql_alloc_phys(qlge->dip, 3090 &tx_ring_desc->oal_dma.dma_handle, 3091 &ql_desc_acc_attr, 3092 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3093 &tx_ring_desc->oal_dma.acc_handle, 3094 (size_t)length, /* mem size */ 3095 (size_t)0, /* default alignment:8 bytes boundary */ 3096 (caddr_t *)&tx_ring_desc->oal_dma.vaddr, 3097 &dma_cookie) != 0) { 3098 bzero(&tx_ring_desc->oal_dma, 3099 sizeof (tx_ring_desc->oal_dma)); 3100 cmn_err(CE_WARN, "%s(%d): reqQ tx buf &" 3101 "oal alloc failed.", 3102 __func__, qlge->instance); 3103 return (DDI_FAILURE); 3104 } 3105 3106 tx_ring_desc->oal = tx_ring_desc->oal_dma.vaddr; 3107 tx_ring_desc->oal_dma_addr = dma_cookie.dmac_laddress; 3108 tx_ring_desc->copy_buffer = 3109 (caddr_t)((uint8_t *)tx_ring_desc->oal 3110 + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS)); 3111 tx_ring_desc->copy_buffer_dma_addr = 3112 (tx_ring_desc->oal_dma_addr 3113 + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS)); 3114 3115 /* Allocate dma handles for transmit buffers */ 3116 for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) { 3117 if (ddi_dma_alloc_handle(qlge->dip, 3118 &tx_mapping_dma_attr, 3119 DDI_DMA_DONTWAIT, 3120 0, 
&tx_ring_desc->tx_dma_handle[j]) 3121 != DDI_SUCCESS) { 3122 cmn_err(CE_WARN, 3123 "!%s: ddi_dma_alloc_handle: " 3124 "tx_dma_handle " 3125 "alloc failed", __func__); 3126 goto err; 3127 } 3128 } 3129 } 3130 } 3131 /* alloc a wqicb control block to load this tx ring to hw */ 3132 if (ql_alloc_phys(qlge->dip, &tx_ring->wqicb_dma.dma_handle, 3133 &ql_desc_acc_attr, 3134 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3135 &tx_ring->wqicb_dma.acc_handle, 3136 (size_t)sizeof (struct wqicb_t), /* mem size */ 3137 (size_t)0, /* alignment:128 bytes boundary */ 3138 (caddr_t *)&tx_ring->wqicb_dma.vaddr, 3139 &dma_cookie) != 0) { 3140 bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma)); 3141 cmn_err(CE_WARN, "%s(%d): wqicb allocation failed.", 3142 __func__, qlge->instance); 3143 return (DDI_FAILURE); 3144 } 3145 tx_ring->wqicb_dma.dma_addr = dma_cookie.dmac_laddress; 3146 3147 return (DDI_SUCCESS); 3148 3149 err: 3150 ql_free_tx_resources(tx_ring); 3151 return (DDI_FAILURE); 3152 } 3153 3154 /* 3155 * Free one rx ring resources 3156 */ 3157 static void 3158 ql_free_rx_resources(struct rx_ring *rx_ring) 3159 { 3160 /* Free the small buffer queue. */ 3161 if (rx_ring->sbq_dma.dma_handle) { 3162 ql_free_phys(&rx_ring->sbq_dma.dma_handle, 3163 &rx_ring->sbq_dma.acc_handle); 3164 bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma)); 3165 } 3166 3167 /* Free the small buffer queue control blocks. */ 3168 kmem_free(rx_ring->sbq_desc, rx_ring->sbq_len * 3169 sizeof (struct bq_desc)); 3170 rx_ring->sbq_desc = NULL; 3171 3172 /* Free the large buffer queue. */ 3173 if (rx_ring->lbq_dma.dma_handle) { 3174 ql_free_phys(&rx_ring->lbq_dma.dma_handle, 3175 &rx_ring->lbq_dma.acc_handle); 3176 bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma)); 3177 } 3178 3179 /* Free the large buffer queue control blocks. 
*/ 3180 kmem_free(rx_ring->lbq_desc, rx_ring->lbq_len * 3181 sizeof (struct bq_desc)); 3182 rx_ring->lbq_desc = NULL; 3183 3184 /* Free cqicb struct */ 3185 if (rx_ring->cqicb_dma.dma_handle) { 3186 ql_free_phys(&rx_ring->cqicb_dma.dma_handle, 3187 &rx_ring->cqicb_dma.acc_handle); 3188 bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma)); 3189 } 3190 /* Free the rx queue. */ 3191 if (rx_ring->cq_dma.dma_handle) { 3192 ql_free_phys(&rx_ring->cq_dma.dma_handle, 3193 &rx_ring->cq_dma.acc_handle); 3194 bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma)); 3195 } 3196 } 3197 3198 /* 3199 * Allocate queues and buffers for this completions queue based 3200 * on the values in the parameter structure. 3201 */ 3202 static int 3203 ql_alloc_rx_resources(qlge_t *qlge, struct rx_ring *rx_ring) 3204 { 3205 ddi_dma_cookie_t dma_cookie; 3206 3207 if (ql_alloc_phys(qlge->dip, &rx_ring->cq_dma.dma_handle, 3208 &ql_desc_acc_attr, 3209 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3210 &rx_ring->cq_dma.acc_handle, 3211 (size_t)rx_ring->cq_size, /* mem size */ 3212 (size_t)128, /* alignment:128 bytes boundary */ 3213 (caddr_t *)&rx_ring->cq_dma.vaddr, 3214 &dma_cookie) != 0) { 3215 bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma)); 3216 cmn_err(CE_WARN, "%s(%d): rspQ allocation failed.", 3217 __func__, qlge->instance); 3218 return (DDI_FAILURE); 3219 } 3220 rx_ring->cq_dma.dma_addr = dma_cookie.dmac_laddress; 3221 3222 if (rx_ring->sbq_len != 0) { 3223 /* 3224 * Allocate small buffer queue. 
3225 */ 3226 if (ql_alloc_phys(qlge->dip, &rx_ring->sbq_dma.dma_handle, 3227 &ql_desc_acc_attr, 3228 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3229 &rx_ring->sbq_dma.acc_handle, 3230 (size_t)rx_ring->sbq_size, /* mem size */ 3231 (size_t)128, /* alignment:128 bytes boundary */ 3232 (caddr_t *)&rx_ring->sbq_dma.vaddr, 3233 &dma_cookie) != 0) { 3234 bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma)); 3235 cmn_err(CE_WARN, 3236 "%s(%d): small buffer queue allocation failed.", 3237 __func__, qlge->instance); 3238 goto err_mem; 3239 } 3240 rx_ring->sbq_dma.dma_addr = dma_cookie.dmac_laddress; 3241 3242 /* 3243 * Allocate small buffer queue control blocks. 3244 */ 3245 rx_ring->sbq_desc = 3246 kmem_zalloc(rx_ring->sbq_len * sizeof (struct bq_desc), 3247 KM_NOSLEEP); 3248 if (rx_ring->sbq_desc == NULL) { 3249 cmn_err(CE_WARN, 3250 "sbq control block allocation failed."); 3251 goto err_mem; 3252 } 3253 3254 ql_init_sbq_ring(rx_ring); 3255 } 3256 3257 if (rx_ring->lbq_len != 0) { 3258 /* 3259 * Allocate large buffer queue. 3260 */ 3261 if (ql_alloc_phys(qlge->dip, &rx_ring->lbq_dma.dma_handle, 3262 &ql_desc_acc_attr, 3263 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3264 &rx_ring->lbq_dma.acc_handle, 3265 (size_t)rx_ring->lbq_size, /* mem size */ 3266 (size_t)128, /* alignment:128 bytes boundary */ 3267 (caddr_t *)&rx_ring->lbq_dma.vaddr, 3268 &dma_cookie) != 0) { 3269 bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma)); 3270 cmn_err(CE_WARN, "%s(%d): lbq allocation failed.", 3271 __func__, qlge->instance); 3272 goto err_mem; 3273 } 3274 rx_ring->lbq_dma.dma_addr = dma_cookie.dmac_laddress; 3275 3276 /* 3277 * Allocate large buffer queue control blocks. 
3278 */ 3279 rx_ring->lbq_desc = 3280 kmem_zalloc(rx_ring->lbq_len * sizeof (struct bq_desc), 3281 KM_NOSLEEP); 3282 if (rx_ring->lbq_desc == NULL) { 3283 cmn_err(CE_WARN, 3284 "Large buffer queue control block allocation " 3285 "failed."); 3286 goto err_mem; 3287 } 3288 ql_init_lbq_ring(rx_ring); 3289 } 3290 3291 if (ql_alloc_phys(qlge->dip, &rx_ring->cqicb_dma.dma_handle, 3292 &ql_desc_acc_attr, 3293 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3294 &rx_ring->cqicb_dma.acc_handle, 3295 (size_t)sizeof (struct cqicb_t), /* mem size */ 3296 (size_t)0, /* alignment:128 bytes boundary */ 3297 (caddr_t *)&rx_ring->cqicb_dma.vaddr, 3298 &dma_cookie) != 0) { 3299 bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma)); 3300 cmn_err(CE_WARN, "%s(%d): cqicb allocation failed.", 3301 __func__, qlge->instance); 3302 return (DDI_FAILURE); 3303 } 3304 rx_ring->cqicb_dma.dma_addr = dma_cookie.dmac_laddress; 3305 3306 return (DDI_SUCCESS); 3307 3308 err_mem: 3309 ql_free_rx_resources(rx_ring); 3310 return (DDI_FAILURE); 3311 } 3312 3313 /* 3314 * Frees tx/rx queues memory resources 3315 */ 3316 static void 3317 ql_free_mem_resources(qlge_t *qlge) 3318 { 3319 int i; 3320 3321 if (qlge->ricb_dma.dma_handle) { 3322 /* free the ricb struct */ 3323 ql_free_phys(&qlge->ricb_dma.dma_handle, 3324 &qlge->ricb_dma.acc_handle); 3325 bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma)); 3326 } 3327 3328 ql_free_rx_buffers(qlge); 3329 3330 ql_free_ioctl_dma_buf(qlge); 3331 3332 for (i = 0; i < qlge->tx_ring_count; i++) 3333 ql_free_tx_resources(&qlge->tx_ring[i]); 3334 3335 for (i = 0; i < qlge->rx_ring_count; i++) 3336 ql_free_rx_resources(&qlge->rx_ring[i]); 3337 3338 ql_free_shadow_space(qlge); 3339 } 3340 3341 /* 3342 * Allocate buffer queues, large buffers and small buffers etc 3343 * 3344 * This API is called in the gld_attach member function. It is called 3345 * only once. Later reset,reboot should not re-allocate all rings and 3346 * buffers. 
3347 */ 3348 static int 3349 ql_alloc_mem_resources(qlge_t *qlge) 3350 { 3351 int i; 3352 ddi_dma_cookie_t dma_cookie; 3353 3354 /* Allocate space for our shadow registers */ 3355 if (ql_alloc_shadow_space(qlge)) 3356 return (DDI_FAILURE); 3357 3358 for (i = 0; i < qlge->rx_ring_count; i++) { 3359 if (ql_alloc_rx_resources(qlge, &qlge->rx_ring[i]) != 0) { 3360 cmn_err(CE_WARN, "RX resource allocation failed."); 3361 goto err_mem; 3362 } 3363 } 3364 /* Allocate tx queue resources */ 3365 for (i = 0; i < qlge->tx_ring_count; i++) { 3366 if (ql_alloc_tx_resources(qlge, &qlge->tx_ring[i]) != 0) { 3367 cmn_err(CE_WARN, "Tx resource allocation failed."); 3368 goto err_mem; 3369 } 3370 } 3371 3372 if (ql_alloc_ioctl_dma_buf(qlge) != DDI_SUCCESS) { 3373 goto err_mem; 3374 } 3375 3376 if (ql_alloc_rx_buffers(qlge) != DDI_SUCCESS) { 3377 cmn_err(CE_WARN, "?%s(%d): ql_alloc_rx_buffers failed", 3378 __func__, qlge->instance); 3379 goto err_mem; 3380 } 3381 3382 qlge->sequence |= INIT_ALLOC_RX_BUF; 3383 3384 if (ql_alloc_phys(qlge->dip, &qlge->ricb_dma.dma_handle, 3385 &ql_desc_acc_attr, 3386 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 3387 &qlge->ricb_dma.acc_handle, 3388 (size_t)sizeof (struct ricb), /* mem size */ 3389 (size_t)0, /* alignment:128 bytes boundary */ 3390 (caddr_t *)&qlge->ricb_dma.vaddr, 3391 &dma_cookie) != 0) { 3392 bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma)); 3393 cmn_err(CE_WARN, "%s(%d): ricb allocation failed.", 3394 __func__, qlge->instance); 3395 return (DDI_FAILURE); 3396 } 3397 qlge->ricb_dma.dma_addr = dma_cookie.dmac_laddress; 3398 3399 return (DDI_SUCCESS); 3400 3401 err_mem: 3402 ql_free_mem_resources(qlge); 3403 return (DDI_FAILURE); 3404 } 3405 3406 3407 /* 3408 * Function used to allocate physical memory and zero it. 
3409 */ 3410 3411 static int 3412 ql_alloc_phys(dev_info_t *dip, ddi_dma_handle_t *dma_handle, 3413 ddi_device_acc_attr_t *device_acc_attr, 3414 uint_t dma_flags, 3415 ddi_acc_handle_t *acc_handle, 3416 size_t size, 3417 size_t alignment, 3418 caddr_t *vaddr, 3419 ddi_dma_cookie_t *dma_cookie) 3420 { 3421 size_t rlen; 3422 uint_t cnt; 3423 3424 /* 3425 * Workaround for SUN XMITS buffer must end and start on 8 byte 3426 * boundary. Else, hardware will overrun the buffer. Simple fix is 3427 * to make sure buffer has enough room for overrun. 3428 */ 3429 if (size & 7) { 3430 size += 8 - (size & 7); 3431 } 3432 3433 /* Adjust the alignment if requested */ 3434 if (alignment) { 3435 dma_attr.dma_attr_align = alignment; 3436 } 3437 3438 /* 3439 * Allocate DMA handle 3440 */ 3441 if (ddi_dma_alloc_handle(dip, &dma_attr, DDI_DMA_SLEEP, NULL, 3442 dma_handle) != DDI_SUCCESS) { 3443 cmn_err(CE_WARN, QL_BANG "%s: ddi_dma_alloc_handle FAILED", 3444 __func__); 3445 return (QL_ERROR); 3446 } 3447 /* 3448 * Allocate DMA memory 3449 */ 3450 if (ddi_dma_mem_alloc(*dma_handle, size, device_acc_attr, 3451 dma_flags & (DDI_DMA_CONSISTENT|DDI_DMA_STREAMING), DDI_DMA_SLEEP, 3452 NULL, vaddr, &rlen, acc_handle) != DDI_SUCCESS) { 3453 ddi_dma_free_handle(dma_handle); 3454 } 3455 if (vaddr == NULL) { 3456 cmn_err(CE_WARN, "alloc_phys: Memory alloc Failed"); 3457 ddi_dma_free_handle(dma_handle); 3458 return (QL_ERROR); 3459 } 3460 3461 if (ddi_dma_addr_bind_handle(*dma_handle, NULL, *vaddr, rlen, 3462 dma_flags, DDI_DMA_SLEEP, NULL, 3463 dma_cookie, &cnt) != DDI_DMA_MAPPED) { 3464 ddi_dma_mem_free(acc_handle); 3465 3466 ddi_dma_free_handle(dma_handle); 3467 cmn_err(CE_WARN, "%s ddi_dma_addr_bind_handle FAILED", 3468 __func__); 3469 return (QL_ERROR); 3470 } 3471 3472 if (cnt != 1) { 3473 3474 ql_free_phys(dma_handle, acc_handle); 3475 3476 cmn_err(CE_WARN, "%s: cnt != 1; Failed segment count", 3477 __func__); 3478 return (QL_ERROR); 3479 } 3480 3481 bzero((caddr_t)*vaddr, rlen); 3482 3483 
return (0); 3484 } 3485 3486 /* 3487 * Add interrupt handlers based on the interrupt type. 3488 * Before adding the interrupt handlers, the interrupt vectors should 3489 * have been allocated, and the rx/tx rings have also been allocated. 3490 */ 3491 static int 3492 ql_add_intr_handlers(qlge_t *qlge) 3493 { 3494 int vector = 0; 3495 int rc, i; 3496 uint32_t value; 3497 struct intr_ctx *intr_ctx = &qlge->intr_ctx[0]; 3498 3499 switch (qlge->intr_type) { 3500 case DDI_INTR_TYPE_MSIX: 3501 /* 3502 * Add interrupt handler for rx and tx rings: vector[0 - 3503 * (qlge->intr_cnt -1)]. 3504 */ 3505 value = 0; 3506 for (vector = 0; vector < qlge->intr_cnt; vector++) { 3507 ql_atomic_set_32(&intr_ctx->irq_cnt, value); 3508 3509 /* 3510 * associate interrupt vector with interrupt handler 3511 */ 3512 rc = ddi_intr_add_handler(qlge->htable[vector], 3513 (ddi_intr_handler_t *)intr_ctx->handler, 3514 (void *)&qlge->rx_ring[vector], NULL); 3515 3516 if (rc != DDI_SUCCESS) { 3517 QL_PRINT(DBG_INIT, 3518 ("Add rx interrupt handler failed. 
" 3519 "return: %d, vector: %d", rc, vector)); 3520 for (vector--; vector >= 0; vector--) { 3521 (void) ddi_intr_remove_handler( 3522 qlge->htable[vector]); 3523 } 3524 return (DDI_FAILURE); 3525 } 3526 intr_ctx++; 3527 } 3528 break; 3529 3530 case DDI_INTR_TYPE_MSI: 3531 /* 3532 * Add interrupt handlers for the only vector 3533 */ 3534 ql_atomic_set_32(&intr_ctx->irq_cnt, value); 3535 3536 rc = ddi_intr_add_handler(qlge->htable[vector], 3537 ql_isr, 3538 (caddr_t)&qlge->rx_ring[0], NULL); 3539 3540 if (rc != DDI_SUCCESS) { 3541 QL_PRINT(DBG_INIT, 3542 ("Add MSI interrupt handler failed: %d\n", rc)); 3543 return (DDI_FAILURE); 3544 } 3545 break; 3546 3547 case DDI_INTR_TYPE_FIXED: 3548 /* 3549 * Add interrupt handlers for the only vector 3550 */ 3551 ql_atomic_set_32(&intr_ctx->irq_cnt, value); 3552 3553 rc = ddi_intr_add_handler(qlge->htable[vector], 3554 ql_isr, 3555 (caddr_t)&qlge->rx_ring[0], NULL); 3556 3557 if (rc != DDI_SUCCESS) { 3558 QL_PRINT(DBG_INIT, 3559 ("Add legacy interrupt handler failed: %d\n", rc)); 3560 return (DDI_FAILURE); 3561 } 3562 break; 3563 3564 default: 3565 return (DDI_FAILURE); 3566 } 3567 3568 /* Enable interrupts */ 3569 /* Block enable */ 3570 if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) { 3571 QL_PRINT(DBG_INIT, ("Block enabling %d interrupt(s)\n", 3572 qlge->intr_cnt)); 3573 (void) ddi_intr_block_enable(qlge->htable, qlge->intr_cnt); 3574 } else { /* Non block enable */ 3575 for (i = 0; i < qlge->intr_cnt; i++) { 3576 QL_PRINT(DBG_INIT, ("Non Block Enabling interrupt %d\n," 3577 "handle 0x%x\n", i, qlge->htable[i])); 3578 (void) ddi_intr_enable(qlge->htable[i]); 3579 } 3580 } 3581 qlge->sequence |= INIT_INTR_ENABLED; 3582 3583 return (DDI_SUCCESS); 3584 } 3585 3586 /* 3587 * Here we build the intr_ctx structures based on 3588 * our rx_ring count and intr vector count. 3589 * The intr_ctx structure is used to hook each vector 3590 * to possibly different handlers. 
3591 */ 3592 static void 3593 ql_resolve_queues_to_irqs(qlge_t *qlge) 3594 { 3595 int i = 0; 3596 struct intr_ctx *intr_ctx = &qlge->intr_ctx[0]; 3597 3598 if (qlge->intr_type == DDI_INTR_TYPE_MSIX) { 3599 /* 3600 * Each rx_ring has its own intr_ctx since we 3601 * have separate vectors for each queue. 3602 * This only true when MSI-X is enabled. 3603 */ 3604 for (i = 0; i < qlge->intr_cnt; i++, intr_ctx++) { 3605 qlge->rx_ring[i].irq = i; 3606 intr_ctx->intr = i; 3607 intr_ctx->qlge = qlge; 3608 3609 /* 3610 * We set up each vectors enable/disable/read bits so 3611 * there's no bit/mask calculations in critical path. 3612 */ 3613 intr_ctx->intr_en_mask = 3614 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | 3615 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | 3616 INTR_EN_IHD | i; 3617 intr_ctx->intr_dis_mask = 3618 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | 3619 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK | 3620 INTR_EN_IHD | i; 3621 intr_ctx->intr_read_mask = 3622 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | 3623 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD 3624 | i; 3625 3626 if (i == 0) { 3627 /* 3628 * Default queue handles bcast/mcast plus 3629 * async events. 3630 */ 3631 intr_ctx->handler = ql_isr; 3632 } else if (qlge->rx_ring[i].type == TX_Q) { 3633 /* 3634 * Outbound queue is for outbound completions 3635 * only. 3636 */ 3637 intr_ctx->handler = ql_msix_tx_isr; 3638 } else { 3639 /* 3640 * Inbound queues handle unicast frames only. 3641 */ 3642 intr_ctx->handler = ql_msix_rx_isr; 3643 } 3644 } 3645 } else { 3646 /* 3647 * All rx_rings use the same intr_ctx since 3648 * there is only one vector. 3649 */ 3650 intr_ctx->intr = 0; 3651 intr_ctx->qlge = qlge; 3652 /* 3653 * We set up each vectors enable/disable/read bits so 3654 * there's no bit/mask calculations in the critical path. 
3655 */ 3656 intr_ctx->intr_en_mask = 3657 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | 3658 INTR_EN_TYPE_ENABLE; 3659 intr_ctx->intr_dis_mask = 3660 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | 3661 INTR_EN_TYPE_DISABLE; 3662 intr_ctx->intr_read_mask = 3663 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | 3664 INTR_EN_TYPE_READ; 3665 /* 3666 * Single interrupt means one handler for all rings. 3667 */ 3668 intr_ctx->handler = ql_isr; 3669 for (i = 0; i < qlge->rx_ring_count; i++) 3670 qlge->rx_ring[i].irq = 0; 3671 } 3672 } 3673 3674 3675 /* 3676 * Free allocated interrupts. 3677 */ 3678 static void 3679 ql_free_irq_vectors(qlge_t *qlge) 3680 { 3681 int i; 3682 int rc; 3683 3684 if (qlge->sequence & INIT_INTR_ENABLED) { 3685 /* Disable all interrupts */ 3686 if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) { 3687 /* Call ddi_intr_block_disable() */ 3688 (void) ddi_intr_block_disable(qlge->htable, 3689 qlge->intr_cnt); 3690 } else { 3691 for (i = 0; i < qlge->intr_cnt; i++) { 3692 (void) ddi_intr_disable(qlge->htable[i]); 3693 } 3694 } 3695 3696 qlge->sequence &= ~INIT_INTR_ENABLED; 3697 } 3698 3699 for (i = 0; i < qlge->intr_cnt; i++) { 3700 3701 if (qlge->sequence & INIT_ADD_INTERRUPT) 3702 (void) ddi_intr_remove_handler(qlge->htable[i]); 3703 3704 if (qlge->sequence & INIT_INTR_ALLOC) { 3705 rc = ddi_intr_free(qlge->htable[i]); 3706 if (rc != DDI_SUCCESS) { 3707 /* EMPTY */ 3708 QL_PRINT(DBG_INIT, ("Free intr failed: %d", 3709 rc)); 3710 } 3711 } 3712 } 3713 if (qlge->sequence & INIT_INTR_ALLOC) 3714 qlge->sequence &= ~INIT_INTR_ALLOC; 3715 3716 if (qlge->sequence & INIT_ADD_INTERRUPT) 3717 qlge->sequence &= ~INIT_ADD_INTERRUPT; 3718 3719 if (qlge->htable) { 3720 kmem_free(qlge->htable, qlge->intr_size); 3721 qlge->htable = NULL; 3722 } 3723 } 3724 3725 /* 3726 * Allocate interrupt vectors 3727 * For legacy and MSI, only 1 handle is needed. 3728 * For MSI-X, if fewer than 2 vectors are available, return failure. 
 * Upon success, this maps the vectors to rx and tx rings for
 * interrupts.
 */
static int
ql_request_irq_vectors(qlge_t *qlge, int intr_type)
{
	dev_info_t *devinfo;
	uint32_t request, orig;	/* orig is only set/read in the MSI-X case */
	int count, avail, actual;
	int minimum;
	int rc;

	devinfo = qlge->dip;

	switch (intr_type) {
	case DDI_INTR_TYPE_FIXED:
		request = 1;	/* Request 1 legacy interrupt handle */
		minimum = 1;
		QL_PRINT(DBG_INIT, ("interrupt type: legacy\n"));
		break;

	case DDI_INTR_TYPE_MSI:
		request = 1;	/* Request 1 MSI interrupt handle */
		minimum = 1;
		QL_PRINT(DBG_INIT, ("interrupt type: MSI\n"));
		break;

	case DDI_INTR_TYPE_MSIX:
		/*
		 * Ideal number of vectors for the adapter is
		 * # rss rings + tx completion rings for default completion
		 * queue.
		 */
		request = qlge->rx_ring_count;

		orig = request;
		if (request > (MAX_RX_RINGS))
			request = MAX_RX_RINGS;
		minimum = 2;
		QL_PRINT(DBG_INIT, ("interrupt type: MSI-X\n"));
		break;

	default:
		QL_PRINT(DBG_INIT, ("Invalid parameter\n"));
		return (DDI_FAILURE);
	}

	QL_PRINT(DBG_INIT, ("interrupt handles requested: %d minimum: %d\n",
	    request, minimum));

	/*
	 * Get number of supported interrupts
	 */
	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
	if ((rc != DDI_SUCCESS) || (count < minimum)) {
		QL_PRINT(DBG_INIT, ("Get interrupt number failed. Return: %d, "
		    "count: %d\n", rc, count));
		return (DDI_FAILURE);
	}
	QL_PRINT(DBG_INIT, ("interrupts supported: %d\n", count));

	/*
	 * Get number of available interrupts
	 */
	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
	if ((rc != DDI_SUCCESS) || (avail < minimum)) {
		QL_PRINT(DBG_INIT,
		    ("Get interrupt available number failed. Return:"
		    " %d, available: %d\n", rc, avail));
		return (DDI_FAILURE);
	}
	QL_PRINT(DBG_INIT, ("interrupts available: %d\n", avail));

	/* Never ask for more than the platform can deliver. */
	if (avail < request) {
		QL_PRINT(DBG_INIT, ("Request %d handles, %d available\n",
		    request, avail));
		request = avail;
	}

	actual = 0;
	qlge->intr_cnt = 0;

	/*
	 * Allocate an array of interrupt handles
	 */
	qlge->intr_size = (size_t)(request * sizeof (ddi_intr_handle_t));
	qlge->htable = kmem_alloc(qlge->intr_size, KM_SLEEP);

	rc = ddi_intr_alloc(devinfo, qlge->htable, intr_type, 0,
	    (int)request, &actual, DDI_INTR_ALLOC_NORMAL);
	if (rc != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s(%d) Allocate interrupts failed. return:"
		    " %d, request: %d, actual: %d",
		    __func__, qlge->instance, rc, request, actual);
		/* intr_cnt is still 0; the unwind only frees htable */
		goto ql_intr_alloc_fail;
	}
	qlge->intr_cnt = actual;

	qlge->sequence |= INIT_INTR_ALLOC;

	/*
	 * If the actual number of vectors is less than the minimum
	 * then fail.
	 */
	if (actual < minimum) {
		cmn_err(CE_WARN,
		    "Insufficient interrupt handles available: %d", actual);
		goto ql_intr_alloc_fail;
	}

	/*
	 * For MSI-X, actual might force us to reduce number of tx & rx rings
	 */
	if ((intr_type == DDI_INTR_TYPE_MSIX) && (orig > actual)) {
		if (actual < MAX_RX_RINGS) {
			qlge->tx_ring_count = 1;
			qlge->rss_ring_count = actual - 1;
			qlge->rx_ring_count = qlge->tx_ring_count +
			    qlge->rss_ring_count;
		}
	}
	/*
	 * Get priority for first vector, assume remaining are all the same
	 */
	rc = ddi_intr_get_pri(qlge->htable[0], &qlge->intr_pri);
	if (rc != DDI_SUCCESS) {
		QL_PRINT(DBG_INIT, ("Get interrupt priority failed: %d\n", rc));
		goto ql_intr_alloc_fail;
	}

	rc = ddi_intr_get_cap(qlge->htable[0], &qlge->intr_cap);
	if (rc != DDI_SUCCESS) {
		QL_PRINT(DBG_INIT, ("Get interrupt cap failed: %d\n", rc));
		goto ql_intr_alloc_fail;
	}

	qlge->intr_type = intr_type;

	return (DDI_SUCCESS);

ql_intr_alloc_fail:
	/* releases whatever the sequence flags say was set up */
	ql_free_irq_vectors(qlge);

	return (DDI_FAILURE);

/*
 * Allocate interrupt vector(s) for one of the following interrupt types, MSI-X,
 * MSI or Legacy. In MSI and Legacy modes we only support a single receive and
 * transmit queue.
3879 */ 3880 int 3881 ql_alloc_irqs(qlge_t *qlge) 3882 { 3883 int intr_types; 3884 int rval; 3885 3886 /* 3887 * Get supported interrupt types 3888 */ 3889 if (ddi_intr_get_supported_types(qlge->dip, &intr_types) 3890 != DDI_SUCCESS) { 3891 cmn_err(CE_WARN, "%s(%d):ddi_intr_get_supported_types failed", 3892 __func__, qlge->instance); 3893 3894 return (DDI_FAILURE); 3895 } 3896 3897 QL_PRINT(DBG_INIT, ("%s(%d) Interrupt types supported %d\n", 3898 __func__, qlge->instance, intr_types)); 3899 3900 /* Install MSI-X interrupts */ 3901 if ((intr_types & DDI_INTR_TYPE_MSIX) != 0) { 3902 QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt supported %d\n", 3903 __func__, qlge->instance, intr_types)); 3904 rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSIX); 3905 if (rval == DDI_SUCCESS) { 3906 return (rval); 3907 } 3908 QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt allocation failed," 3909 " trying MSI interrupts ...\n", __func__, qlge->instance)); 3910 } 3911 3912 /* 3913 * We will have 2 completion queues in MSI / Legacy mode, 3914 * Queue 0 for default completions 3915 * Queue 1 for transmit completions 3916 */ 3917 qlge->rss_ring_count = 1; /* Default completion queue (0) for all */ 3918 qlge->tx_ring_count = 1; /* Single tx completion queue */ 3919 qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count; 3920 3921 QL_PRINT(DBG_INIT, ("%s(%d) Falling back to single completion queue \n", 3922 __func__, qlge->instance)); 3923 /* 3924 * Add the h/w interrupt handler and initialise mutexes 3925 */ 3926 rval = DDI_FAILURE; 3927 3928 /* 3929 * If OS supports MSIX interrupt but fails to allocate, then try 3930 * MSI interrupt. If MSI interrupt allocation fails also, then roll 3931 * back to fixed interrupt. 
3932 */ 3933 if (intr_types & DDI_INTR_TYPE_MSI) { 3934 rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSI); 3935 if (rval == DDI_SUCCESS) { 3936 qlge->intr_type = DDI_INTR_TYPE_MSI; 3937 QL_PRINT(DBG_INIT, ("%s(%d) use MSI Interrupt \n", 3938 __func__, qlge->instance)); 3939 } 3940 } 3941 3942 /* Try Fixed interrupt Legacy mode */ 3943 if (rval != DDI_SUCCESS) { 3944 rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_FIXED); 3945 if (rval != DDI_SUCCESS) { 3946 cmn_err(CE_WARN, "%s(%d):Legacy mode interrupt " 3947 "allocation failed", 3948 __func__, qlge->instance); 3949 } else { 3950 qlge->intr_type = DDI_INTR_TYPE_FIXED; 3951 QL_PRINT(DBG_INIT, ("%s(%d) use Fixed Interrupt \n", 3952 __func__, qlge->instance)); 3953 } 3954 } 3955 3956 return (rval); 3957 } 3958 3959 static void 3960 ql_free_rx_tx_locks(qlge_t *qlge) 3961 { 3962 int i; 3963 struct rx_ring *rx_ring; 3964 struct tx_ring *tx_ring; 3965 3966 for (i = 0; i < qlge->tx_ring_count; i++) { 3967 tx_ring = &qlge->tx_ring[i]; 3968 mutex_destroy(&tx_ring->tx_lock); 3969 } 3970 3971 for (i = 0; i < qlge->rx_ring_count; i++) { 3972 rx_ring = &qlge->rx_ring[i]; 3973 mutex_destroy(&rx_ring->rx_lock); 3974 mutex_destroy(&rx_ring->sbq_lock); 3975 mutex_destroy(&rx_ring->lbq_lock); 3976 } 3977 } 3978 3979 /* 3980 * Frees all resources allocated during attach. 3981 * 3982 * Input: 3983 * dip = pointer to device information structure. 3984 * sequence = bits indicating resources to free. 3985 * 3986 * Context: 3987 * Kernel context. 
 */
static void
ql_free_resources(dev_info_t *dip, qlge_t *qlge)
{

	/* Disable driver timer */
	ql_stop_timer(qlge);

	/*
	 * Each step is gated on its sequence bit and clears it, so this
	 * function is safe to call from any partially-completed attach.
	 * The order reverses the attach order.
	 */
	if (qlge->sequence & INIT_MAC_REGISTERED) {
		(void) mac_unregister(qlge->mh);
		qlge->sequence &= ~INIT_MAC_REGISTERED;
	}

	if (qlge->sequence & INIT_MAC_ALLOC) {
		/* Nothing to do, macp is already freed */
		qlge->sequence &= ~INIT_MAC_ALLOC;
	}

	if (qlge->sequence & INIT_PCI_CONFIG_SETUP) {
		pci_config_teardown(&qlge->pci_handle);
		qlge->sequence &= ~INIT_PCI_CONFIG_SETUP;
	}

	if (qlge->sequence & INIT_ADD_INTERRUPT) {
		ql_free_irq_vectors(qlge);
		qlge->sequence &= ~INIT_ADD_INTERRUPT;
	}

	if (qlge->sequence & INIT_ADD_SOFT_INTERRUPT) {
		(void) ddi_intr_remove_softint(qlge->mpi_event_intr_hdl);
		(void) ddi_intr_remove_softint(qlge->mpi_reset_intr_hdl);
		(void) ddi_intr_remove_softint(qlge->asic_reset_intr_hdl);
		qlge->sequence &= ~INIT_ADD_SOFT_INTERRUPT;
	}

	if (qlge->sequence & INIT_KSTATS) {
		ql_fini_kstats(qlge);
		qlge->sequence &= ~INIT_KSTATS;
	}

	if (qlge->sequence & INIT_MUTEX) {
		mutex_destroy(&qlge->gen_mutex);
		mutex_destroy(&qlge->hw_mutex);
		mutex_destroy(&qlge->mbx_mutex);
		cv_destroy(&qlge->cv_mbx_intr);
		qlge->sequence &= ~INIT_MUTEX;
	}

	if (qlge->sequence & INIT_LOCKS_CREATED) {
		ql_free_rx_tx_locks(qlge);
		qlge->sequence &= ~INIT_LOCKS_CREATED;
	}

	if (qlge->sequence & INIT_MEMORY_ALLOC) {
		ql_free_mem_resources(qlge);
		qlge->sequence &= ~INIT_MEMORY_ALLOC;
	}

	if (qlge->sequence & INIT_REGS_SETUP) {
		ddi_regs_map_free(&qlge->dev_handle);
		qlge->sequence &= ~INIT_REGS_SETUP;
	}

	if (qlge->sequence & INIT_DOORBELL_REGS_SETUP) {
		ddi_regs_map_free(&qlge->dev_doorbell_reg_handle);
		qlge->sequence &= ~INIT_DOORBELL_REGS_SETUP;
	}

	/*
	 * free flash flt table that allocated in attach stage
	 */
	if ((qlge->flt.ql_flt_entry_ptr != NULL) &&
	    (qlge->flt.header.length != 0)) {
		kmem_free(qlge->flt.ql_flt_entry_ptr, qlge->flt.header.length);
		qlge->flt.ql_flt_entry_ptr = NULL;
	}

	/* finally, free qlge structure */
	if (qlge->sequence & INIT_SOFTSTATE_ALLOC) {
		kmem_free(qlge, sizeof (qlge_t));
	}

	/* detach-time cleanup of the devinfo node */
	ddi_prop_remove_all(dip);
	ddi_set_driver_private(dip, NULL);

}

/*
 * Set promiscuous mode of the driver
 * Caller must catch HW_LOCK
 */
void
ql_set_promiscuous(qlge_t *qlge, int mode)
{
	/* mode != 0 enables, mode == 0 disables the promiscuous slot */
	if (mode) {
		(void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
		    RT_IDX_VALID, 1);
	} else {
		(void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
		    RT_IDX_VALID, 0);
	}
}
/*
 * Write 'data1' to Mac Protocol Address Index Register and
 * 'data2' to Mac Protocol Address Data Register
 * Assuming that the Mac Protocol semaphore lock has been acquired.
4094 */ 4095 static int 4096 ql_write_mac_proto_regs(qlge_t *qlge, uint32_t data1, uint32_t data2) 4097 { 4098 int return_value = DDI_SUCCESS; 4099 4100 if (ql_wait_reg_bit(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX, 4101 MAC_PROTOCOL_ADDRESS_INDEX_MW, BIT_SET, 5) != DDI_SUCCESS) { 4102 cmn_err(CE_WARN, "Wait for MAC_PROTOCOL Address Register " 4103 "timeout."); 4104 return_value = DDI_FAILURE; 4105 goto out; 4106 } 4107 ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX /* A8 */, data1); 4108 ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA /* 0xAC */, data2); 4109 out: 4110 return (return_value); 4111 } 4112 /* 4113 * Enable the 'index'ed multicast address in the host memory's multicast_list 4114 */ 4115 int 4116 ql_add_multicast_address(qlge_t *qlge, int index) 4117 { 4118 int rtn_val = DDI_FAILURE; 4119 uint32_t offset; 4120 uint32_t value1, value2; 4121 4122 /* Acquire the required semaphore */ 4123 if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) { 4124 return (rtn_val); 4125 } 4126 4127 /* Program Offset0 - lower 32 bits of the MAC address */ 4128 offset = 0; 4129 value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST | 4130 (index << 4) | offset; 4131 value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24) 4132 |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16) 4133 |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8) 4134 |(qlge->multicast_list[index].addr.ether_addr_octet[5])); 4135 if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) 4136 goto out; 4137 4138 /* Program offset1: upper 16 bits of the MAC address */ 4139 offset = 1; 4140 value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST | 4141 (index<<4) | offset; 4142 value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[0] << 8) 4143 |qlge->multicast_list[index].addr.ether_addr_octet[1]); 4144 if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) { 4145 goto out; 4146 } 4147 rtn_val = DDI_SUCCESS; 4148 out: 
4149 ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK); 4150 return (rtn_val); 4151 } 4152 4153 /* 4154 * Disable the 'index'ed multicast address in the host memory's multicast_list 4155 */ 4156 int 4157 ql_remove_multicast_address(qlge_t *qlge, int index) 4158 { 4159 int rtn_val = DDI_FAILURE; 4160 uint32_t offset; 4161 uint32_t value1, value2; 4162 4163 /* Acquire the required semaphore */ 4164 if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) { 4165 return (rtn_val); 4166 } 4167 /* Program Offset0 - lower 32 bits of the MAC address */ 4168 offset = 0; 4169 value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4); 4170 value2 = 4171 ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24) 4172 |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16) 4173 |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8) 4174 |(qlge->multicast_list[index].addr.ether_addr_octet[5])); 4175 if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) { 4176 goto out; 4177 } 4178 /* Program offset1: upper 16 bits of the MAC address */ 4179 offset = 1; 4180 value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4); 4181 value2 = 0; 4182 if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) { 4183 goto out; 4184 } 4185 rtn_val = DDI_SUCCESS; 4186 out: 4187 ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK); 4188 return (rtn_val); 4189 } 4190 4191 /* 4192 * Add a new multicast address to the list of supported list 4193 * This API is called after OS called gld_set_multicast (GLDv2) 4194 * or m_multicst (GLDv3) 4195 * 4196 * Restriction: 4197 * The number of maximum multicast address is limited by hardware. 
4198 */ 4199 int 4200 ql_add_to_multicast_list(qlge_t *qlge, uint8_t *ep) 4201 { 4202 uint32_t index = qlge->multicast_list_count; 4203 int rval = DDI_SUCCESS; 4204 int status; 4205 4206 if ((ep[0] & 01) == 0) { 4207 rval = EINVAL; 4208 goto exit; 4209 } 4210 4211 /* if there is an availabe space in multicast_list, then add it */ 4212 if (index < MAX_MULTICAST_LIST_SIZE) { 4213 bcopy(ep, qlge->multicast_list[index].addr.ether_addr_octet, 4214 ETHERADDRL); 4215 /* increment the total number of addresses in multicast list */ 4216 (void) ql_add_multicast_address(qlge, index); 4217 qlge->multicast_list_count++; 4218 QL_PRINT(DBG_GLD, 4219 ("%s(%d): added to index of multicast list= 0x%x, " 4220 "total %d\n", __func__, qlge->instance, index, 4221 qlge->multicast_list_count)); 4222 4223 if (index > MAX_MULTICAST_HW_SIZE) { 4224 if (!qlge->multicast_promisc) { 4225 status = ql_set_routing_reg(qlge, 4226 RT_IDX_ALLMULTI_SLOT, 4227 RT_IDX_MCAST, 1); 4228 if (status) { 4229 cmn_err(CE_WARN, 4230 "Failed to init routing reg " 4231 "for mcast promisc mode."); 4232 rval = ENOENT; 4233 goto exit; 4234 } 4235 qlge->multicast_promisc = B_TRUE; 4236 } 4237 } 4238 } else { 4239 rval = ENOENT; 4240 } 4241 exit: 4242 return (rval); 4243 } 4244 4245 /* 4246 * Remove an old multicast address from the list of supported multicast 4247 * addresses. This API is called after OS called gld_set_multicast (GLDv2) 4248 * or m_multicst (GLDv3) 4249 * The number of maximum multicast address is limited by hardware. 
 */
int
ql_remove_from_multicast_list(qlge_t *qlge, uint8_t *ep)
{
	uint32_t total = qlge->multicast_list_count;
	/* NOTE(review): 'i' is signed while 'total' is uint32_t — the
	 * comparison promotes i; harmless while counts stay small. */
	int i = 0;
	int rmv_index = 0;
	size_t length = sizeof (ql_multicast_addr);
	int status;

	for (i = 0; i < total; i++) {
		/* Scan for the entry matching ep. */
		if (bcmp(ep, &qlge->multicast_list[i].addr, ETHERADDRL) != 0) {
			continue;
		}

		rmv_index = i;
		/* block move the rest of the multicast addresses forward */
		length = ((total - 1) - i) * sizeof (ql_multicast_addr);
		if (length > 0) {
			bcopy(&qlge->multicast_list[i + 1],
			    &qlge->multicast_list[i], length);
		}
		qlge->multicast_list_count--;
		if (qlge->multicast_list_count <= MAX_MULTICAST_HW_SIZE) {
			/*
			 * there is a deletion in multicast list table,
			 * re-enable them
			 */
			for (i = rmv_index; i < qlge->multicast_list_count;
			    i++) {
				(void) ql_add_multicast_address(qlge, i);
			}
			/* and disable the last one (now stale) */
			(void) ql_remove_multicast_address(qlge, i);

			/* disable multicast promiscuous mode */
			if (qlge->multicast_promisc) {
				status = ql_set_routing_reg(qlge,
				    RT_IDX_ALLMULTI_SLOT,
				    RT_IDX_MCAST, 0);
				if (status) {
					cmn_err(CE_WARN,
					    "Failed to init routing reg for "
					    "mcast promisc mode.");
					goto exit;
				}
				/* write to config register */
				qlge->multicast_promisc = B_FALSE;
			}
		}
		break;
	}
exit:
	/* Always reports success, even if the address was not found. */
	return (DDI_SUCCESS);
}

/*
 * Read a XGMAC register
 */
int
ql_read_xgmac_reg(qlge_t *qlge, uint32_t addr, uint32_t *val)
{
	int rtn_val = DDI_FAILURE;

	/* wait for XGMAC Address register RDY bit set */
	if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
	    BIT_SET, 10) != DDI_SUCCESS) {
		goto out;
	}
	/* start rx transaction */
	ql_write_reg(qlge, REG_XGMAC_ADDRESS, addr|XGMAC_ADDRESS_READ_TRANSACT);

	/*
	 * wait for XGMAC Address register RDY bit set,
	 * which indicates data is ready
	 */
	if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
	    BIT_SET, 10) != DDI_SUCCESS) {
		goto out;
	}
	/* read data from XGAMC_DATA register */
	*val = ql_read_reg(qlge, REG_XGMAC_DATA);
	rtn_val = DDI_SUCCESS;
out:
	return (rtn_val);
}

/*
 * Implement checksum offload for IPv4 IP packets
 */
static void
ql_hw_csum_setup(qlge_t *qlge, uint32_t pflags, caddr_t bp,
    struct ob_mac_iocb_req *mac_iocb_ptr)
{
	struct ip *iphdr = NULL;
	struct ether_header *ethhdr;
	struct ether_vlan_header *ethvhdr;
	struct tcphdr *tcp_hdr;
	uint32_t etherType;
	int mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
	int ip_hdr_off, tcp_udp_hdr_off, hdr_off;

	ethhdr = (struct ether_header *)((void *)bp);
	ethvhdr = (struct ether_vlan_header *)((void *)bp);
	/* Is this vlan packet? */
	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
		mac_hdr_len = sizeof (struct ether_vlan_header);
		etherType = ntohs(ethvhdr->ether_type);
	} else {
		mac_hdr_len = sizeof (struct ether_header);
		etherType = ntohs(ethhdr->ether_type);
	}
	/* Is this IPv4 or IPv6 packet? */
	if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len)) ==
	    IPV4_VERSION) {
		if (etherType == ETHERTYPE_IP /* 0800 */) {
			iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
		} else {
			/* EMPTY */
			QL_PRINT(DBG_TX,
			    ("%s(%d) : IPv4 None IP packet type 0x%x\n",
			    __func__, qlge->instance, etherType));
		}
	}
	/* ipV4 packets only; IPv6 falls through with no offload set up */
	if (iphdr != NULL) {

		ip_hdr_len = IPH_HDR_LENGTH(iphdr);
		QL_PRINT(DBG_TX,
		    ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH:"
		    " %d bytes \n", __func__, qlge->instance, ip_hdr_len));

		ip_hdr_off = mac_hdr_len;
		QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
		    __func__, qlge->instance, ip_hdr_len));

		mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
		    OB_MAC_IOCB_REQ_IPv4);

		if (pflags & HCK_IPV4_HDRCKSUM) {
			QL_PRINT(DBG_TX, ("%s(%d) : Do IPv4 header checksum\n",
			    __func__, qlge->instance));
			mac_iocb_ptr->opcode = OPCODE_OB_MAC_OFFLOAD_IOCB;
			mac_iocb_ptr->flag2 = (uint8_t)(mac_iocb_ptr->flag2 |
			    OB_MAC_IOCB_REQ_IC);
			/* chip computes ip_sum; field must start as zero */
			iphdr->ip_sum = 0;
			mac_iocb_ptr->hdr_off = (uint16_t)
			    cpu_to_le16(ip_hdr_off);
		}
		if (pflags & HCK_FULLCKSUM) {
			if (iphdr->ip_p == IPPROTO_TCP) {
				tcp_hdr =
				    (struct tcphdr *)(void *)
				    ((uint8_t *)(void *)iphdr + ip_hdr_len);
				QL_PRINT(DBG_TX, ("%s(%d) : Do TCP checksum\n",
				    __func__, qlge->instance));
				mac_iocb_ptr->opcode =
				    OPCODE_OB_MAC_OFFLOAD_IOCB;
				mac_iocb_ptr->flag1 =
				    (uint8_t)(mac_iocb_ptr->flag1 |
				    OB_MAC_IOCB_REQ_TC);
				mac_iocb_ptr->flag2 =
				    (uint8_t)(mac_iocb_ptr->flag2 |
				    OB_MAC_IOCB_REQ_IC);
				iphdr->ip_sum = 0;
				tcp_udp_hdr_off = mac_hdr_len+ip_hdr_len;
				tcp_udp_hdr_len = tcp_hdr->th_off*4;
				QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
				    __func__, qlge->instance, tcp_udp_hdr_len));
				/*
				 * hdr_off packs the L4 header offset (shifted
				 * left 6) together with the IP header offset
				 * into one 16-bit IOCB field.
				 */
				hdr_off = ip_hdr_off;
				tcp_udp_hdr_off <<= 6;
				hdr_off |= tcp_udp_hdr_off;
				mac_iocb_ptr->hdr_off =
				    (uint16_t)cpu_to_le16(hdr_off);
				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
				    tcp_udp_hdr_len);

				/*
				 * if the chip is unable to do pseudo header
				 * cksum calculation, do it in then put the
				 * result to the data passed to the chip
				 */
				if (qlge->cfg_flags &
				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
					ql_pseudo_cksum((uint8_t *)iphdr);
				}
			} else if (iphdr->ip_p == IPPROTO_UDP) {
				QL_PRINT(DBG_TX, ("%s(%d) : Do UDP checksum\n",
				    __func__, qlge->instance));
				mac_iocb_ptr->opcode =
				    OPCODE_OB_MAC_OFFLOAD_IOCB;
				mac_iocb_ptr->flag1 =
				    (uint8_t)(mac_iocb_ptr->flag1 |
				    OB_MAC_IOCB_REQ_UC);
				mac_iocb_ptr->flag2 =
				    (uint8_t)(mac_iocb_ptr->flag2 |
				    OB_MAC_IOCB_REQ_IC);
				iphdr->ip_sum = 0;
				tcp_udp_hdr_off = mac_hdr_len + ip_hdr_len;
				tcp_udp_hdr_len = sizeof (struct udphdr);
				QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
				    __func__, qlge->instance, tcp_udp_hdr_len));
				hdr_off = ip_hdr_off;
				tcp_udp_hdr_off <<= 6;
				hdr_off |= tcp_udp_hdr_off;
				mac_iocb_ptr->hdr_off =
				    (uint16_t)cpu_to_le16(hdr_off);
				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
				    cpu_to_le16(mac_hdr_len + ip_hdr_len
				    + tcp_udp_hdr_len);

				/*
				 * if the chip is unable to calculate pseudo
				 * hdr cksum,do it in then put the result to
				 * the data passed to the chip
				 */
				if (qlge->cfg_flags &
				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
					ql_pseudo_cksum((uint8_t *)iphdr);
				}
			}
		}
	}
}

/*
 * For TSO/LSO:
 * MAC frame transmission with TCP large segment offload is performed in the
 * same way as the MAC frame transmission with checksum offload with the
 * exception that the maximum TCP segment size (MSS) must be specified to
 * allow the chip to segment the data into legal sized frames.
4482 * The host also needs to calculate a pseudo-header checksum over the 4483 * following fields: 4484 * Source IP Address, Destination IP Address, and the Protocol. 4485 * The TCP length is not included in the pseudo-header calculation. 4486 * The pseudo-header checksum is place in the TCP checksum field of the 4487 * prototype header. 4488 */ 4489 static void 4490 ql_lso_pseudo_cksum(uint8_t *buf) 4491 { 4492 uint32_t cksum; 4493 uint16_t iphl; 4494 uint16_t proto; 4495 4496 /* 4497 * Calculate the LSO pseudo-header checksum. 4498 */ 4499 iphl = (uint16_t)(4 * (buf[0] & 0xF)); 4500 cksum = proto = buf[9]; 4501 cksum += (((uint16_t)buf[12])<<8) + buf[13]; 4502 cksum += (((uint16_t)buf[14])<<8) + buf[15]; 4503 cksum += (((uint16_t)buf[16])<<8) + buf[17]; 4504 cksum += (((uint16_t)buf[18])<<8) + buf[19]; 4505 cksum = (cksum>>16) + (cksum & 0xFFFF); 4506 cksum = (cksum>>16) + (cksum & 0xFFFF); 4507 4508 /* 4509 * Point it to the TCP/UDP header, and 4510 * update the checksum field. 4511 */ 4512 buf += iphl + ((proto == IPPROTO_TCP) ? 4513 TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET); 4514 4515 *(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum); 4516 } 4517 4518 /* 4519 * Tell the hardware to do Large Send Offload (LSO) 4520 * 4521 * Some fields in ob_mac_iocb need to be set so hardware can know what is 4522 * the incoming packet, TCP or UDP, whether a VLAN tag needs to be inserted 4523 * in the right place of the packet etc, thus, hardware can process the 4524 * packet correctly. 
4525 */ 4526 static void 4527 ql_hw_lso_setup(qlge_t *qlge, uint32_t mss, caddr_t bp, 4528 struct ob_mac_iocb_req *mac_iocb_ptr) 4529 { 4530 struct ip *iphdr = NULL; 4531 struct ether_header *ethhdr; 4532 struct ether_vlan_header *ethvhdr; 4533 struct tcphdr *tcp_hdr; 4534 struct udphdr *udp_hdr; 4535 uint32_t etherType; 4536 uint16_t mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len; 4537 uint16_t ip_hdr_off, tcp_udp_hdr_off, hdr_off; 4538 4539 ethhdr = (struct ether_header *)(void *)bp; 4540 ethvhdr = (struct ether_vlan_header *)(void *)bp; 4541 4542 /* Is this vlan packet? */ 4543 if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) { 4544 mac_hdr_len = sizeof (struct ether_vlan_header); 4545 etherType = ntohs(ethvhdr->ether_type); 4546 } else { 4547 mac_hdr_len = sizeof (struct ether_header); 4548 etherType = ntohs(ethhdr->ether_type); 4549 } 4550 /* Is this IPv4 or IPv6 packet? */ 4551 if (IPH_HDR_VERSION((ipha_t *)(void *)(bp + mac_hdr_len)) == 4552 IPV4_VERSION) { 4553 if (etherType == ETHERTYPE_IP /* 0800 */) { 4554 iphdr = (struct ip *)(void *)(bp+mac_hdr_len); 4555 } else { 4556 /* EMPTY */ 4557 QL_PRINT(DBG_TX, ("%s(%d) : IPv4 None IP packet" 4558 " type 0x%x\n", 4559 __func__, qlge->instance, etherType)); 4560 } 4561 } 4562 4563 if (iphdr != NULL) { /* ipV4 packets */ 4564 ip_hdr_len = (uint16_t)IPH_HDR_LENGTH(iphdr); 4565 QL_PRINT(DBG_TX, 4566 ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH: %d" 4567 " bytes \n", __func__, qlge->instance, ip_hdr_len)); 4568 4569 ip_hdr_off = mac_hdr_len; 4570 QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n", 4571 __func__, qlge->instance, ip_hdr_len)); 4572 4573 mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 | 4574 OB_MAC_IOCB_REQ_IPv4); 4575 if (qlge->cfg_flags & CFG_CKSUM_FULL_IPv4) { 4576 if (iphdr->ip_p == IPPROTO_TCP) { 4577 tcp_hdr = (struct tcphdr *)(void *) 4578 ((uint8_t *)(void *)iphdr + 4579 ip_hdr_len); 4580 QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on TCP " 4581 "packet\n", 4582 __func__, qlge->instance)); 4583 
mac_iocb_ptr->opcode = 4584 OPCODE_OB_MAC_OFFLOAD_IOCB; 4585 mac_iocb_ptr->flag1 = 4586 (uint8_t)(mac_iocb_ptr->flag1 | 4587 OB_MAC_IOCB_REQ_LSO); 4588 iphdr->ip_sum = 0; 4589 tcp_udp_hdr_off = 4590 (uint16_t)(mac_hdr_len+ip_hdr_len); 4591 tcp_udp_hdr_len = 4592 (uint16_t)(tcp_hdr->th_off*4); 4593 QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n", 4594 __func__, qlge->instance, tcp_udp_hdr_len)); 4595 hdr_off = ip_hdr_off; 4596 tcp_udp_hdr_off <<= 6; 4597 hdr_off |= tcp_udp_hdr_off; 4598 mac_iocb_ptr->hdr_off = 4599 (uint16_t)cpu_to_le16(hdr_off); 4600 mac_iocb_ptr->protocol_hdr_len = (uint16_t) 4601 cpu_to_le16(mac_hdr_len + ip_hdr_len + 4602 tcp_udp_hdr_len); 4603 mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss); 4604 4605 /* 4606 * if the chip is unable to calculate pseudo 4607 * header checksum, do it in then put the result 4608 * to the data passed to the chip 4609 */ 4610 if (qlge->cfg_flags & 4611 CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) 4612 ql_lso_pseudo_cksum((uint8_t *)iphdr); 4613 } else if (iphdr->ip_p == IPPROTO_UDP) { 4614 udp_hdr = (struct udphdr *)(void *) 4615 ((uint8_t *)(void *)iphdr 4616 + ip_hdr_len); 4617 QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on UDP " 4618 "packet\n", 4619 __func__, qlge->instance)); 4620 mac_iocb_ptr->opcode = 4621 OPCODE_OB_MAC_OFFLOAD_IOCB; 4622 mac_iocb_ptr->flag1 = 4623 (uint8_t)(mac_iocb_ptr->flag1 | 4624 OB_MAC_IOCB_REQ_LSO); 4625 iphdr->ip_sum = 0; 4626 tcp_udp_hdr_off = 4627 (uint16_t)(mac_hdr_len+ip_hdr_len); 4628 tcp_udp_hdr_len = 4629 (uint16_t)(udp_hdr->uh_ulen*4); 4630 QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n", 4631 __func__, qlge->instance, tcp_udp_hdr_len)); 4632 hdr_off = ip_hdr_off; 4633 tcp_udp_hdr_off <<= 6; 4634 hdr_off |= tcp_udp_hdr_off; 4635 mac_iocb_ptr->hdr_off = 4636 (uint16_t)cpu_to_le16(hdr_off); 4637 mac_iocb_ptr->protocol_hdr_len = (uint16_t) 4638 cpu_to_le16(mac_hdr_len + ip_hdr_len + 4639 tcp_udp_hdr_len); 4640 mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss); 4641 4642 /* 4643 * if the chip is unable to 
do pseudo header 4644 * checksum calculation, do it here then put the 4645 * result to the data passed to the chip 4646 */ 4647 if (qlge->cfg_flags & 4648 CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) 4649 ql_lso_pseudo_cksum((uint8_t *)iphdr); 4650 } 4651 } 4652 } 4653 } 4654 4655 /* 4656 * Generic packet sending function which is used to send one packet. 4657 */ 4658 int 4659 ql_send_common(struct tx_ring *tx_ring, mblk_t *mp) 4660 { 4661 struct tx_ring_desc *tx_cb; 4662 struct ob_mac_iocb_req *mac_iocb_ptr; 4663 mblk_t *tp; 4664 size_t msg_len = 0; 4665 size_t off; 4666 caddr_t bp; 4667 size_t nbyte, total_len; 4668 uint_t i = 0; 4669 int j = 0, frags = 0; 4670 uint32_t phy_addr_low, phy_addr_high; 4671 uint64_t phys_addr; 4672 clock_t now; 4673 uint32_t pflags = 0; 4674 uint32_t mss = 0; 4675 enum tx_mode_t tx_mode; 4676 struct oal_entry *oal_entry; 4677 int status; 4678 uint_t ncookies, oal_entries, max_oal_entries; 4679 size_t max_seg_len = 0; 4680 boolean_t use_lso = B_FALSE; 4681 struct oal_entry *tx_entry = NULL; 4682 struct oal_entry *last_oal_entry; 4683 qlge_t *qlge = tx_ring->qlge; 4684 ddi_dma_cookie_t dma_cookie; 4685 size_t tx_buf_len = QL_MAX_COPY_LENGTH; 4686 int force_pullup = 0; 4687 4688 tp = mp; 4689 total_len = msg_len = 0; 4690 max_oal_entries = TX_DESC_PER_IOCB + MAX_SG_ELEMENTS-1; 4691 4692 /* Calculate number of data and segments in the incoming message */ 4693 for (tp = mp; tp != NULL; tp = tp->b_cont) { 4694 nbyte = MBLKL(tp); 4695 total_len += nbyte; 4696 max_seg_len = max(nbyte, max_seg_len); 4697 QL_PRINT(DBG_TX, ("Requested sending data in %d segments, " 4698 "total length: %d\n", frags, nbyte)); 4699 frags++; 4700 } 4701 4702 if (total_len >= QL_LSO_MAX) { 4703 freemsg(mp); 4704 #ifdef QLGE_LOAD_UNLOAD 4705 cmn_err(CE_NOTE, "%s: quit, packet oversize %d\n", 4706 __func__, (int)total_len); 4707 #endif 4708 return (NULL); 4709 } 4710 4711 bp = (caddr_t)mp->b_rptr; 4712 if (bp[0] & 1) { 4713 if (bcmp(bp, ql_ether_broadcast_addr.ether_addr_octet, 
		    ETHERADDRL) == 0) {
			QL_PRINT(DBG_TX, ("Broadcast packet\n"));
			tx_ring->brdcstxmt++;
		} else {
			QL_PRINT(DBG_TX, ("multicast packet\n"));
			tx_ring->multixmt++;
		}
	}

	tx_ring->obytes += total_len;
	tx_ring->opackets ++;

	QL_PRINT(DBG_TX, ("total requested sending data length: %d, in %d segs,"
	    " max seg len: %d\n", total_len, frags, max_seg_len));

	/* claim a free slot in tx ring */
	tx_cb = &tx_ring->wq_desc[tx_ring->prod_idx];

	/* get the tx descriptor */
	mac_iocb_ptr = tx_cb->queue_entry;

	bzero((void *)mac_iocb_ptr, sizeof (*mac_iocb_ptr));

	ASSERT(tx_cb->mp == NULL);

	/*
	 * Decide to use DMA map or copy mode.
	 * DMA map mode must be used when the total msg length is more than the
	 * tx buffer length.
	 */

	if (total_len > tx_buf_len)
		tx_mode = USE_DMA;
	else if (max_seg_len > QL_MAX_COPY_LENGTH)
		tx_mode = USE_DMA;
	else
		tx_mode = USE_COPY;

	/* pick up the checksum / LSO offload requests attached to mp */
	if (qlge->chksum_cap) {
		mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &pflags);
		QL_PRINT(DBG_TX, ("checksum flag is :0x%x, card capability "
		    "is 0x%x \n", pflags, qlge->chksum_cap));
		if (qlge->lso_enable) {
			uint32_t lso_flags = 0;
			mac_lso_get(mp, &mss, &lso_flags);
			use_lso = (lso_flags == HW_LSO);
		}
		QL_PRINT(DBG_TX, ("mss :%d, use_lso %x \n",
		    mss, use_lso));
	}

do_pullup:

	/* concatenate all frags into one large packet if too fragmented */
	if (((tx_mode == USE_DMA)&&(frags > QL_MAX_TX_DMA_HANDLES)) ||
	    force_pullup) {
		mblk_t *mp1;
		if ((mp1 = msgpullup(mp, -1)) != NULL) {
			freemsg(mp);
			mp = mp1;
			frags = 1;
		} else {
			tx_ring->tx_fail_dma_bind++;
			goto bad;
		}
	}

	tx_cb->tx_bytes = (uint32_t)total_len;
	tx_cb->mp = mp;
	tx_cb->tx_dma_handle_used = 0;

	if (tx_mode == USE_DMA) {
		msg_len = total_len;

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
		mac_iocb_ptr->tid = tx_ring->prod_idx;
		mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
		mac_iocb_ptr->txq_idx = tx_ring->wq_id;

		tx_entry = &mac_iocb_ptr->oal_entry[0];
		oal_entry = NULL;

		/* bind each mblk fragment; j counts DMA handles used */
		for (tp = mp, oal_entries = j = 0; tp != NULL;
		    tp = tp->b_cont) {
			/* if too many tx dma handles needed */
			if (j >= QL_MAX_TX_DMA_HANDLES) {
				tx_ring->tx_no_dma_handle++;
				if (!force_pullup) {
					force_pullup = 1;
					goto do_pullup;
				} else {
					goto bad;
				}
			}
			nbyte = (uint16_t)MBLKL(tp);
			if (nbyte == 0)
				continue;

			status = ddi_dma_addr_bind_handle(
			    tx_cb->tx_dma_handle[j], NULL,
			    (caddr_t)tp->b_rptr, nbyte,
			    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
			    0, &dma_cookie, &ncookies);

			QL_PRINT(DBG_TX, ("map sending data segment: %d, "
			    "length: %d, spans in %d cookies\n",
			    j, nbyte, ncookies));

			if (status != DDI_DMA_MAPPED) {
				goto bad;
			}
			/*
			 * Each fragment can span several cookies. One cookie
			 * will use one tx descriptor to transmit.
			 */
			for (i = ncookies; i > 0; i--, tx_entry++,
			    oal_entries++) {
				/*
				 * The number of TX descriptors that can be
				 * saved in tx iocb and oal list is limited
				 */
				if (oal_entries > max_oal_entries) {
					tx_ring->tx_no_dma_cookie++;
					if (!force_pullup) {
						force_pullup = 1;
						goto do_pullup;
					} else {
						goto bad;
					}
				}

				if ((oal_entries == TX_DESC_PER_IOCB) &&
				    !oal_entry) {
					/*
					 * Time to switch to an oal list
					 * The last entry should be copied
					 * to first entry in the oal list
					 */
					oal_entry = tx_cb->oal;
					tx_entry =
					    &mac_iocb_ptr->oal_entry[
					    TX_DESC_PER_IOCB-1];
					bcopy(tx_entry, oal_entry,
					    sizeof (*oal_entry));

					/*
					 * last entry should be updated to
					 * point to the extended oal list itself
					 */
					tx_entry->buf_addr_low =
					    cpu_to_le32(
					    LS_64BITS(tx_cb->oal_dma_addr));
					tx_entry->buf_addr_high =
					    cpu_to_le32(
					    MS_64BITS(tx_cb->oal_dma_addr));
					/*
					 * Point tx_entry to the oal list
					 * second entry
					 */
					tx_entry = &oal_entry[1];
				}

				tx_entry->buf_len =
				    (uint32_t)cpu_to_le32(dma_cookie.dmac_size);
				phys_addr = dma_cookie.dmac_laddress;
				tx_entry->buf_addr_low =
				    cpu_to_le32(LS_64BITS(phys_addr));
				tx_entry->buf_addr_high =
				    cpu_to_le32(MS_64BITS(phys_addr));

				last_oal_entry = tx_entry;

				if (i > 1)
					ddi_dma_nextcookie(
					    tx_cb->tx_dma_handle[j],
					    &dma_cookie);
			}
			j++;
		}
		/*
		 * if OAL is used, the last oal entry in tx iocb indicates
		 * number of additional address/len pairs in OAL
		 */
		if (oal_entries > TX_DESC_PER_IOCB) {
			tx_entry = &mac_iocb_ptr->oal_entry[TX_DESC_PER_IOCB-1];
			tx_entry->buf_len = (uint32_t)
			    (cpu_to_le32((sizeof (struct oal_entry) *
			    (oal_entries -TX_DESC_PER_IOCB+1))|OAL_CONT_ENTRY));
		}
		/*
		 * NOTE(review): if every mblk were zero-length, no cookie
		 * is ever mapped and last_oal_entry is used uninitialized
		 * here; callers never submit empty messages -- confirm.
		 */
		last_oal_entry->buf_len = cpu_to_le32(
		    le32_to_cpu(last_oal_entry->buf_len)|OAL_LAST_ENTRY);

		tx_cb->tx_dma_handle_used = j;
		QL_PRINT(DBG_TX, ("total tx_dma_handle_used %d cookies %d \n",
		    j, oal_entries));

		bp = (caddr_t)mp->b_rptr;
	}
	if (tx_mode == USE_COPY) {
		bp = tx_cb->copy_buffer;
		off = 0;
		nbyte = 0;
		frags = 0;
		/*
		 * Copy up to tx_buf_len of the transmit data
		 * from mp to tx buffer
		 */
		for (tp = mp; tp != NULL; tp = tp->b_cont) {
			nbyte = MBLKL(tp);
			if ((off + nbyte) <= tx_buf_len) {
				bcopy(tp->b_rptr, &bp[off], nbyte);
				off += nbyte;
				frags ++;
			}
		}

		msg_len = off;

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
		mac_iocb_ptr->tid = tx_ring->prod_idx;
		mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
		mac_iocb_ptr->txq_idx = tx_ring->wq_id;

		QL_PRINT(DBG_TX, ("Copy Mode:actual sent data length is: %d, "
		    "from %d segaments\n", msg_len, frags));

		phys_addr = tx_cb->copy_buffer_dma_addr;
		phy_addr_low = cpu_to_le32(LS_64BITS(phys_addr));
		phy_addr_high = cpu_to_le32(MS_64BITS(phys_addr));

		QL_DUMP(DBG_TX, "\t requested sending data:\n",
		    (uint8_t *)tx_cb->copy_buffer, 8, total_len);

		/* single contiguous buffer: one OAL entry, marked last */
		mac_iocb_ptr->oal_entry[0].buf_len = (uint32_t)
		    cpu_to_le32(msg_len | OAL_LAST_ENTRY);
		mac_iocb_ptr->oal_entry[0].buf_addr_low = phy_addr_low;
		mac_iocb_ptr->oal_entry[0].buf_addr_high = phy_addr_high;

		freemsg(mp); /* no need, we have copied */
		tx_cb->mp = NULL;
	} /* End of Copy Mode */

	/* Do TSO/LSO on TCP packet? */
	if (use_lso && mss) {
		ql_hw_lso_setup(qlge, mss, bp, mac_iocb_ptr);
	} else if (pflags & qlge->chksum_cap) {
		/* Do checksum offloading */
		ql_hw_csum_setup(qlge, pflags, bp, mac_iocb_ptr);
	}

	/* let device know the latest outbound IOCB */
	(void) ddi_dma_sync(tx_ring->wq_dma.dma_handle,
	    (off_t)((uintptr_t)mac_iocb_ptr - (uintptr_t)tx_ring->wq_dma.vaddr),
	    (size_t)sizeof (*mac_iocb_ptr), DDI_DMA_SYNC_FORDEV);

	if (tx_mode == USE_DMA) {
		/* let device know the latest outbound OAL if necessary */
		if (oal_entries > TX_DESC_PER_IOCB) {
			(void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
			    (off_t)0,
			    (sizeof (struct oal_entry) *
			    (oal_entries -TX_DESC_PER_IOCB+1)),
			    DDI_DMA_SYNC_FORDEV);
		}
	} else { /* for USE_COPY mode, tx buffer has changed */
		/* let device know the latest change */
		(void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
		    /* copy buf offset */
		    (off_t)(sizeof (oal_entry) * MAX_SG_ELEMENTS),
		    msg_len, DDI_DMA_SYNC_FORDEV);
	}

	/* save how the packet was sent */
	tx_cb->tx_type = tx_mode;

	QL_DUMP_REQ_PKT(qlge, mac_iocb_ptr, tx_cb->oal, oal_entries);
	/* reduce the number of available tx slot */
	atomic_dec_32(&tx_ring->tx_free_count);

	tx_ring->prod_idx++;
	if (tx_ring->prod_idx >= tx_ring->wq_len)
		tx_ring->prod_idx = 0;

	now = ddi_get_lbolt();
	qlge->last_tx_time = now;

	return (DDI_SUCCESS);

bad:
	/*
	 * if for any reason driver can not send, delete
	 * the message pointer, mp.  DDI_SUCCESS is still returned
	 * because mp has been consumed; the framework must not retry.
	 */
	now = ddi_get_lbolt();
	freemsg(mp);
	mp = NULL;
	for (i = 0; i < j; i++)
		(void) ddi_dma_unbind_handle(tx_cb->tx_dma_handle[i]);

	QL_PRINT(DBG_TX, ("%s(%d) failed at 0x%x",
	    __func__, qlge->instance, (int)now));

	return (DDI_SUCCESS);
}


/*
 * Initializes hardware and driver software flags before the driver
 * is finally ready to work.
 *
 * Called with no locks held; takes and drops qlge->hw_mutex around the
 * hardware reset / buffer reallocation / bringup sequence.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
ql_do_start(qlge_t *qlge)
{
	int i;
	struct rx_ring *rx_ring;
	uint16_t lbq_buf_size;
	int rings_done;

	ASSERT(qlge != NULL);

	mutex_enter(&qlge->hw_mutex);

	/* Reset adapter */
	(void) ql_asic_reset(qlge);

	/* large buffer size follows the configured MTU */
	lbq_buf_size = (uint16_t)
	    ((qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE);
	if (qlge->rx_ring[0].lbq_buf_size != lbq_buf_size) {
#ifdef QLGE_LOAD_UNLOAD
		cmn_err(CE_NOTE, "realloc buffers old: %d new: %d\n",
		    qlge->rx_ring[0].lbq_buf_size, lbq_buf_size);
#endif
		/*
		 * Check if any ring has buffers still with upper layers
		 * If buffers are pending with upper layers, we use the
		 * existing buffers and don't reallocate new ones
		 * Unfortunately there is no way to evict buffers from
		 * upper layers. Using buffers with the current size may
		 * cause slightly sub-optimal performance, but that seems
		 * to be the easiest way to handle this situation.
		 */
		rings_done = 0;
		for (i = 0; i < qlge->rx_ring_count; i++) {
			rx_ring = &qlge->rx_ring[i];
			if (rx_ring->rx_indicate == 0)
				rings_done++;
			else
				break;
		}
		/*
		 * No buffers pending with upper layers;
		 * reallocate them for new MTU size
		 */
		if (rings_done >= qlge->rx_ring_count) {
			/* free large buffer pool */
			for (i = 0; i < qlge->rx_ring_count; i++) {
				rx_ring = &qlge->rx_ring[i];
				if (rx_ring->type != TX_Q) {
					ql_free_sbq_buffers(rx_ring);
					ql_free_lbq_buffers(rx_ring);
				}
			}
			/* reallocate large buffer pool */
			for (i = 0; i < qlge->rx_ring_count; i++) {
				rx_ring = &qlge->rx_ring[i];
				if (rx_ring->type != TX_Q) {
					(void) ql_alloc_sbufs(qlge, rx_ring);
					(void) ql_alloc_lbufs(qlge, rx_ring);
				}
			}
		}
	}

	if (ql_bringup_adapter(qlge) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "qlge bringup adapter failed");
		ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
		mutex_exit(&qlge->hw_mutex);
		return (DDI_FAILURE);
	}

	mutex_exit(&qlge->hw_mutex);

	/* Get current link state */
	qlge->port_link_state = ql_get_link_state(qlge);

	if (qlge->port_link_state == LS_UP) {
		QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
		    __func__, qlge->instance));
		/* If driver detects a carrier on */
		CARRIER_ON(qlge);
	} else {
		QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
		    __func__, qlge->instance));
		/* If driver detects a lack of carrier */
		CARRIER_OFF(qlge);
	}
	qlge->mac_flags = QL_MAC_STARTED;
	return (DDI_SUCCESS);
}

/*
 * Stop currently running driver
 * Driver needs to stop routing new packets to driver and wait until
 * all pending tx/rx buffers to be free-ed.
 */
int
ql_do_stop(qlge_t *qlge)
{
	int rc = DDI_FAILURE;
	uint32_t i, j, k;
	struct bq_desc *sbq_desc, *lbq_desc;
	struct rx_ring *rx_ring;

	ASSERT(qlge != NULL);

	CARRIER_OFF(qlge);

	rc = ql_bringdown_adapter(qlge);
	if (rc != DDI_SUCCESS) {
		cmn_err(CE_WARN, "qlge bringdown adapter failed.");
	} else
		/* NOTE(review): dead store -- rc is already DDI_SUCCESS */
		rc = DDI_SUCCESS;

	/*
	 * Move every large/small rx buffer still on an "in use" list back
	 * off it, freeing any mblk still attached.  Both in-use lists are
	 * circular arrays; j walks from the head and wraps at the ring
	 * length.
	 */
	for (k = 0; k < qlge->rx_ring_count; k++) {
		rx_ring = &qlge->rx_ring[k];
		if (rx_ring->type != TX_Q) {
			j = rx_ring->lbq_use_head;
#ifdef QLGE_LOAD_UNLOAD
			cmn_err(CE_NOTE, "ring %d: move %d lbufs in use list"
			    " to free list %d\n total %d\n",
			    k, rx_ring->lbuf_in_use_count,
			    rx_ring->lbuf_free_count,
			    rx_ring->lbuf_in_use_count +
			    rx_ring->lbuf_free_count);
#endif
			for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
				lbq_desc = rx_ring->lbuf_in_use[j];
				j++;
				if (j >= rx_ring->lbq_len) {
					j = 0;
				}
				if (lbq_desc->mp) {
					/*
					 * freemsg() runs the buffer's
					 * free routine, which decrements
					 * rx_indicate; bump it first so
					 * the count balances.
					 */
					atomic_inc_32(&rx_ring->rx_indicate);
					freemsg(lbq_desc->mp);
				}
			}
			rx_ring->lbq_use_head = j;
			rx_ring->lbq_use_tail = j;
			rx_ring->lbuf_in_use_count = 0;
			j = rx_ring->sbq_use_head;
#ifdef QLGE_LOAD_UNLOAD
			cmn_err(CE_NOTE, "ring %d: move %d sbufs in use list,"
			    " to free list %d\n total %d \n",
			    k, rx_ring->sbuf_in_use_count,
			    rx_ring->sbuf_free_count,
			    rx_ring->sbuf_in_use_count +
			    rx_ring->sbuf_free_count);
#endif
			for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
				sbq_desc = rx_ring->sbuf_in_use[j];
				j++;
				if (j >= rx_ring->sbq_len) {
					j = 0;
				}
				if (sbq_desc->mp) {
					atomic_inc_32(&rx_ring->rx_indicate);
					freemsg(sbq_desc->mp);
				}
			}
			rx_ring->sbq_use_head = j;
			rx_ring->sbq_use_tail = j;
			rx_ring->sbuf_in_use_count = 0;
		}
	}

	qlge->mac_flags = QL_MAC_STOPPED;

	return (rc);
}

/*
 * Support
 */

/*
 * Disable adapter interrupts and note the state in the soft-state flags.
 */
void
ql_disable_isr(qlge_t *qlge)
{
	/*
	 * disable the hardware interrupt
	 */
	ISP_DISABLE_GLOBAL_INTRS(qlge);

	qlge->flags &= ~INTERRUPTS_ENABLED;
}



/*
 * busy wait for 'usecs' microseconds.
 */
void
qlge_delay(clock_t usecs)
{
	drv_usecwait(usecs);
}

/*
 * retrieve firmware details.
 */

pci_cfg_t *
ql_get_pci_config(qlge_t *qlge)
{
	return (&(qlge->pci_cfg));
}

/*
 * Get current Link status
 * Reads the chip status register and tests the physical-link bit for
 * this function (fn0 vs fn1).  Returns LS_UP or LS_DOWN.
 */
static uint32_t
ql_get_link_state(qlge_t *qlge)
{
	uint32_t bitToCheck = 0;
	uint32_t temp, linkState;

	if (qlge->func_number == qlge->fn0_net) {
		bitToCheck = STS_PL0;
	} else {
		bitToCheck = STS_PL1;
	}
	temp = ql_read_reg(qlge, REG_STATUS);
	QL_PRINT(DBG_GLD, ("%s(%d) chip status reg: 0x%x\n",
	    __func__, qlge->instance, temp));

	if (temp & bitToCheck) {
		linkState = LS_UP;
	} else {
		linkState = LS_DOWN;
	}
	if (CFG_IST(qlge, CFG_CHIP_8100)) {
		/* for Schultz, link Speed is fixed to 10G, full duplex */
		qlge->speed = SPEED_10G;
		qlge->duplex = 1;
	}
	return (linkState);
}
/*
 * Get current link status and report to OS
 */
static void
ql_get_and_report_link_state(qlge_t *qlge)
{
	uint32_t cur_link_state;

	/* Get current link state */
	cur_link_state = ql_get_link_state(qlge);
	/* if link state has changed */
	if (cur_link_state != qlge->port_link_state) {

		qlge->port_link_state = cur_link_state;

		if (qlge->port_link_state == LS_UP) {
			QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
			    __func__, qlge->instance));
			/* If driver detects a carrier on */
			CARRIER_ON(qlge);
		} else {
			QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
			    __func__, qlge->instance));
			/* If driver detects a lack of carrier */
			CARRIER_OFF(qlge);
		}
	}
}

/*
 * timer callback function executed after timer expires
 * (arg is the qlge_t soft state; re-armed by ql_restart_timer)
 */
static void
ql_timer(void* arg)
{
	ql_get_and_report_link_state((qlge_t *)arg);
}

/*
 * stop the running timer if activated
 * Clears ql_timer_timeout_id before calling untimeout() so a
 * concurrent restart sees the timer as already stopped.
 */
static void
ql_stop_timer(qlge_t *qlge)
{
	timeout_id_t timer_id;
	/* Disable driver timer */
	if (qlge->ql_timer_timeout_id != NULL) {
		timer_id = qlge->ql_timer_timeout_id;
		qlge->ql_timer_timeout_id = NULL;
		(void) untimeout(timer_id);
	}
}

/*
 * stop then restart timer (fires every TICKS_PER_SEC / 4, i.e. 250ms)
 */
void
ql_restart_timer(qlge_t *qlge)
{
	ql_stop_timer(qlge);
	qlge->ql_timer_ticks = TICKS_PER_SEC / 4;
	qlge->ql_timer_timeout_id = timeout(ql_timer,
	    (void *)qlge, qlge->ql_timer_ticks);
}

/* ************************************************************************* */
/*
 * Hardware K-Stats Data Structures and Subroutines
 */
/* ************************************************************************* */
static const ql_ksindex_t ql_kstats_hw[] = {
	/* PCI related hardware information (note: no entry for index 8) */
	{ 0, "Vendor Id" },
	{ 1, "Device Id" },
	{ 2, "Command" },
	{ 3, "Status" },
	{ 4, "Revision Id" },
	{ 5, "Cache Line Size" },
	{ 6, "Latency Timer" },
	{ 7, "Header Type" },
	{ 9, "I/O base addr" },
	{ 10, "Control Reg Base addr low" },
	{ 11, "Control Reg Base addr high" },
	{ 12, "Doorbell Reg Base addr low" },
	{ 13, "Doorbell Reg Base addr high" },
	{ 14, "Subsystem Vendor Id" },
	{ 15, "Subsystem Device ID" },
	{ 16, "PCIe Device Control" },
	{ 17, "PCIe Link Status" },

	{ -1, NULL },
};

/*
 * kstat update function for PCI registers
 * Copies the cached PCI config space values (qlge->pci_cfg) into the
 * named kstats in the same order as the ql_kstats_hw table above.
 */
static int
ql_kstats_get_pci_regs(kstat_t *ksp, int flag)
{
	qlge_t *qlge;
	kstat_named_t *knp;

	if (flag !=
KSTAT_READ)
		return (EACCES);

	qlge = ksp->ks_private;
	knp = ksp->ks_data;
	/* order must match the ql_kstats_hw name table */
	(knp++)->value.ui32 = qlge->pci_cfg.vendor_id;
	(knp++)->value.ui32 = qlge->pci_cfg.device_id;
	(knp++)->value.ui32 = qlge->pci_cfg.command;
	(knp++)->value.ui32 = qlge->pci_cfg.status;
	(knp++)->value.ui32 = qlge->pci_cfg.revision;
	(knp++)->value.ui32 = qlge->pci_cfg.cache_line_size;
	(knp++)->value.ui32 = qlge->pci_cfg.latency_timer;
	(knp++)->value.ui32 = qlge->pci_cfg.header_type;
	(knp++)->value.ui32 = qlge->pci_cfg.io_base_address;
	(knp++)->value.ui32 =
	    qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_lower;
	(knp++)->value.ui32 =
	    qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_upper;
	(knp++)->value.ui32 =
	    qlge->pci_cfg.pci_doorbell_mem_base_address_lower;
	(knp++)->value.ui32 =
	    qlge->pci_cfg.pci_doorbell_mem_base_address_upper;
	(knp++)->value.ui32 = qlge->pci_cfg.sub_vendor_id;
	(knp++)->value.ui32 = qlge->pci_cfg.sub_device_id;
	(knp++)->value.ui32 = qlge->pci_cfg.pcie_device_control;
	(knp++)->value.ui32 = qlge->pci_cfg.link_status;

	return (0);
}

static const ql_ksindex_t ql_kstats_mii[] = {
	/* MAC/MII related hardware information */
	{ 0, "mtu"},

	{ -1, NULL},
};


/*
 * kstat update function for MII related information.
5402 */ 5403 static int 5404 ql_kstats_mii_update(kstat_t *ksp, int flag) 5405 { 5406 qlge_t *qlge; 5407 kstat_named_t *knp; 5408 5409 if (flag != KSTAT_READ) 5410 return (EACCES); 5411 5412 qlge = ksp->ks_private; 5413 knp = ksp->ks_data; 5414 5415 (knp++)->value.ui32 = qlge->mtu; 5416 5417 return (0); 5418 } 5419 5420 static const ql_ksindex_t ql_kstats_reg[] = { 5421 /* Register information */ 5422 { 0, "System (0x08)" }, 5423 { 1, "Reset/Fail Over(0x0Ch" }, 5424 { 2, "Function Specific Control(0x10)" }, 5425 { 3, "Status (0x30)" }, 5426 { 4, "Intr Enable (0x34)" }, 5427 { 5, "Intr Status1 (0x3C)" }, 5428 { 6, "Error Status (0x54)" }, 5429 { 7, "XGMAC Flow Control(0x11C)" }, 5430 { 8, "XGMAC Tx Pause Frames(0x230)" }, 5431 { 9, "XGMAC Rx Pause Frames(0x388)" }, 5432 { 10, "XGMAC Rx FIFO Drop Count(0x5B8)" }, 5433 { 11, "interrupts actually allocated" }, 5434 { 12, "interrupts on rx ring 0" }, 5435 { 13, "interrupts on rx ring 1" }, 5436 { 14, "interrupts on rx ring 2" }, 5437 { 15, "interrupts on rx ring 3" }, 5438 { 16, "interrupts on rx ring 4" }, 5439 { 17, "interrupts on rx ring 5" }, 5440 { 18, "interrupts on rx ring 6" }, 5441 { 19, "interrupts on rx ring 7" }, 5442 { 20, "polls on rx ring 0" }, 5443 { 21, "polls on rx ring 1" }, 5444 { 22, "polls on rx ring 2" }, 5445 { 23, "polls on rx ring 3" }, 5446 { 24, "polls on rx ring 4" }, 5447 { 25, "polls on rx ring 5" }, 5448 { 26, "polls on rx ring 6" }, 5449 { 27, "polls on rx ring 7" }, 5450 { 28, "tx no resource on ring 0" }, 5451 { 29, "tx dma bind fail on ring 0" }, 5452 { 30, "tx dma no handle on ring 0" }, 5453 { 31, "tx dma no cookie on ring 0" }, 5454 { 32, "MPI firmware major version"}, 5455 { 33, "MPI firmware minor version"}, 5456 { 34, "MPI firmware sub version"}, 5457 5458 { -1, NULL}, 5459 }; 5460 5461 5462 /* 5463 * kstat update function for device register set 5464 */ 5465 static int 5466 ql_kstats_get_reg_and_dev_stats(kstat_t *ksp, int flag) 5467 { 5468 qlge_t *qlge; 5469 kstat_named_t 
*knp;
	uint32_t val32;
	int i = 0;
	struct tx_ring *tx_ring;

	if (flag != KSTAT_READ)
		return (EACCES);

	qlge = ksp->ks_private;
	knp = ksp->ks_data;

	/* order must match the ql_kstats_reg name table */
	(knp++)->value.ui32 = ql_read_reg(qlge, REG_SYSTEM);
	(knp++)->value.ui32 = ql_read_reg(qlge, REG_RESET_FAILOVER);
	(knp++)->value.ui32 = ql_read_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL);
	(knp++)->value.ui32 = ql_read_reg(qlge, REG_STATUS);
	(knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_ENABLE);
	(knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
	(knp++)->value.ui32 = ql_read_reg(qlge, REG_ERROR_STATUS);

	/*
	 * The XGMAC registers require the hardware semaphore.  On
	 * failure we return 0 (success) without touching the remaining
	 * entries, so they keep whatever values the previous update
	 * left behind.
	 */
	if (ql_sem_spinlock(qlge, qlge->xgmac_sem_mask)) {
		return (0);
	}
	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_FLOW_CONTROL, &val32);
	(knp++)->value.ui32 = val32;

	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_TX_PAUSE_PKTS, &val32);
	(knp++)->value.ui32 = val32;

	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_PAUSE_PKTS, &val32);
	(knp++)->value.ui32 = val32;

	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_FIFO_DROPS, &val32);
	(knp++)->value.ui32 = val32;

	ql_sem_unlock(qlge, qlge->xgmac_sem_mask);

	(knp++)->value.ui32 = qlge->intr_cnt;

	for (i = 0; i < 8; i++) {
		(knp++)->value.ui32 = qlge->rx_interrupts[i];
	}

	for (i = 0; i < 8; i++) {
		(knp++)->value.ui32 = qlge->rx_polls[i];
	}

	/* per-ring tx stats are only exported for ring 0 */
	tx_ring = &qlge->tx_ring[0];
	(knp++)->value.ui32 = tx_ring->defer;
	(knp++)->value.ui32 = tx_ring->tx_fail_dma_bind;
	(knp++)->value.ui32 = tx_ring->tx_no_dma_handle;
	(knp++)->value.ui32 = tx_ring->tx_no_dma_cookie;

	(knp++)->value.ui32 = qlge->fw_version_info.major_version;
	(knp++)->value.ui32 = qlge->fw_version_info.minor_version;
	(knp++)->value.ui32 = qlge->fw_version_info.sub_minor_version;

	return (0);
}


static kstat_t *
ql_setup_named_kstat(qlge_t *qlge, int instance, char *name,
    const ql_ksindex_t *ksip, size_t size, int (*update)(kstat_t *, int))
{
	kstat_t *ksp;
	kstat_named_t *knp;
	char *np;
	int type;

	/*
	 * 'size' arrives as sizeof (the table); convert to an entry
	 * count, then subtract one for the {-1, NULL} terminator.
	 */
	size /= sizeof (ql_ksindex_t);
	ksp = kstat_create(ADAPTER_NAME, instance, name, "net",
	    KSTAT_TYPE_NAMED, ((uint32_t)size) - 1, KSTAT_FLAG_PERSISTENT);
	if (ksp == NULL)
		return (NULL);

	ksp->ks_private = qlge;
	ksp->ks_update = update;
	for (knp = ksp->ks_data; (np = ksip->name) != NULL; ++knp, ++ksip) {
		/* a leading '&' in the name marks a char-valued stat */
		switch (*np) {
		default:
			type = KSTAT_DATA_UINT32;
			break;
		case '&':
			np += 1;
			type = KSTAT_DATA_CHAR;
			break;
		}
		kstat_named_init(knp, np, (uint8_t)type);
	}
	kstat_install(ksp);

	return (ksp);
}

/*
 * Setup various kstat
 * NOTE(review): on a mid-way failure the kstats created so far are not
 * deleted here; ql_fini_kstats() cleans up whatever was created.
 */
int
ql_init_kstats(qlge_t *qlge)
{
	/* Hardware KStats */
	qlge->ql_kstats[QL_KSTAT_CHIP] = ql_setup_named_kstat(qlge,
	    qlge->instance, "chip", ql_kstats_hw,
	    sizeof (ql_kstats_hw), ql_kstats_get_pci_regs);
	if (qlge->ql_kstats[QL_KSTAT_CHIP] == NULL) {
		return (DDI_FAILURE);
	}

	/* MII KStats */
	qlge->ql_kstats[QL_KSTAT_LINK] = ql_setup_named_kstat(qlge,
	    qlge->instance, "mii", ql_kstats_mii,
	    sizeof (ql_kstats_mii), ql_kstats_mii_update);
	if (qlge->ql_kstats[QL_KSTAT_LINK] == NULL) {
		return (DDI_FAILURE);
	}

	/* REG KStats */
	qlge->ql_kstats[QL_KSTAT_REG] = ql_setup_named_kstat(qlge,
	    qlge->instance, "reg", ql_kstats_reg,
	    sizeof (ql_kstats_reg), ql_kstats_get_reg_and_dev_stats);
	if (qlge->ql_kstats[QL_KSTAT_REG] == NULL) {
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * delete all kstat
 */
void
ql_fini_kstats(qlge_t *qlge)
{
	int i;

	for (i = 0; i < QL_KSTAT_COUNT; i++) {
		if (qlge->ql_kstats[i] != NULL)
			kstat_delete(qlge->ql_kstats[i]);
	}
}

/* ************************************************************************* */
/*
 * kstat end
 */
/* ************************************************************************* */

/*
 * Setup the parameters for receive and transmit rings including buffer sizes
 * and completion queue sizes
 */
static int
ql_setup_rings(qlge_t *qlge)
{
	uint8_t i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	uint16_t lbq_buf_size;

	/* large buffer size follows the configured MTU */
	lbq_buf_size = (uint16_t)
	    ((qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE);

	/*
	 * rx_ring[0] is always the default queue.
	 */
	/*
	 * qlge->rx_ring_count:
	 * Total number of rx_rings. This includes a number
	 * of outbound completion handler rx_rings, and a
	 * number of inbound completion handler rx_rings.
	 * rss is only enabled if we have more than 1 rx completion
	 * queue. If we have a single rx completion queue
	 * then all rx completions go to this queue and
	 * the last completion queue
	 */

	/* tx completion queues are numbered after all rss queues */
	qlge->tx_ring_first_cq_id = qlge->rss_ring_count;

	for (i = 0; i < qlge->tx_ring_count; i++) {
		tx_ring = &qlge->tx_ring[i];
		bzero((void *)tx_ring, sizeof (*tx_ring));
		tx_ring->qlge = qlge;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qlge->tx_ring_size;
		tx_ring->wq_size = (uint32_t)(
		    tx_ring->wq_len * sizeof (struct ob_mac_iocb_req));

		/*
		 * The completion queue ID for the tx rings start
		 * immediately after the last rss completion queue.
		 */
		tx_ring->cq_id = (uint16_t)(i + qlge->tx_ring_first_cq_id);
	}

	for (i = 0; i < qlge->rx_ring_count; i++) {
		rx_ring = &qlge->rx_ring[i];
		bzero((void *)rx_ring, sizeof (*rx_ring));
		rx_ring->qlge = qlge;
		rx_ring->cq_id = i;
		/* spread non-default rings across cpus round-robin */
		if (i != 0)
			rx_ring->cpu = (i) % qlge->rx_ring_count;
		else
			rx_ring->cpu = 0;

		if (i < qlge->rss_ring_count) {
			/*
			 * Inbound completions (RSS) queues
			 * Default queue is queue 0 which handles
			 * unicast plus bcast/mcast and async events.
			 * Other inbound queues handle unicast frames only.
			 */
			rx_ring->cq_len = qlge->rx_ring_size;
			rx_ring->cq_size = (uint32_t)
			    (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			/* lbq/sbq rings hold 64-bit buffer addresses */
			rx_ring->lbq_size = (uint32_t)
			    (rx_ring->lbq_len * sizeof (uint64_t));
			rx_ring->lbq_buf_size = lbq_buf_size;
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size = (uint32_t)
			    (rx_ring->sbq_len * sizeof (uint64_t));
			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
			rx_ring->type = RX_Q;

			QL_PRINT(DBG_GLD,
			    ("%s(%d)Allocating rss completion queue %d "
			    "on cpu %d\n", __func__, qlge->instance,
			    rx_ring->cq_id, rx_ring->cpu));
		} else {
			/*
			 * Outbound queue handles outbound completions only
			 */
			/* outbound cq is same size as tx_ring it services.
*/ 5701 rx_ring->cq_len = qlge->tx_ring_size; 5702 rx_ring->cq_size = (uint32_t) 5703 (rx_ring->cq_len * sizeof (struct net_rsp_iocb)); 5704 rx_ring->lbq_len = 0; 5705 rx_ring->lbq_size = 0; 5706 rx_ring->lbq_buf_size = 0; 5707 rx_ring->sbq_len = 0; 5708 rx_ring->sbq_size = 0; 5709 rx_ring->sbq_buf_size = 0; 5710 rx_ring->type = TX_Q; 5711 5712 QL_PRINT(DBG_GLD, 5713 ("%s(%d)Allocating TX completion queue %d on" 5714 " cpu %d\n", __func__, qlge->instance, 5715 rx_ring->cq_id, rx_ring->cpu)); 5716 } 5717 } 5718 5719 return (DDI_SUCCESS); 5720 } 5721 5722 static int 5723 ql_start_rx_ring(qlge_t *qlge, struct rx_ring *rx_ring) 5724 { 5725 struct cqicb_t *cqicb = (struct cqicb_t *)rx_ring->cqicb_dma.vaddr; 5726 void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr + 5727 (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE) 5728 /* first shadow area is used by wqicb's host copy of consumer index */ 5729 + sizeof (uint64_t); 5730 uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr + 5731 (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE) 5732 + sizeof (uint64_t); 5733 /* lrg/sml bufq pointers */ 5734 uint8_t *buf_q_base_reg = 5735 (uint8_t *)qlge->buf_q_ptr_base_addr_dma_attr.vaddr + 5736 (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE); 5737 uint64_t buf_q_base_reg_dma = 5738 qlge->buf_q_ptr_base_addr_dma_attr.dma_addr + 5739 (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE); 5740 caddr_t doorbell_area = 5741 qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * (128 + rx_ring->cq_id)); 5742 int err = 0; 5743 uint16_t bq_len; 5744 uint64_t tmp; 5745 uint64_t *base_indirect_ptr; 5746 int page_entries; 5747 5748 /* Set up the shadow registers for this ring. 
 */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;

	rx_ring->lbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
	rx_ring->lbq_base_indirect_dma = buf_q_base_reg_dma;

	QL_PRINT(DBG_INIT, ("%s rx ring(%d): prod_idx virtual addr = 0x%lx,"
	    " phys_addr 0x%lx\n", __func__, rx_ring->cq_id,
	    rx_ring->prod_idx_sh_reg, rx_ring->prod_idx_sh_reg_dma));

	/* the small bufq pointer area follows the large bufq area */
	buf_q_base_reg += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
	buf_q_base_reg_dma += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
	rx_ring->sbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
	rx_ring->sbq_base_indirect_dma = buf_q_base_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (uint32_t *)(void *)doorbell_area;
	rx_ring->cnsmr_idx = 0;
	*rx_ring->prod_idx_sh_reg = 0;
	rx_ring->curr_entry = rx_ring->cq_dma.vaddr;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = (uint32_t *)(void *)
	    ((uint8_t *)(void *)doorbell_area + 0x04);

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (uint32_t *)(void *)
	    ((uint8_t *)(void *)doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (uint32_t *)(void *)
	    ((uint8_t *)(void *)doorbell_area + 0x1c);

	bzero((void *)cqicb, sizeof (*cqicb));

	cqicb->msix_vect = (uint8_t)rx_ring->irq;

	/* a length of 65536 is encoded as 0 in the 16-bit hardware field */
	bq_len = (uint16_t)((rx_ring->cq_len == 65536) ? (uint16_t)0 :
	    (uint16_t)rx_ring->cq_len);
	cqicb->len = (uint16_t)cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->cq_base_addr_lo =
	    cpu_to_le32(LS_64BITS(rx_ring->cq_dma.dma_addr));
	cqicb->cq_base_addr_hi =
	    cpu_to_le32(MS_64BITS(rx_ring->cq_dma.dma_addr));

	cqicb->prod_idx_addr_lo =
	    cpu_to_le32(LS_64BITS(rx_ring->prod_idx_sh_reg_dma));
	cqicb->prod_idx_addr_hi =
	    cpu_to_le32(MS_64BITS(rx_ring->prod_idx_sh_reg_dma));

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC | /* Load queue base address */
	    FLAGS_LV | /* Load MSI-X vector */
	    FLAGS_LI; /* Load irq delay values */
	if (rx_ring->lbq_len) {
		/* Load lbq values */
		cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LL);
		tmp = (uint64_t)rx_ring->lbq_dma.dma_addr;
		base_indirect_ptr = (uint64_t *)rx_ring->lbq_base_indirect;
		page_entries = 0;
		/* one indirect entry per VM page of the large buffer queue */
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += VM_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < (int)(
		    ((rx_ring->lbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));

		cqicb->lbq_addr_lo =
		    cpu_to_le32(LS_64BITS(rx_ring->lbq_base_indirect_dma));
		cqicb->lbq_addr_hi =
		    cpu_to_le32(MS_64BITS(rx_ring->lbq_base_indirect_dma));
		/* 65536 encodes as 0, same as the cq length above */
		bq_len = (uint16_t)((rx_ring->lbq_buf_size == 65536) ?
		    (uint16_t)0 : (uint16_t)rx_ring->lbq_buf_size);
		cqicb->lbq_buf_size = (uint16_t)cpu_to_le16(bq_len);
		bq_len = (uint16_t)((rx_ring->lbq_len == 65536) ? (uint16_t)0 :
		    (uint16_t)rx_ring->lbq_len);
		cqicb->lbq_len = (uint16_t)cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_curr_idx = 0;
	}
	if (rx_ring->sbq_len) {
		/* Load sbq values */
		cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LS);
		tmp = (uint64_t)rx_ring->sbq_dma.dma_addr;
		base_indirect_ptr = (uint64_t *)rx_ring->sbq_base_indirect;
		page_entries = 0;

		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += VM_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < (uint32_t)
		    (((rx_ring->sbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));

		cqicb->sbq_addr_lo =
		    cpu_to_le32(LS_64BITS(rx_ring->sbq_base_indirect_dma));
		cqicb->sbq_addr_hi =
		    cpu_to_le32(MS_64BITS(rx_ring->sbq_base_indirect_dma));
		/* hardware wants half of the allocated sbq buffer size */
		cqicb->sbq_buf_size = (uint16_t)
		    cpu_to_le16((uint16_t)(rx_ring->sbq_buf_size/2));
		bq_len = (uint16_t)((rx_ring->sbq_len == 65536) ?
		    (uint16_t)0 : (uint16_t)rx_ring->sbq_len);
		cqicb->sbq_len = (uint16_t)cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_curr_idx = 0;
	}
	/* interrupt coalescing depends on what this queue services */
	switch (rx_ring->type) {
	case TX_Q:
		cqicb->irq_delay = (uint16_t)
		    cpu_to_le16(qlge->tx_coalesce_usecs);
		cqicb->pkt_delay = (uint16_t)
		    cpu_to_le16(qlge->tx_max_coalesced_frames);
		break;

	case DEFAULT_Q:
		/* no coalescing on the default queue */
		cqicb->irq_delay = 0;
		cqicb->pkt_delay = 0;
		break;

	case RX_Q:
		/*
		 * Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		cqicb->irq_delay = (uint16_t)
		    cpu_to_le16(qlge->rx_coalesce_usecs);
		cqicb->pkt_delay = (uint16_t)
		    cpu_to_le16(qlge->rx_max_coalesced_frames);
		break;
	default:
		cmn_err(CE_WARN, "Invalid rx_ring->type = %d.",
		    rx_ring->type);
	}
	QL_PRINT(DBG_INIT, ("Initializing rx completion queue %d.\n",
	    rx_ring->cq_id));
	/* QL_DUMP_CQICB(qlge, cqicb); */
	err = ql_write_cfg(qlge, CFG_LCQ, rx_ring->cqicb_dma.dma_addr,
	    rx_ring->cq_id);
	if (err) {
		cmn_err(CE_WARN, "Failed to load CQICB.");
		return (err);
	}

	/* reset the per-ring software statistics */
	rx_ring->rx_packets_dropped_no_buffer = 0;
	rx_ring->rx_pkt_dropped_mac_unenabled = 0;
	rx_ring->rx_failed_sbq_allocs = 0;
	rx_ring->rx_failed_lbq_allocs = 0;
	rx_ring->rx_packets = 0;
	rx_ring->rx_bytes = 0;
	rx_ring->frame_too_long = 0;
	rx_ring->frame_too_short = 0;
	rx_ring->fcs_err = 0;

	return (err);
}

/*
 * start RSS
 * Build and download the RSS initialization control block (RICB):
 * hash keys, hash-to-cq indirection table and enable flags.
 */
static int
ql_start_rss(qlge_t *qlge)
{
	struct ricb *ricb = (struct ricb *)qlge->ricb_dma.vaddr;
	int status = 0;
	int i;
	uint8_t *hash_id = (uint8_t *)ricb->hash_cq_id;

	bzero((void *)ricb, sizeof (*ricb));

	ricb->base_cq = RSS_L4K;
	ricb->flags =
	    (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
	    RSS_RT6);
	ricb->mask = (uint16_t)cpu_to_le16(RSS_HASH_CQ_ID_MAX - 1);

	/*
	 * Fill out the Indirection Table.
 */
	/*
	 * Map every hash bucket onto one of the rss rings; the mask trick
	 * assumes rss_ring_count is a power of two — TODO confirm.
	 */
	for (i = 0; i < RSS_HASH_CQ_ID_MAX; i++)
		hash_id[i] = (uint8_t)(i & (qlge->rss_ring_count - 1));

	(void) memcpy(&ricb->ipv6_hash_key[0], key_data, 40);
	(void) memcpy(&ricb->ipv4_hash_key[0], key_data, 16);

	QL_PRINT(DBG_INIT, ("Initializing RSS.\n"));

	status = ql_write_cfg(qlge, CFG_LR, qlge->ricb_dma.dma_addr, 0);
	if (status) {
		cmn_err(CE_WARN, "Failed to load RICB.");
		return (status);
	}

	return (status);
}

/*
 * load a tx ring control block to hw and start this ring
 * Sets up the doorbell and consumer-index shadow registers, fills in
 * the WQICB and downloads it with ql_write_cfg(CFG_LRQ).
 */
static int
ql_start_tx_ring(qlge_t *qlge, struct tx_ring *tx_ring)
{
	struct wqicb_t *wqicb = (struct wqicb_t *)tx_ring->wqicb_dma.vaddr;
	/* tx doorbell pages are the first pages of the doorbell space */
	caddr_t doorbell_area =
	    qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
	    (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
	uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
	    (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */

	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (uint32_t *)(void *)doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = (uint32_t *)(void *)
	    ((uint8_t *)(void *)doorbell_area + 0x04);

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
	*tx_ring->cnsmr_idx_sh_reg = 0;

	QL_PRINT(DBG_INIT, ("%s tx ring(%d): cnsmr_idx virtual addr = 0x%lx,"
	    " phys_addr 0x%lx\n",
	    __func__, tx_ring->wq_id, tx_ring->cnsmr_idx_sh_reg,
	    tx_ring->cnsmr_idx_sh_reg_dma));

	wqicb->len =
	    (uint16_t)cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
	    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	/* completions for this work queue go to its paired cq */
	wqicb->cq_id_rss = (uint16_t)cpu_to_le16(tx_ring->cq_id);
	wqicb->rid = 0;
	wqicb->wq_addr_lo = cpu_to_le32(LS_64BITS(tx_ring->wq_dma.dma_addr));
	wqicb->wq_addr_hi = cpu_to_le32(MS_64BITS(tx_ring->wq_dma.dma_addr));
	wqicb->cnsmr_idx_addr_lo =
	    cpu_to_le32(LS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
	wqicb->cnsmr_idx_addr_hi =
	    cpu_to_le32(MS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));

	ql_init_tx_ring(tx_ring);
	/* QL_DUMP_WQICB(qlge, wqicb); */
	err = ql_write_cfg(qlge, CFG_LRQ, tx_ring->wqicb_dma.dma_addr,
	    tx_ring->wq_id);

	if (err) {
		cmn_err(CE_WARN, "Failed to load WQICB.");
		return (err);
	}
	return (err);
}

/*
 * Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 * Each CAM entry is written as three indexed register accesses
 * (lower 32 bits, upper 16 bits, then the output/routing word), with
 * a ready-wait before each write.
 */
int
ql_set_mac_addr_reg(qlge_t *qlge, uint8_t *addr, uint32_t type,
    uint16_t index)
{
	uint32_t offset = 0;
	int status = DDI_SUCCESS;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC: {
		uint32_t cam_output;
		/* split the 6-byte MAC into the 16/32-bit register halves */
		uint32_t upper = (addr[0] << 8) | addr[1];
		uint32_t lower =
		    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
		    (addr[5]);

		QL_PRINT(DBG_INIT, ("Adding %s ", (type ==
		    MAC_ADDR_TYPE_MULTI_MAC) ?
		    "MULTICAST" : "UNICAST"));
		QL_PRINT(DBG_INIT,
		    ("addr %02x %02x %02x %02x %02x %02x at index %d in "
		    "the CAM.\n",
		    addr[0], addr[1], addr[2], addr[3], addr[4],
		    addr[5], index));

		status = ql_wait_reg_rdy(qlge,
		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		/* offset 0 - lower 32 bits of the MAC address */
		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
		    (offset++) |
		    (index << MAC_ADDR_IDX_SHIFT) | /* index */
		    type); /* type */
		ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, lower);
		status = ql_wait_reg_rdy(qlge,
		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		/* offset 1 - upper 16 bits of the MAC address */
		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
		    (offset++) |
		    (index << MAC_ADDR_IDX_SHIFT) | /* index */
		    type); /* type */
		ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, upper);
		status = ql_wait_reg_rdy(qlge,
		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		/* offset 2 - CQ ID associated with this MAC address */
		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
		    (offset) | (index << MAC_ADDR_IDX_SHIFT) | /* index */
		    type); /* type */
		/*
		 * This field should also include the queue id
		 * and possibly the function id. Right now we hardcode
		 * the route field to NIC core.
		 */
		if (type == MAC_ADDR_TYPE_CAM_MAC) {
			cam_output = (CAM_OUT_ROUTE_NIC |
			    (qlge->func_number << CAM_OUT_FUNC_SHIFT) |
			    (0 <<
			    CAM_OUT_CQ_ID_SHIFT));

			/* route to NIC core */
			ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA,
			    cam_output);
		}
		break;
	}
	default:
		cmn_err(CE_WARN,
		    "Address type %d not yet supported.", type);
		status = DDI_FAILURE;
	}
exit:
	return (status);
}

/*
 * The NIC function for this chip has 16 routing indexes.
Each one can be used
 * to route different frame types to various inbound queues. We send broadcast
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int
ql_set_routing_reg(qlge_t *qlge, uint32_t index, uint32_t mask, int enable)
{
	int status;
	uint32_t value = 0;

	QL_PRINT(DBG_INIT,
	    ("%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
	    (enable ? "Adding" : "Removing"),
	    ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
	    ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
	    ((index ==
	    RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
	    ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
	    ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
	    ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
	    ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
	    ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
	    ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
	    ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
	    ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
	    ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
	    ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
	    ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
	    ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
	    ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
	    (enable ? "to" : "from")));

	/* translate the mask into a destination/type/slot register value */
	switch (mask) {
	case RT_IDX_CAM_HIT:
		value = RT_IDX_DST_CAM_Q | /* dest */
		    RT_IDX_TYPE_NICQ | /* type */
		    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;

	case RT_IDX_VALID: /* Promiscuous Mode frames. */
		value = RT_IDX_DST_DFLT_Q | /* dest */
		    RT_IDX_TYPE_NICQ | /* type */
		    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;

	case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
		value = RT_IDX_DST_DFLT_Q | /* dest */
		    RT_IDX_TYPE_NICQ | /* type */
		    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;

	case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
		value = RT_IDX_DST_DFLT_Q | /* dest */
		    RT_IDX_TYPE_NICQ | /* type */
		    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;

	case RT_IDX_MCAST: /* Pass up All Multicast frames. */
		value = RT_IDX_DST_CAM_Q | /* dest */
		    RT_IDX_TYPE_NICQ | /* type */
		    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;

	case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
		value = RT_IDX_DST_CAM_Q | /* dest */
		    RT_IDX_TYPE_NICQ | /* type */
		    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;

	case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
		value = RT_IDX_DST_RSS | /* dest */
		    RT_IDX_TYPE_NICQ | /* type */
		    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;

	case 0: /* Clear the E-bit on an entry. */
		value = RT_IDX_DST_DFLT_Q | /* dest */
		    RT_IDX_TYPE_NICQ | /* type */
		    (index << RT_IDX_IDX_SHIFT); /* index */
		break;

	default:
		cmn_err(CE_WARN, "Mask type %d not yet supported.",
		    mask);
		status = -EPERM;
		goto exit;
	}

	/* every recognized mask produces a non-zero value, so status is set */
	if (value != 0) {
		status = ql_wait_reg_rdy(qlge, REG_ROUTING_INDEX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write_reg(qlge, REG_ROUTING_INDEX, value);
		ql_write_reg(qlge, REG_ROUTING_DATA, enable ? mask : 0);
	}

exit:
	return (status);
}

/*
 * Clear all the entries in the routing table.
 * Caller must get semaphore in advance.
 * NOTE(review): only the status of the last slot is returned; failures
 * on earlier slots are logged but then overwritten.
 */

static int
ql_stop_routing(qlge_t *qlge)
{
	int status = 0;
	int i;
	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qlge, i, 0, 0);
		if (status) {
			cmn_err(CE_WARN, "Stop routing failed. ");
		}
	}
	return (status);
}

/* Initialize the frame-to-queue routing. */
static int
ql_route_initialize(qlge_t *qlge)
{
	int status = 0;

	/* the routing-index registers are shared; take the hw semaphore */
	status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
	if (status != DDI_SUCCESS)
		return (status);

	/* Clear all the entries in the routing table. */
	status = ql_stop_routing(qlge);
	if (status) {
		goto exit;
	}
	status = ql_set_routing_reg(qlge, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		cmn_err(CE_WARN,
		    "Failed to init routing register for broadcast packets.");
		goto exit;
	}
	/*
	 * If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
 */
	if (qlge->rss_ring_count > 1) {
		status = ql_set_routing_reg(qlge, RT_IDX_RSS_MATCH_SLOT,
		    RT_IDX_RSS_MATCH, 1);
		if (status) {
			cmn_err(CE_WARN,
			    "Failed to init routing register for MATCH RSS "
			    "packets.");
			goto exit;
		}
	}

	status = ql_set_routing_reg(qlge, RT_IDX_CAM_HIT_SLOT,
	    RT_IDX_CAM_HIT, 1);
	if (status) {
		cmn_err(CE_WARN,
		    "Failed to init routing register for CAM packets.");
		goto exit;
	}

	status = ql_set_routing_reg(qlge, RT_IDX_MCAST_MATCH_SLOT,
	    RT_IDX_MCAST_MATCH, 1);
	if (status) {
		cmn_err(CE_WARN,
		    "Failed to init routing register for Multicast "
		    "packets.");
	}

exit:
	/* always release the hw semaphore, even on failure */
	ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
	return (status);
}

/*
 * Initialize hardware
 * Programs the global chip registers (error halt, default queue, MPI
 * interrupt, function-specific control), reconciles the port's max frame
 * size / pause settings with the OS configuration, then starts every
 * rx ring, RSS (when enabled) and every tx ring, and finally loads the
 * frame routing table.
 */
static int
ql_device_initialize(qlge_t *qlge)
{
	uint32_t value, mask, required_max_frame_size;
	int i;
	int status = 0;
	uint16_t pause = PAUSE_MODE_DISABLED;
	boolean_t update_port_config = B_FALSE;
	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	/* the upper 16 bits act as a write mask for the lower 16 */
	mask = value << 16;
	ql_write_reg(qlge, REG_SYSTEM, mask | value);

	/* Set the default queue. */
	value = NIC_RCV_CFG_DFQ;
	mask = NIC_RCV_CFG_DFQ_MASK;

	ql_write_reg(qlge, REG_NIC_RECEIVE_CONFIGURATION, mask | value);

	/* Enable the MPI interrupt. */
	ql_write_reg(qlge, REG_INTERRUPT_MASK, (INTR_MASK_PI << 16)
	    | INTR_MASK_PI);
	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K | FSC_DBRST_1024;
	/* Set/clear header splitting. */
	if (CFG_IST(qlge, CFG_ENABLE_SPLIT_HEADER)) {
		value |= FSC_SH;
		ql_write_reg(qlge, REG_SPLIT_HEADER, SMALL_BUFFER_SIZE);
	}
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL, mask | value);
	/*
	 * check current port max frame size, if different from OS setting,
	 * then we need to change
	 */
	required_max_frame_size =
	    (qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE;

	if (ql_get_port_cfg(qlge) == DDI_SUCCESS) {
		/* if correct frame size but different from required size */
		if (qlge->port_cfg_info.max_frame_size !=
		    required_max_frame_size) {
			QL_PRINT(DBG_MBX,
			    ("update frame size, current %d, new %d\n",
			    qlge->port_cfg_info.max_frame_size,
			    required_max_frame_size));
			qlge->port_cfg_info.max_frame_size =
			    required_max_frame_size;
			update_port_config = B_TRUE;
		}
		if (qlge->port_cfg_info.link_cfg & STD_PAUSE)
			pause = PAUSE_MODE_STANDARD;
		else if (qlge->port_cfg_info.link_cfg & PP_PAUSE)
			pause = PAUSE_MODE_PER_PRIORITY;
		if (pause != qlge->pause) {
			update_port_config = B_TRUE;
		}
		/*
		 * Always update port config for now to work around
		 * a hardware bug
		 */
		update_port_config = B_TRUE;

		/* if need to update port configuration */
		if (update_port_config)
			(void) ql_set_port_cfg(qlge);
	} else
		cmn_err(CE_WARN, "ql_get_port_cfg failed");

	/* Start up the rx queues. */
	for (i = 0; i < qlge->rx_ring_count; i++) {
		status = ql_start_rx_ring(qlge, &qlge->rx_ring[i]);
		if (status) {
			cmn_err(CE_WARN,
			    "Failed to start rx ring[%d]", i);
			return (status);
		}
	}

	/*
	 * If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qlge->rss_ring_count > 1) {
		status = ql_start_rss(qlge);
		if (status) {
			cmn_err(CE_WARN, "Failed to start RSS.");
			return (status);
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qlge->tx_ring_count; i++) {
		status = ql_start_tx_ring(qlge, &qlge->tx_ring[i]);
		if (status) {
			cmn_err(CE_WARN,
			    "Failed to start tx ring[%d]", i);
			return (status);
		}
	}
	qlge->selected_tx_ring = 0;
	/* Set the frame routing filter. */
	status = ql_route_initialize(qlge);
	if (status) {
		cmn_err(CE_WARN,
		    "Failed to init CAM/Routing tables.");
		return (status);
	}

	return (status);
}

/*
 * Issue soft reset to chip.
 * Polls up to 3 seconds for the chip to clear the reset bit; returns
 * DDI_FAILURE on timeout.
 */
static int
ql_asic_reset(qlge_t *qlge)
{
	uint32_t value;
	int max_wait_time = 3;
	int status = DDI_SUCCESS;

	ql_write_reg(qlge, REG_RESET_FAILOVER, FUNCTION_RESET_MASK
	    |FUNCTION_RESET);

	max_wait_time = 3;
	/* poll once per second until FUNCTION_RESET self-clears */
	do {
		value = ql_read_reg(qlge, REG_RESET_FAILOVER);
		if ((value & FUNCTION_RESET) == 0)
			break;
		qlge_delay(QL_ONE_SEC_DELAY);
	} while ((--max_wait_time));

	if (max_wait_time == 0) {
		cmn_err(CE_WARN,
		    "TIMEOUT!!! errored out of resetting the chip!");
		status = DDI_FAILURE;
	}

	return (status);
}

/*
 * If there are more than MIN_BUFFERS_ARM_COUNT small buffer descriptors in
 * its free list, move xMIN_BUFFERS_ARM_COUNT descriptors to its in use list
 * to be used by hardware.
 */
static void
ql_arm_sbuf(qlge_t *qlge, struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc;
	/* NOTE(review): signed i is compared against unsigned arm_count;
	 * safe while arm_count stays within int range. */
	int i;
	uint64_t *sbq_entry = rx_ring->sbq_dma.vaddr;
	uint32_t arm_count;

	if (rx_ring->sbuf_free_count > rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT)
		arm_count = (rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT);
	else {
		/* Adjust to a multiple of 16 */
		arm_count = (rx_ring->sbuf_free_count / 16) * 16;
#ifdef QLGE_LOAD_UNLOAD
		cmn_err(CE_NOTE, "adjust sbuf arm_count %d\n", arm_count);
#endif
	}
	for (i = 0; i < arm_count; i++) {
		sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
		if (sbq_desc == NULL)
			break;
		/* Arm asic */
		*sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
		sbq_entry++;

		/* link the descriptors to in_use_list */
		ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
		rx_ring->sbq_prod_idx++;
	}
	/* tell the chip about the newly armed buffers */
	ql_update_sbq_prod_idx(qlge, rx_ring);
}

/*
 * If there are more than MIN_BUFFERS_ARM_COUNT large buffer descriptors in
 * its free list, move xMIN_BUFFERS_ARM_COUNT descriptors to its in use list
 * to be used by hardware.
 */
static void
ql_arm_lbuf(qlge_t *qlge, struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;
	int i;
	uint64_t *lbq_entry = rx_ring->lbq_dma.vaddr;
	uint32_t arm_count;

	if (rx_ring->lbuf_free_count > rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT)
		arm_count = (rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT);
	else {
		/* Adjust to a multiple of 16 */
		arm_count = (rx_ring->lbuf_free_count / 16) * 16;
#ifdef QLGE_LOAD_UNLOAD
		cmn_err(CE_NOTE, "adjust lbuf arm_count %d\n", arm_count);
#endif
	}
	for (i = 0; i < arm_count; i++) {
		lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
		if (lbq_desc == NULL)
			break;
		/* Arm asic */
		*lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
		lbq_entry++;

		/* link the descriptors to in_use_list */
		ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
		rx_ring->lbq_prod_idx++;
	}
	/* tell the chip about the newly armed buffers */
	ql_update_lbq_prod_idx(qlge, rx_ring);
}


/*
 * Initializes the adapter by configuring request and response queues,
 * allocates and ARMs small and large receive buffers to the
 * hardware
 * Caller holds qlge->hw_mutex; it is dropped and re-taken around
 * enabling completion interrupts (see comment below).
 */
static int
ql_bringup_adapter(qlge_t *qlge)
{
	int i;

	if (ql_device_initialize(qlge) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "?%s(%d): ql_device_initialize failed",
		    __func__, qlge->instance);
		goto err_bringup;
	}
	qlge->sequence |= INIT_ADAPTER_UP;

#ifdef QLGE_TRACK_BUFFER_USAGE
	for (i = 0; i < qlge->rx_ring_count; i++) {
		if (qlge->rx_ring[i].type != TX_Q) {
			qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
			qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
		}
		qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
	}
#endif
	/* Arm buffers */
	for (i = 0; i < qlge->rx_ring_count; i++) {
		if (qlge->rx_ring[i].type != TX_Q) {
			ql_arm_sbuf(qlge, &qlge->rx_ring[i]);
			ql_arm_lbuf(qlge, &qlge->rx_ring[i]);
		}
	}

	/* Enable work/request queues */
	for (i = 0; i < qlge->tx_ring_count; i++) {
		if (qlge->tx_ring[i].valid_db_reg)
			ql_write_doorbell_reg(qlge,
			    qlge->tx_ring[i].valid_db_reg,
			    REQ_Q_VALID);
	}

	/* Enable completion queues */
	for (i = 0; i < qlge->rx_ring_count; i++) {
		if (qlge->rx_ring[i].valid_db_reg)
			ql_write_doorbell_reg(qlge,
			    qlge->rx_ring[i].valid_db_reg,
			    RSP_Q_VALID);
	}

	/* mark every ring started, under its own lock */
	for (i = 0; i < qlge->tx_ring_count; i++) {
		mutex_enter(&qlge->tx_ring[i].tx_lock);
		qlge->tx_ring[i].mac_flags = QL_MAC_STARTED;
		mutex_exit(&qlge->tx_ring[i].tx_lock);
	}

	for (i = 0; i < qlge->rx_ring_count; i++) {
		mutex_enter(&qlge->rx_ring[i].rx_lock);
		qlge->rx_ring[i].mac_flags = QL_MAC_STARTED;
		mutex_exit(&qlge->rx_ring[i].rx_lock);
	}

	/* This mutex will get re-acquired in enable_completion interrupt */
	mutex_exit(&qlge->hw_mutex);
	/* Traffic can start flowing now */
	ql_enable_all_completion_interrupts(qlge);
	mutex_enter(&qlge->hw_mutex);

	ql_enable_global_interrupt(qlge);

	qlge->sequence |= ADAPTER_INIT;
	return (DDI_SUCCESS);

err_bringup:
	/* put the chip back into a known state on failure */
	(void) ql_asic_reset(qlge);
	return (DDI_FAILURE);
}

/*
 * Initialize mutexes of each rx/tx rings
 * Must run after interrupt setup: the mutexes are created at the
 * interrupt priority recorded in qlge->intr_pri.
 */
static int
ql_init_rx_tx_locks(qlge_t *qlge)
{
	struct tx_ring *tx_ring;
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qlge->tx_ring_count; i++) {
		tx_ring = &qlge->tx_ring[i];
		mutex_init(&tx_ring->tx_lock, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(qlge->intr_pri));
	}

	for (i = 0; i < qlge->rx_ring_count; i++) {
		rx_ring = &qlge->rx_ring[i];
		mutex_init(&rx_ring->rx_lock, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(qlge->intr_pri));
		mutex_init(&rx_ring->sbq_lock, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(qlge->intr_pri));
		mutex_init(&rx_ring->lbq_lock, NULL, MUTEX_DRIVER,
DDI_INTR_PRI(qlge->intr_pri)); 6601 } 6602 6603 return (DDI_SUCCESS); 6604 } 6605 6606 /* 6607 * ql_attach - Driver attach. 6608 */ 6609 static int 6610 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 6611 { 6612 int instance; 6613 qlge_t *qlge; 6614 int rval; 6615 uint16_t w; 6616 mac_register_t *macp = NULL; 6617 rval = DDI_FAILURE; 6618 6619 /* first get the instance */ 6620 instance = ddi_get_instance(dip); 6621 6622 switch (cmd) { 6623 case DDI_ATTACH: 6624 /* 6625 * Check that hardware is installed in a DMA-capable slot 6626 */ 6627 if (ddi_slaveonly(dip) == DDI_SUCCESS) { 6628 cmn_err(CE_WARN, "?%s(%d): Not installed in a " 6629 "DMA-capable slot", ADAPTER_NAME, instance); 6630 break; 6631 } 6632 6633 /* 6634 * No support for high-level interrupts 6635 */ 6636 if (ddi_intr_hilevel(dip, 0) != 0) { 6637 cmn_err(CE_WARN, "?%s(%d): No support for high-level" 6638 " intrs", ADAPTER_NAME, instance); 6639 break; 6640 } 6641 6642 /* 6643 * Allocate our per-device-instance structure 6644 */ 6645 6646 qlge = (qlge_t *)kmem_zalloc(sizeof (*qlge), KM_SLEEP); 6647 ASSERT(qlge != NULL); 6648 6649 qlge->sequence |= INIT_SOFTSTATE_ALLOC; 6650 6651 qlge->dip = dip; 6652 qlge->instance = instance; 6653 6654 /* 6655 * Setup the ISP8x00 registers address mapping to be 6656 * accessed by this particular driver. 
6657 * 0x0 Configuration Space 6658 * 0x1 I/O Space 6659 * 0x2 1st Memory Space address - Control Register Set 6660 * 0x3 2nd Memory Space address - Doorbell Memory Space 6661 */ 6662 6663 w = 2; 6664 if (ddi_regs_map_setup(dip, w, (caddr_t *)&qlge->iobase, 0, 6665 sizeof (dev_reg_t), &ql_dev_acc_attr, 6666 &qlge->dev_handle) != DDI_SUCCESS) { 6667 cmn_err(CE_WARN, "%s(%d): Unable to map device " 6668 "registers", ADAPTER_NAME, instance); 6669 ql_free_resources(dip, qlge); 6670 break; 6671 } 6672 6673 QL_PRINT(DBG_GLD, ("ql_attach: I/O base = 0x%x\n", 6674 qlge->iobase)); 6675 6676 qlge->sequence |= INIT_REGS_SETUP; 6677 6678 /* map Doorbell memory space */ 6679 w = 3; 6680 if (ddi_regs_map_setup(dip, w, 6681 (caddr_t *)&qlge->doorbell_reg_iobase, 0, 6682 0x100000 /* sizeof (dev_doorbell_reg_t) */, 6683 &ql_dev_acc_attr, 6684 &qlge->dev_doorbell_reg_handle) != DDI_SUCCESS) { 6685 cmn_err(CE_WARN, "%s(%d): Unable to map Doorbell " 6686 "registers", 6687 ADAPTER_NAME, instance); 6688 ql_free_resources(dip, qlge); 6689 break; 6690 } 6691 6692 QL_PRINT(DBG_GLD, ("ql_attach: Doorbell I/O base = 0x%x\n", 6693 qlge->doorbell_reg_iobase)); 6694 6695 qlge->sequence |= INIT_DOORBELL_REGS_SETUP; 6696 6697 /* 6698 * Allocate a macinfo structure for this instance 6699 */ 6700 if ((macp = mac_alloc(MAC_VERSION)) == NULL) { 6701 cmn_err(CE_WARN, "%s(%d): mac_alloc failed", 6702 __func__, instance); 6703 ql_free_resources(dip, qlge); 6704 return (NULL); 6705 } 6706 /* save adapter status to dip private data */ 6707 ddi_set_driver_private(dip, qlge); 6708 QL_PRINT(DBG_INIT, ("%s(%d): Allocate macinfo structure done\n", 6709 ADAPTER_NAME, instance)); 6710 6711 qlge->sequence |= INIT_MAC_ALLOC; 6712 6713 /* 6714 * Attach this instance of the device 6715 */ 6716 /* Setup PCI Local Bus Configuration resource. 
*/ 6717 if (pci_config_setup(dip, &qlge->pci_handle) != DDI_SUCCESS) { 6718 cmn_err(CE_WARN, "%s(%d):Unable to get PCI resources", 6719 ADAPTER_NAME, instance); 6720 ql_free_resources(dip, qlge); 6721 break; 6722 } 6723 6724 qlge->sequence |= INIT_PCI_CONFIG_SETUP; 6725 6726 if (ql_init_instance(qlge) != DDI_SUCCESS) { 6727 cmn_err(CE_WARN, "%s(%d): Unable to initialize device " 6728 "instance", ADAPTER_NAME, instance); 6729 ql_free_resources(dip, qlge); 6730 break; 6731 } 6732 6733 /* Setup interrupt vectors */ 6734 if (ql_alloc_irqs(qlge) != DDI_SUCCESS) { 6735 ql_free_resources(dip, qlge); 6736 break; 6737 } 6738 qlge->sequence |= INIT_INTR_ALLOC; 6739 6740 /* Configure queues */ 6741 if (ql_setup_rings(qlge) != DDI_SUCCESS) { 6742 ql_free_resources(dip, qlge); 6743 break; 6744 } 6745 6746 qlge->sequence |= INIT_SETUP_RINGS; 6747 /* 6748 * Map queues to interrupt vectors 6749 */ 6750 ql_resolve_queues_to_irqs(qlge); 6751 /* 6752 * Add interrupt handlers 6753 */ 6754 if (ql_add_intr_handlers(qlge) != DDI_SUCCESS) { 6755 cmn_err(CE_WARN, "Failed to add interrupt " 6756 "handlers"); 6757 ql_free_resources(dip, qlge); 6758 break; 6759 } 6760 6761 qlge->sequence |= INIT_ADD_INTERRUPT; 6762 QL_PRINT(DBG_GLD, ("%s(%d): Add interrupt handler done\n", 6763 ADAPTER_NAME, instance)); 6764 6765 /* Initialize mutex, need the interrupt priority */ 6766 (void) ql_init_rx_tx_locks(qlge); 6767 6768 qlge->sequence |= INIT_LOCKS_CREATED; 6769 6770 /* 6771 * Use a soft interrupt to do something that we do not want 6772 * to do in regular network functions or with mutexs being held 6773 */ 6774 if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_event_intr_hdl, 6775 DDI_INTR_SOFTPRI_MIN, ql_mpi_event_work, (caddr_t)qlge) 6776 != DDI_SUCCESS) { 6777 ql_free_resources(dip, qlge); 6778 break; 6779 } 6780 6781 if (ddi_intr_add_softint(qlge->dip, &qlge->asic_reset_intr_hdl, 6782 DDI_INTR_SOFTPRI_MIN, ql_asic_reset_work, (caddr_t)qlge) 6783 != DDI_SUCCESS) { 6784 ql_free_resources(dip, qlge); 
6785 break; 6786 } 6787 6788 if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_reset_intr_hdl, 6789 DDI_INTR_SOFTPRI_MIN, ql_mpi_reset_work, (caddr_t)qlge) 6790 != DDI_SUCCESS) { 6791 ql_free_resources(dip, qlge); 6792 break; 6793 } 6794 6795 qlge->sequence |= INIT_ADD_SOFT_INTERRUPT; 6796 6797 /* 6798 * mutex to protect the adapter state structure. 6799 * initialize mutexes according to the interrupt priority 6800 */ 6801 mutex_init(&qlge->gen_mutex, NULL, MUTEX_DRIVER, 6802 DDI_INTR_PRI(qlge->intr_pri)); 6803 mutex_init(&qlge->hw_mutex, NULL, MUTEX_DRIVER, 6804 DDI_INTR_PRI(qlge->intr_pri)); 6805 mutex_init(&qlge->mbx_mutex, NULL, MUTEX_DRIVER, 6806 DDI_INTR_PRI(qlge->intr_pri)); 6807 6808 /* Mailbox wait and interrupt conditional variable. */ 6809 cv_init(&qlge->cv_mbx_intr, NULL, CV_DRIVER, NULL); 6810 6811 qlge->sequence |= INIT_MUTEX; 6812 6813 /* 6814 * KStats 6815 */ 6816 if (ql_init_kstats(qlge) != DDI_SUCCESS) { 6817 cmn_err(CE_WARN, "%s(%d): KState initialization failed", 6818 ADAPTER_NAME, instance); 6819 ql_free_resources(dip, qlge); 6820 break; 6821 } 6822 qlge->sequence |= INIT_KSTATS; 6823 6824 /* 6825 * Initialize gld macinfo structure 6826 */ 6827 ql_gld3_init(qlge, macp); 6828 6829 if (mac_register(macp, &qlge->mh) != DDI_SUCCESS) { 6830 cmn_err(CE_WARN, "%s(%d): mac_register failed", 6831 __func__, instance); 6832 ql_free_resources(dip, qlge); 6833 break; 6834 } 6835 qlge->sequence |= INIT_MAC_REGISTERED; 6836 QL_PRINT(DBG_GLD, ("%s(%d): mac_register done\n", 6837 ADAPTER_NAME, instance)); 6838 6839 mac_free(macp); 6840 macp = NULL; 6841 6842 qlge->mac_flags = QL_MAC_ATTACHED; 6843 6844 /* 6845 * Allocate memory resources 6846 */ 6847 if (ql_alloc_mem_resources(qlge) != DDI_SUCCESS) { 6848 cmn_err(CE_WARN, "%s(%d): memory allocation failed", 6849 __func__, qlge->instance); 6850 ql_free_mem_resources(qlge); 6851 ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED); 6852 return (DDI_FAILURE); 6853 } 6854 qlge->sequence |= INIT_MEMORY_ALLOC; 6855 
6856 ddi_report_dev(dip); 6857 6858 rval = DDI_SUCCESS; 6859 break; 6860 /* 6861 * DDI_RESUME 6862 * When called with cmd set to DDI_RESUME, attach() must 6863 * restore the hardware state of a device (power may have been 6864 * removed from the device), allow pending requests to con- 6865 * tinue, and service new requests. In this case, the driver 6866 * must not make any assumptions about the state of the 6867 * hardware, but must restore the state of the device except 6868 * for the power level of components. 6869 * 6870 */ 6871 case DDI_RESUME: 6872 6873 if ((qlge = (qlge_t *)QL_GET_DEV(dip)) == NULL) 6874 return (DDI_FAILURE); 6875 6876 QL_PRINT(DBG_GLD, ("%s(%d)-DDI_RESUME\n", 6877 __func__, qlge->instance)); 6878 6879 mutex_enter(&qlge->gen_mutex); 6880 rval = ql_do_start(qlge); 6881 mutex_exit(&qlge->gen_mutex); 6882 break; 6883 6884 default: 6885 break; 6886 } 6887 return (rval); 6888 } 6889 6890 /* 6891 * Unbind all pending tx dma handles during driver bring down 6892 */ 6893 static void 6894 ql_unbind_pending_tx_dma_handle(struct tx_ring *tx_ring) 6895 { 6896 struct tx_ring_desc *tx_ring_desc; 6897 int i, j; 6898 6899 if (tx_ring->wq_desc) { 6900 tx_ring_desc = tx_ring->wq_desc; 6901 for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) { 6902 for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) { 6903 if (tx_ring_desc->tx_dma_handle[j]) { 6904 (void) ddi_dma_unbind_handle( 6905 tx_ring_desc->tx_dma_handle[j]); 6906 } 6907 } 6908 tx_ring_desc->tx_dma_handle_used = 0; 6909 } /* end of for loop */ 6910 } 6911 } 6912 /* 6913 * Wait for all the packets sent to the chip to finish transmission 6914 * to prevent buffers to be unmapped before or during a transmit operation 6915 */ 6916 static int 6917 ql_wait_tx_quiesce(qlge_t *qlge) 6918 { 6919 int count = MAX_TX_WAIT_COUNT, i; 6920 int rings_done; 6921 volatile struct tx_ring *tx_ring; 6922 uint32_t consumer_idx; 6923 uint32_t producer_idx; 6924 uint32_t temp; 6925 int done = 0; 6926 int rval = DDI_FAILURE; 
6927 6928 while (!done) { 6929 rings_done = 0; 6930 6931 for (i = 0; i < qlge->tx_ring_count; i++) { 6932 tx_ring = &qlge->tx_ring[i]; 6933 temp = ql_read_doorbell_reg(qlge, 6934 tx_ring->prod_idx_db_reg); 6935 producer_idx = temp & 0x0000ffff; 6936 consumer_idx = (temp >> 16); 6937 6938 /* 6939 * Get the pending iocb count, ones which have not been 6940 * pulled down by the chip 6941 */ 6942 if (producer_idx >= consumer_idx) 6943 temp = (producer_idx - consumer_idx); 6944 else 6945 temp = (tx_ring->wq_len - consumer_idx) + 6946 producer_idx; 6947 6948 if ((tx_ring->tx_free_count + temp) >= tx_ring->wq_len) 6949 rings_done++; 6950 else { 6951 done = 1; 6952 break; 6953 } 6954 } 6955 6956 /* If all the rings are done */ 6957 if (rings_done >= qlge->tx_ring_count) { 6958 #ifdef QLGE_LOAD_UNLOAD 6959 cmn_err(CE_NOTE, "%s(%d) done successfully \n", 6960 __func__, qlge->instance); 6961 #endif 6962 rval = DDI_SUCCESS; 6963 break; 6964 } 6965 6966 qlge_delay(100); 6967 6968 count--; 6969 if (!count) { 6970 6971 count = MAX_TX_WAIT_COUNT; 6972 #ifdef QLGE_LOAD_UNLOAD 6973 volatile struct rx_ring *rx_ring; 6974 cmn_err(CE_NOTE, "%s(%d): Waiting for %d pending" 6975 " Transmits on queue %d to complete .\n", 6976 __func__, qlge->instance, 6977 (qlge->tx_ring[i].wq_len - 6978 qlge->tx_ring[i].tx_free_count), 6979 i); 6980 6981 rx_ring = &qlge->rx_ring[i+1]; 6982 temp = ql_read_doorbell_reg(qlge, 6983 rx_ring->cnsmr_idx_db_reg); 6984 consumer_idx = temp & 0x0000ffff; 6985 producer_idx = (temp >> 16); 6986 cmn_err(CE_NOTE, "%s(%d): Transmit completion queue %d," 6987 " Producer %d, Consumer %d\n", 6988 __func__, qlge->instance, 6989 i+1, 6990 producer_idx, consumer_idx); 6991 6992 temp = ql_read_doorbell_reg(qlge, 6993 tx_ring->prod_idx_db_reg); 6994 producer_idx = temp & 0x0000ffff; 6995 consumer_idx = (temp >> 16); 6996 cmn_err(CE_NOTE, "%s(%d): Transmit request queue %d," 6997 " Producer %d, Consumer %d\n", 6998 __func__, qlge->instance, i, 6999 producer_idx, consumer_idx); 
7000 #endif 7001 7002 /* For now move on */ 7003 break; 7004 } 7005 } 7006 /* Stop the request queue */ 7007 mutex_enter(&qlge->hw_mutex); 7008 for (i = 0; i < qlge->tx_ring_count; i++) { 7009 if (qlge->tx_ring[i].valid_db_reg) { 7010 ql_write_doorbell_reg(qlge, 7011 qlge->tx_ring[i].valid_db_reg, 0); 7012 } 7013 } 7014 mutex_exit(&qlge->hw_mutex); 7015 return (rval); 7016 } 7017 7018 /* 7019 * Wait for all the receives indicated to the stack to come back 7020 */ 7021 static int 7022 ql_wait_rx_complete(qlge_t *qlge) 7023 { 7024 int i; 7025 /* Disable all the completion queues */ 7026 mutex_enter(&qlge->hw_mutex); 7027 for (i = 0; i < qlge->rx_ring_count; i++) { 7028 if (qlge->rx_ring[i].valid_db_reg) { 7029 ql_write_doorbell_reg(qlge, 7030 qlge->rx_ring[i].valid_db_reg, 0); 7031 } 7032 } 7033 mutex_exit(&qlge->hw_mutex); 7034 7035 /* Wait for OS to return all rx buffers */ 7036 qlge_delay(QL_ONE_SEC_DELAY); 7037 return (DDI_SUCCESS); 7038 } 7039 7040 /* 7041 * stop the driver 7042 */ 7043 static int 7044 ql_bringdown_adapter(qlge_t *qlge) 7045 { 7046 int i; 7047 int status = DDI_SUCCESS; 7048 7049 qlge->mac_flags = QL_MAC_BRINGDOWN; 7050 if (qlge->sequence & ADAPTER_INIT) { 7051 /* stop forwarding external packets to driver */ 7052 status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK); 7053 if (status) 7054 return (status); 7055 (void) ql_stop_routing(qlge); 7056 ql_sem_unlock(qlge, SEM_RT_IDX_MASK); 7057 /* 7058 * Set the flag for receive and transmit 7059 * operations to cease 7060 */ 7061 for (i = 0; i < qlge->tx_ring_count; i++) { 7062 mutex_enter(&qlge->tx_ring[i].tx_lock); 7063 qlge->tx_ring[i].mac_flags = QL_MAC_STOPPED; 7064 mutex_exit(&qlge->tx_ring[i].tx_lock); 7065 } 7066 7067 for (i = 0; i < qlge->rx_ring_count; i++) { 7068 mutex_enter(&qlge->rx_ring[i].rx_lock); 7069 qlge->rx_ring[i].mac_flags = QL_MAC_STOPPED; 7070 mutex_exit(&qlge->rx_ring[i].rx_lock); 7071 } 7072 7073 /* 7074 * Need interrupts to be running while the transmit 7075 * completions are 
cleared. Wait for the packets 7076 * queued to the chip to be sent out 7077 */ 7078 (void) ql_wait_tx_quiesce(qlge); 7079 /* Interrupts not needed from now */ 7080 ql_disable_all_completion_interrupts(qlge); 7081 7082 mutex_enter(&qlge->hw_mutex); 7083 /* Disable Global interrupt */ 7084 ql_disable_global_interrupt(qlge); 7085 mutex_exit(&qlge->hw_mutex); 7086 7087 /* Wait for all the indicated packets to come back */ 7088 status = ql_wait_rx_complete(qlge); 7089 7090 mutex_enter(&qlge->hw_mutex); 7091 /* Reset adapter */ 7092 (void) ql_asic_reset(qlge); 7093 /* 7094 * Unbind all tx dma handles to prevent pending tx descriptors' 7095 * dma handles from being re-used. 7096 */ 7097 for (i = 0; i < qlge->tx_ring_count; i++) { 7098 ql_unbind_pending_tx_dma_handle(&qlge->tx_ring[i]); 7099 } 7100 7101 qlge->sequence &= ~ADAPTER_INIT; 7102 7103 mutex_exit(&qlge->hw_mutex); 7104 } 7105 return (status); 7106 } 7107 7108 /* 7109 * ql_detach 7110 * Used to remove all the states associated with a given 7111 * instances of a device node prior to the removal of that 7112 * instance from the system. 
7113 */ 7114 static int 7115 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 7116 { 7117 qlge_t *qlge; 7118 int rval; 7119 7120 rval = DDI_SUCCESS; 7121 7122 switch (cmd) { 7123 case DDI_DETACH: 7124 7125 if ((qlge = QL_GET_DEV(dip)) == NULL) 7126 return (DDI_FAILURE); 7127 rval = ql_bringdown_adapter(qlge); 7128 if (rval != DDI_SUCCESS) 7129 break; 7130 7131 qlge->mac_flags = QL_MAC_DETACH; 7132 7133 /* free memory resources */ 7134 if (qlge->sequence & INIT_MEMORY_ALLOC) { 7135 ql_free_mem_resources(qlge); 7136 qlge->sequence &= ~INIT_MEMORY_ALLOC; 7137 } 7138 ql_free_resources(dip, qlge); 7139 7140 break; 7141 7142 case DDI_SUSPEND: 7143 if ((qlge = QL_GET_DEV(dip)) == NULL) 7144 return (DDI_FAILURE); 7145 7146 mutex_enter(&qlge->gen_mutex); 7147 if ((qlge->mac_flags == QL_MAC_ATTACHED) || 7148 (qlge->mac_flags == QL_MAC_STARTED)) { 7149 (void) ql_do_stop(qlge); 7150 } 7151 qlge->mac_flags = QL_MAC_SUSPENDED; 7152 mutex_exit(&qlge->gen_mutex); 7153 7154 break; 7155 default: 7156 rval = DDI_FAILURE; 7157 break; 7158 } 7159 7160 return (rval); 7161 } 7162 7163 /* 7164 * quiesce(9E) entry point. 7165 * 7166 * This function is called when the system is single-threaded at high 7167 * PIL with preemption disabled. Therefore, this function must not be 7168 * blocked. 7169 * 7170 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 
 */
int
ql_quiesce(dev_info_t *dip)
{
	qlge_t *qlge;
	int i;

	if ((qlge = QL_GET_DEV(dip)) == NULL)
		return (DDI_FAILURE);

	if (CFG_IST(qlge, CFG_CHIP_8100)) {
		/* stop forwarding external packets to driver */
		(void) ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
		(void) ql_stop_routing(qlge);
		ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
		/* Stop all the request queues */
		for (i = 0; i < qlge->tx_ring_count; i++) {
			if (qlge->tx_ring[i].valid_db_reg) {
				ql_write_doorbell_reg(qlge,
				    qlge->tx_ring[i].valid_db_reg, 0);
			}
		}
		/* Give the chip time to drain queued requests. */
		qlge_delay(QL_ONE_SEC_DELAY/4);
		/* Interrupts not needed from now */
		/* Disable MPI interrupt */
		ql_write_reg(qlge, REG_INTERRUPT_MASK,
		    (INTR_MASK_PI << 16));
		ql_disable_global_interrupt(qlge);

		/* Disable all the rx completion queues */
		for (i = 0; i < qlge->rx_ring_count; i++) {
			if (qlge->rx_ring[i].valid_db_reg) {
				ql_write_doorbell_reg(qlge,
				    qlge->rx_ring[i].valid_db_reg, 0);
			}
		}
		qlge_delay(QL_ONE_SEC_DELAY/4);
		qlge->mac_flags = QL_MAC_STOPPED;
		/* Reset adapter */
		(void) ql_asic_reset(qlge);
		qlge_delay(100);
	}

	return (DDI_SUCCESS);
}

/* Generate the dev_ops/cb_ops tables for this driver. */
QL_STREAM_OPS(ql_ops, ql_attach, ql_detach);

/*
 * Loadable Driver Interface Structures.
 * Declare and initialize the module configuration section...
 */
static struct modldrv modldrv = {
	&mod_driverops,	/* type of module: driver */
	version,	/* name of module */
	&ql_ops		/* driver dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};

/*
 * Loadable Module Routines
 */

/*
 * _init
 * Initializes a loadable module. It is called before any other
 * routine in a loadable module.
7241 */ 7242 int 7243 _init(void) 7244 { 7245 int rval; 7246 7247 mac_init_ops(&ql_ops, ADAPTER_NAME); 7248 rval = mod_install(&modlinkage); 7249 if (rval != DDI_SUCCESS) { 7250 mac_fini_ops(&ql_ops); 7251 cmn_err(CE_WARN, "?Unable to install/attach driver '%s'", 7252 ADAPTER_NAME); 7253 } 7254 7255 return (rval); 7256 } 7257 7258 /* 7259 * _fini 7260 * Prepares a module for unloading. It is called when the system 7261 * wants to unload a module. If the module determines that it can 7262 * be unloaded, then _fini() returns the value returned by 7263 * mod_remove(). Upon successful return from _fini() no other 7264 * routine in the module will be called before _init() is called. 7265 */ 7266 int 7267 _fini(void) 7268 { 7269 int rval; 7270 7271 rval = mod_remove(&modlinkage); 7272 if (rval == DDI_SUCCESS) { 7273 mac_fini_ops(&ql_ops); 7274 } 7275 7276 return (rval); 7277 } 7278 7279 /* 7280 * _info 7281 * Returns information about loadable module. 7282 */ 7283 int 7284 _info(struct modinfo *modinfop) 7285 { 7286 return (mod_info(&modlinkage, modinfop)); 7287 } 7288