Lines Matching refs:dp

191 static int sfe_tx_desc_write(struct gem_dev *dp, int slot,
193 static void sfe_tx_start(struct gem_dev *dp, int startslot, int nslot);
194 static void sfe_rx_desc_write(struct gem_dev *dp, int slot,
196 static uint_t sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc);
197 static uint64_t sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc);
199 static void sfe_tx_desc_init(struct gem_dev *dp, int slot);
200 static void sfe_rx_desc_init(struct gem_dev *dp, int slot);
201 static void sfe_tx_desc_clean(struct gem_dev *dp, int slot);
202 static void sfe_rx_desc_clean(struct gem_dev *dp, int slot);
205 static uint_t sfe_interrupt(struct gem_dev *dp);
262 #define SFE_EEPROM_DELAY(dp) \ argument
263 { (void) INL(dp, EROMAR); (void) INL(dp, EROMAR); }
268 sfe_read_eeprom(struct gem_dev *dp, uint_t offset) in sfe_read_eeprom() argument
275 OUTL(dp, EROMAR, 0); in sfe_read_eeprom()
276 SFE_EEPROM_DELAY(dp); in sfe_read_eeprom()
277 OUTL(dp, EROMAR, EROMAR_EESK); in sfe_read_eeprom()
278 SFE_EEPROM_DELAY(dp); in sfe_read_eeprom()
288 OUTL(dp, EROMAR, EROMAR_EECS | eedi); in sfe_read_eeprom()
289 SFE_EEPROM_DELAY(dp); in sfe_read_eeprom()
290 OUTL(dp, EROMAR, EROMAR_EECS | eedi | EROMAR_EESK); in sfe_read_eeprom()
291 SFE_EEPROM_DELAY(dp); in sfe_read_eeprom()
294 OUTL(dp, EROMAR, EROMAR_EECS); in sfe_read_eeprom()
299 OUTL(dp, EROMAR, EROMAR_EECS); in sfe_read_eeprom()
300 SFE_EEPROM_DELAY(dp); in sfe_read_eeprom()
301 OUTL(dp, EROMAR, EROMAR_EECS | EROMAR_EESK); in sfe_read_eeprom()
302 SFE_EEPROM_DELAY(dp); in sfe_read_eeprom()
304 ret = (ret << 1) | ((INL(dp, EROMAR) >> EROMAR_EEDO_SHIFT) & 1); in sfe_read_eeprom()
307 OUTL(dp, EROMAR, 0); in sfe_read_eeprom()
308 SFE_EEPROM_DELAY(dp); in sfe_read_eeprom()
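
The sfe_read_eeprom() matches above trace a bit-banged serial EEPROM read through the EROMAR register. A minimal sketch of that pattern follows; OUTL/INL, the EROMAR_* bits and SFE_EEPROM_DELAY() are taken from the listing, while EROMAR_EEDI_SHIFT, the read opcode and the 6-bit address width are assumptions (typical of a 93C46-style part) and may differ from the driver.

    /*
     * Hedged sketch of the EEPROM read traced above.  The command word
     * layout (start bit + READ opcode 10 + 6 address bits) and the
     * EROMAR_EEDI_SHIFT name are assumptions, not visible in the matches.
     */
    static uint16_t
    eeprom_read_sketch(struct gem_dev *dp, uint_t offset)
    {
        uint32_t    eedi;
        uint16_t    ret = 0;
        int         i;
        uint_t      cmd = (0x6 << 6) | (offset & 0x3f);   /* assumed format */

        /* shift the command out on EEDI, MSB first, clocking EESK */
        for (i = 8; i >= 0; i--) {
            eedi = ((cmd >> i) & 1) << EROMAR_EEDI_SHIFT;
            OUTL(dp, EROMAR, EROMAR_EECS | eedi);
            SFE_EEPROM_DELAY(dp);
            OUTL(dp, EROMAR, EROMAR_EECS | eedi | EROMAR_EESK);
            SFE_EEPROM_DELAY(dp);
        }

        /* shift 16 data bits back in on EEDO */
        for (i = 0; i < 16; i++) {
            OUTL(dp, EROMAR, EROMAR_EECS);
            SFE_EEPROM_DELAY(dp);
            OUTL(dp, EROMAR, EROMAR_EECS | EROMAR_EESK);
            SFE_EEPROM_DELAY(dp);
            ret = (ret << 1) | ((INL(dp, EROMAR) >> EROMAR_EEDO_SHIFT) & 1);
        }

        /* deselect the EEPROM */
        OUTL(dp, EROMAR, 0);
        SFE_EEPROM_DELAY(dp);

        return (ret);
    }
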
315 sfe_get_mac_addr_dp83815(struct gem_dev *dp) in sfe_get_mac_addr_dp83815() argument
323 DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__)); in sfe_get_mac_addr_dp83815()
325 mac = dp->dev_addr.ether_addr_octet; in sfe_get_mac_addr_dp83815()
331 val = sfe_read_eeprom(dp, 0x6); in sfe_get_mac_addr_dp83815()
335 val = sfe_read_eeprom(dp, 0x7); in sfe_get_mac_addr_dp83815()
341 val = sfe_read_eeprom(dp, 0x8); in sfe_get_mac_addr_dp83815()
347 val = sfe_read_eeprom(dp, 0x9); in sfe_get_mac_addr_dp83815()
357 sfe_get_mac_addr_sis900(struct gem_dev *dp) in sfe_get_mac_addr_sis900() argument
363 mac = dp->dev_addr.ether_addr_octet; in sfe_get_mac_addr_sis900()
366 val = sfe_read_eeprom(dp, 0x8 + i); in sfe_get_mac_addr_sis900()
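
The SiS 900 path above reads the station address as three 16-bit words starting at EEPROM offset 0x8. A sketch of that loop, assuming low-byte-first word-to-octet ordering (the ordering is not visible in the matches):

    /* sketch: assemble the MAC from EEPROM words 0x8..0xa (byte order assumed) */
    static boolean_t
    get_mac_addr_sis900_sketch(struct gem_dev *dp)
    {
        uint8_t     *mac = dp->dev_addr.ether_addr_octet;
        uint16_t    val;
        int         i;

        for (i = 0; i < ETHERADDRL / 2; i++) {
            val = sfe_read_eeprom(dp, 0x8 + i);
            *mac++ = (uint8_t)val;          /* low byte first (assumed) */
            *mac++ = (uint8_t)(val >> 8);
        }
        return (B_TRUE);
    }
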
418 sfe_get_mac_addr_sis962(struct gem_dev *dp) in sfe_get_mac_addr_sis962() argument
426 OUTL(dp, MEAR, EROMAR_EEREQ); in sfe_get_mac_addr_sis962()
427 for (i = 0; (INL(dp, MEAR) & EROMAR_EEGNT) == 0; i++) { in sfe_get_mac_addr_sis962()
431 CONS "%s: failed to access eeprom", dp->name); in sfe_get_mac_addr_sis962()
436 ret = sfe_get_mac_addr_sis900(dp); in sfe_get_mac_addr_sis962()
439 OUTL(dp, MEAR, EROMAR_EEDONE); in sfe_get_mac_addr_sis962()
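
The SiS 962 variant adds an arbitration handshake around the same EEPROM read: request the EEPROM via MEAR, poll for the grant, reuse the SiS 900 routine, then release it. A sketch follows; the boolean_t return type, poll limit and per-iteration delay are assumptions.

    static boolean_t
    get_mac_addr_sis962_sketch(struct gem_dev *dp)
    {
        boolean_t   ret = B_FALSE;
        int         i;

        OUTL(dp, MEAR, EROMAR_EEREQ);               /* request the EEPROM */
        for (i = 0; (INL(dp, MEAR) & EROMAR_EEGNT) == 0; i++) {
            if (i > 1000) {                         /* assumed limit */
                cmn_err(CE_WARN,
                    CONS "%s: failed to access eeprom", dp->name);
                goto done;
            }
            drv_usecwait(10);                       /* assumed interval */
        }
        ret = sfe_get_mac_addr_sis900(dp);
    done:
        OUTL(dp, MEAR, EROMAR_EEDONE);              /* release the EEPROM */
        return (ret);
    }
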
445 sfe_reset_chip_sis900(struct gem_dev *dp) in sfe_reset_chip_sis900() argument
450 struct sfe_dev *lp = dp->private; in sfe_reset_chip_sis900()
452 DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__)); in sfe_reset_chip_sis900()
460 OUTL(dp, IMR, 0); in sfe_reset_chip_sis900()
461 lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits; in sfe_reset_chip_sis900()
463 OUTLINL(dp, RFCR, 0); in sfe_reset_chip_sis900()
465 OUTL(dp, CR, CR_RST | CR_TXR | CR_RXR); in sfe_reset_chip_sis900()
471 cmn_err(CE_WARN, "%s: chip reset timeout", dp->name); in sfe_reset_chip_sis900()
474 done |= INL(dp, ISR) & (ISR_TXRCMP | ISR_RXRCMP); in sfe_reset_chip_sis900()
480 OUTL(dp, CR, lp->cr | INL(dp, CR)); in sfe_reset_chip_sis900()
485 dp->name, INL(dp, CFG), CFG_BITS_SIS900)); in sfe_reset_chip_sis900()
492 OUTL(dp, CFG, val); in sfe_reset_chip_sis900()
493 DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name, in sfe_reset_chip_sis900()
494 INL(dp, CFG), CFG_BITS_SIS900)); in sfe_reset_chip_sis900()
500 sfe_reset_chip_dp83815(struct gem_dev *dp) in sfe_reset_chip_dp83815() argument
504 struct sfe_dev *lp = dp->private; in sfe_reset_chip_dp83815()
506 DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__)); in sfe_reset_chip_dp83815()
514 OUTL(dp, IMR, 0); in sfe_reset_chip_dp83815()
515 lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits; in sfe_reset_chip_dp83815()
517 OUTL(dp, RFCR, 0); in sfe_reset_chip_dp83815()
519 OUTL(dp, CR, CR_RST); in sfe_reset_chip_dp83815()
522 for (i = 0; INL(dp, CR) & CR_RST; i++) { in sfe_reset_chip_dp83815()
524 cmn_err(CE_WARN, "!%s: chip reset timeout", dp->name); in sfe_reset_chip_dp83815()
529 DPRINTF(0, (CE_CONT, "!%s: chip reset in %duS", dp->name, i*10)); in sfe_reset_chip_dp83815()
531 OUTL(dp, CCSR, CCSR_PMESTS); in sfe_reset_chip_dp83815()
532 OUTL(dp, CCSR, 0); in sfe_reset_chip_dp83815()
536 dp->name, INL(dp, CFG), CFG_BITS_DP83815)); in sfe_reset_chip_dp83815()
537 val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG); in sfe_reset_chip_dp83815()
538 OUTL(dp, CFG, val | CFG_PAUSE_ADV); in sfe_reset_chip_dp83815()
539 DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name, in sfe_reset_chip_dp83815()
540 INL(dp, CFG), CFG_BITS_DP83815)); in sfe_reset_chip_dp83815()
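
Both reset paths above share one shape: mask interrupts, latch any pending ISR causes the driver cares about, clear the receive filter, issue the reset in CR, and poll for completion. A condensed sketch of the DP83815 variant; the 10us poll interval, the iteration limit and the GEM_FAILURE counterpart to GEM_SUCCESS are assumptions.

    static int
    reset_chip_dp83815_sketch(struct gem_dev *dp)
    {
        struct sfe_dev  *lp = dp->private;
        int             i;

        OUTL(dp, IMR, 0);                               /* mask all interrupts */
        lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits; /* latch pending causes */
        OUTL(dp, RFCR, 0);                              /* disable the rx filter */

        OUTL(dp, CR, CR_RST);                           /* start the soft reset */
        for (i = 0; INL(dp, CR) & CR_RST; i++) {        /* CR_RST self-clears when done */
            if (i > 1000) {                             /* assumed limit */
                cmn_err(CE_WARN, "!%s: chip reset timeout", dp->name);
                return (GEM_FAILURE);
            }
            drv_usecwait(10);                           /* assumed interval */
        }

        OUTL(dp, CCSR, CCSR_PMESTS);                    /* clear power-management status */
        OUTL(dp, CCSR, 0);
        return (GEM_SUCCESS);
    }
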
546 sfe_init_chip(struct gem_dev *dp) in sfe_init_chip() argument
555 OUTL(dp, IMR, 0); in sfe_init_chip()
560 OUTL(dp, TXDP, dp->tx_ring_dma); in sfe_init_chip()
563 OUTL(dp, RXDP, dp->rx_ring_dma); in sfe_init_chip()
569 sfe_mcast_hash(struct gem_dev *dp, uint8_t *addr) in sfe_mcast_hash() argument
576 sfe_rxfilter_dump(struct gem_dev *dp, int start, int end) in sfe_rxfilter_dump() argument
582 cmn_err(CE_CONT, "!%s: rx filter ram dump:", dp->name); in sfe_rxfilter_dump()
586 OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i + j*2); in sfe_rxfilter_dump()
587 ram[j] = INL(dp, RFDR); in sfe_rxfilter_dump()
606 sfe_set_rx_filter_dp83815(struct gem_dev *dp) in sfe_set_rx_filter_dp83815() argument
611 uint8_t *mac = dp->cur_addr.ether_addr_octet; in sfe_set_rx_filter_dp83815()
613 struct sfe_dev *lp = dp->private; in sfe_set_rx_filter_dp83815()
616 dp->name, __func__, dp->mc_count, dp->rxmode, RXMODE_BITS)); in sfe_set_rx_filter_dp83815()
619 for (i = 0; i < dp->mc_count; i++) { in sfe_set_rx_filter_dp83815()
622 dp->name, i, in sfe_set_rx_filter_dp83815()
623 dp->mc_list[i].addr.ether_addr_octet[0], in sfe_set_rx_filter_dp83815()
624 dp->mc_list[i].addr.ether_addr_octet[1], in sfe_set_rx_filter_dp83815()
625 dp->mc_list[i].addr.ether_addr_octet[2], in sfe_set_rx_filter_dp83815()
626 dp->mc_list[i].addr.ether_addr_octet[3], in sfe_set_rx_filter_dp83815()
627 dp->mc_list[i].addr.ether_addr_octet[4], in sfe_set_rx_filter_dp83815()
628 dp->mc_list[i].addr.ether_addr_octet[5]); in sfe_set_rx_filter_dp83815()
631 if ((dp->rxmode & RXMODE_ENABLE) == 0) { in sfe_set_rx_filter_dp83815()
633 OUTL(dp, RFCR, 0); in sfe_set_rx_filter_dp83815()
640 if (dp->rxmode & RXMODE_PROMISC) { in sfe_set_rx_filter_dp83815()
643 } else if ((dp->rxmode & RXMODE_ALLMULTI) || dp->mc_count > 16*32/2) { in sfe_set_rx_filter_dp83815()
646 } else if (dp->mc_count > 4) { in sfe_set_rx_filter_dp83815()
654 for (i = 0; i < dp->mc_count; i++) { in sfe_set_rx_filter_dp83815()
655 j = dp->mc_list[i].hash >> (32 - 9); in sfe_set_rx_filter_dp83815()
665 (((1 << dp->mc_count) - 1) << RFCR_APAT_SHIFT); in sfe_set_rx_filter_dp83815()
672 dp->name, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], in sfe_set_rx_filter_dp83815()
684 OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i); in sfe_set_rx_filter_dp83815()
685 OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]); in sfe_set_rx_filter_dp83815()
694 OUTL(dp, RFCR, j); in sfe_set_rx_filter_dp83815()
695 OUTL(dp, RFDR, 0); in sfe_set_rx_filter_dp83815()
700 for (j = 0; j < dp->mc_count; j++) { in sfe_set_rx_filter_dp83815()
701 mac = &dp->mc_list[j].addr.ether_addr_octet[0]; in sfe_set_rx_filter_dp83815()
703 OUTL(dp, RFCR, in sfe_set_rx_filter_dp83815()
705 OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]); in sfe_set_rx_filter_dp83815()
710 OUTL(dp, RFCR, RFADDR_PCOUNT01_DP83815); in sfe_set_rx_filter_dp83815()
711 OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL); in sfe_set_rx_filter_dp83815()
712 OUTL(dp, RFCR, RFADDR_PCOUNT23_DP83815); in sfe_set_rx_filter_dp83815()
713 OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL); in sfe_set_rx_filter_dp83815()
720 OUTL(dp, RFCR, RFADDR_MULTICAST_DP83815 + i*2); in sfe_set_rx_filter_dp83815()
721 OUTL(dp, RFDR, hash_tbl[i]); in sfe_set_rx_filter_dp83815()
725 sfe_rxfilter_dump(dp, 0, 0x10); in sfe_set_rx_filter_dp83815()
726 sfe_rxfilter_dump(dp, 0x200, 0x380); in sfe_set_rx_filter_dp83815()
729 OUTL(dp, RFCR, RFCR_RFEN | mode); in sfe_set_rx_filter_dp83815()
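
The DP83815 filter routine above programs the receive-filter RAM one 16-bit word at a time: write the RAM offset into RFCR (with RFEN still clear), then write the word through RFDR, and only set RFCR_RFEN | mode once the RAM is loaded. A sketch of that access pattern, built only from the register names in the matches:

    /* select one filter-RAM word and write it through the data port */
    static void
    rxfilter_write_sketch(struct gem_dev *dp, uint_t ram_offset, uint16_t val)
    {
        OUTL(dp, RFCR, ram_offset);     /* RFEN left off while programming */
        OUTL(dp, RFDR, val);
    }

    /* e.g. load the station address into the perfect-match words */
    static void
    rxfilter_load_mac_sketch(struct gem_dev *dp, const uint8_t *mac)
    {
        int     i;

        for (i = 0; i < ETHERADDRL; i += 2) {
            rxfilter_write_sketch(dp,
                RFADDR_MAC_DP83815 + i, (mac[i + 1] << 8) | mac[i]);
        }
    }
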
735 sfe_set_rx_filter_sis900(struct gem_dev *dp) in sfe_set_rx_filter_sis900() argument
740 uint8_t *mac = dp->cur_addr.ether_addr_octet; in sfe_set_rx_filter_sis900()
743 struct sfe_dev *lp = dp->private; in sfe_set_rx_filter_sis900()
745 DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__)); in sfe_set_rx_filter_sis900()
747 if ((dp->rxmode & RXMODE_ENABLE) == 0) { in sfe_set_rx_filter_sis900()
749 OUTLINL(dp, RFCR, 0); in sfe_set_rx_filter_sis900()
765 if (dp->rxmode & RXMODE_PROMISC) { in sfe_set_rx_filter_sis900()
768 } else if ((dp->rxmode & RXMODE_ALLMULTI) || in sfe_set_rx_filter_sis900()
769 dp->mc_count > hash_size*16/2) { in sfe_set_rx_filter_sis900()
778 for (i = 0; i < dp->mc_count; i++) { in sfe_set_rx_filter_sis900()
780 h = dp->mc_list[i].hash >> hash_shift; in sfe_set_rx_filter_sis900()
788 OUTLINL(dp, RFCR, in sfe_set_rx_filter_sis900()
790 OUTLINL(dp, RFDR, (mac[i*2+1] << 8) | mac[i*2]); in sfe_set_rx_filter_sis900()
799 OUTLINL(dp, RFCR, in sfe_set_rx_filter_sis900()
801 OUTLINL(dp, RFDR, hash_tbl[i]); in sfe_set_rx_filter_sis900()
805 OUTLINL(dp, RFCR, RFCR_RFEN | mode); in sfe_set_rx_filter_sis900()
811 sfe_start_chip(struct gem_dev *dp) in sfe_start_chip() argument
813 struct sfe_dev *lp = dp->private; in sfe_start_chip()
815 DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__)); in sfe_start_chip()
824 if ((dp->misc_flag & GEM_NOINTR) == 0) { in sfe_start_chip()
825 OUTL(dp, IER, 1); in sfe_start_chip()
826 OUTL(dp, IMR, lp->our_intr_bits); in sfe_start_chip()
830 OUTL(dp, CR, lp->cr | CR_RXE); in sfe_start_chip()
839 sfe_stop_chip(struct gem_dev *dp) in sfe_stop_chip() argument
841 struct sfe_dev *lp = dp->private; in sfe_stop_chip()
846 DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__)); in sfe_stop_chip()
852 OUTL(dp, IMR, 0); in sfe_stop_chip()
855 OUTL(dp, CR, lp->cr | CR_TXR | CR_RXR); in sfe_stop_chip()
865 dp->name, __func__); in sfe_stop_chip()
869 val = INL(dp, ISR); in sfe_stop_chip()
883 sfe_stop_chip_quiesce(struct gem_dev *dp) in sfe_stop_chip_quiesce() argument
885 struct sfe_dev *lp = dp->private; in sfe_stop_chip_quiesce()
894 OUTL(dp, IMR, 0); in sfe_stop_chip_quiesce()
897 OUTL(dp, CR, CR_TXR | CR_RXR); in sfe_stop_chip_quiesce()
909 val = INL(dp, ISR); in sfe_stop_chip_quiesce()
943 sfe_set_media(struct gem_dev *dp) in sfe_set_media() argument
951 struct sfe_dev *lp = dp->private; in sfe_set_media()
956 dp->name, __func__, in sfe_set_media()
957 dp->full_duplex ? "full" : "half", gem_speed_value[dp->speed])); in sfe_set_media()
961 if (dp->full_duplex) { in sfe_set_media()
965 if (dp->full_duplex) { in sfe_set_media()
974 val = INL(dp, CFG) & CFG_EDB_MASTER; in sfe_set_media()
997 txmxdma = max(dp->txmaxdma, 256); in sfe_set_media()
998 rxmxdma = max(dp->rxmaxdma, 256); in sfe_set_media()
1003 lp->tx_drain_threshold = ROUNDUP2(dp->txthr, TXCFG_FIFO_UNIT); in sfe_set_media()
1029 val = ROUNDUP2(max(dp->rxthr, ETHERMIN), RXCFG_FIFO_UNIT); in sfe_set_media()
1036 dp->name, __func__, in sfe_set_media()
1047 OUTL(dp, TXCFG, txcfg); in sfe_set_media()
1053 OUTL(dp, RXCFG, rxcfg); in sfe_set_media()
1056 dp->name, __func__, in sfe_set_media()
1061 pcr = INL(dp, PCR); in sfe_set_media()
1062 switch (dp->flow_control) { in sfe_set_media()
1065 OUTL(dp, PCR, pcr | PCR_PSEN | PCR_PS_MCAST); in sfe_set_media()
1069 OUTL(dp, PCR, in sfe_set_media()
1073 DPRINTF(2, (CE_CONT, CONS "%s: PCR: %b", dp->name, in sfe_set_media()
1074 INL(dp, PCR), PCR_BITS)); in sfe_set_media()
1077 switch (dp->flow_control) { in sfe_set_media()
1080 OUTL(dp, FLOWCTL, FLOWCTL_FLOWEN); in sfe_set_media()
1083 OUTL(dp, FLOWCTL, 0); in sfe_set_media()
1087 dp->name, INL(dp, FLOWCTL), FLOWCTL_BITS)); in sfe_set_media()
1093 sfe_get_stats(struct gem_dev *dp) in sfe_get_stats() argument
1103 sfe_tx_desc_write(struct gem_dev *dp, int slot, in sfe_tx_desc_write() argument
1115 dp->name, ddi_get_lbolt(), __func__, in sfe_tx_desc_write()
1116 dp->tx_desc_tail, slot, frags, flags); in sfe_tx_desc_write()
1138 tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot]; in sfe_tx_desc_write()
1148 sfe_tx_start(struct gem_dev *dp, int start_slot, int nslot) in sfe_tx_start() argument
1150 uint_t tx_ring_size = dp->gc.gc_tx_ring_size; in sfe_tx_start()
1152 struct sfe_dev *lp = dp->private; in sfe_tx_start()
1155 gem_tx_desc_dma_sync(dp, in sfe_tx_start()
1160 tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * start_slot]; in sfe_tx_start()
1163 gem_tx_desc_dma_sync(dp, start_slot, 1, DDI_DMA_SYNC_FORDEV); in sfe_tx_start()
1168 if (dp->mac_active) { in sfe_tx_start()
1169 OUTL(dp, CR, lp->cr | CR_TXE); in sfe_tx_start()
1174 sfe_rx_desc_write(struct gem_dev *dp, int slot, in sfe_rx_desc_write() argument
1187 dp->name, __func__, dp->rx_active_tail, slot, frags); in sfe_rx_desc_write()
1194 rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot]; in sfe_rx_desc_write()
1203 sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc) in sfe_tx_desc_stat() argument
1205 uint_t tx_ring_size = dp->gc.gc_tx_ring_size; in sfe_tx_desc_stat()
1209 struct sfe_dev *lp = dp->private; in sfe_tx_desc_stat()
1216 &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot + ndesc - 1, tx_ring_size)]; in sfe_tx_desc_stat()
1226 dp->name, ddi_get_lbolt(), __func__, in sfe_tx_desc_stat()
1235 dp->mac_active) { in sfe_tx_desc_stat()
1236 OUTL(dp, CR, lp->cr | CR_TXE); in sfe_tx_desc_stat()
1246 dp->name, slot, status); in sfe_tx_desc_stat()
1250 delay = (ddi_get_lbolt() - dp->tx_buf_head->txb_stime) * 10; in sfe_tx_desc_stat()
1253 dp->name, delay, slot)); in sfe_tx_desc_stat()
1265 &dp->tx_ring[SFE_DESC_SIZE * n]))->d_cmdsts); in sfe_tx_desc_stat()
1280 dp->name, status, TXSTAT_BITS)); in sfe_tx_desc_stat()
1282 dp->stats.errxmt++; in sfe_tx_desc_stat()
1285 dp->stats.underflow++; in sfe_tx_desc_stat()
1287 dp->stats.nocarrier++; in sfe_tx_desc_stat()
1289 dp->stats.xmtlatecoll++; in sfe_tx_desc_stat()
1290 } else if ((!dp->full_duplex) && (status & CMDSTS_EC)) { in sfe_tx_desc_stat()
1291 dp->stats.excoll++; in sfe_tx_desc_stat()
1292 dp->stats.collisions += 16; in sfe_tx_desc_stat()
1294 dp->stats.xmit_internal_err++; in sfe_tx_desc_stat()
1296 } else if (!dp->full_duplex) { in sfe_tx_desc_stat()
1301 dp->stats.first_coll++; in sfe_tx_desc_stat()
1303 dp->stats.multi_coll++; in sfe_tx_desc_stat()
1305 dp->stats.collisions += cols; in sfe_tx_desc_stat()
1307 dp->stats.defer++; in sfe_tx_desc_stat()
1314 sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc) in sfe_rx_desc_stat() argument
1325 rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot]; in sfe_rx_desc_stat()
1335 dp->name, ddi_get_lbolt(), __func__, in sfe_rx_desc_stat()
1356 dp->name, status, RXSTAT_BITS)); in sfe_rx_desc_stat()
1359 dp->stats.errrcv++; in sfe_rx_desc_stat()
1362 dp->stats.overflow++; in sfe_rx_desc_stat()
1364 dp->stats.frame_too_long++; in sfe_rx_desc_stat()
1366 dp->stats.runt++; in sfe_rx_desc_stat()
1368 dp->stats.frame++; in sfe_rx_desc_stat()
1370 dp->stats.crc++; in sfe_rx_desc_stat()
1372 dp->stats.rcv_internal_err++; in sfe_rx_desc_stat()
1388 uint8_t *bp = dp->rx_buf_head->rxb_buf; in sfe_rx_desc_stat()
1390 cmn_err(CE_CONT, CONS "%s: len:%d", dp->name, len); in sfe_rx_desc_stat()
1405 sfe_tx_desc_init(struct gem_dev *dp, int slot) in sfe_tx_desc_init() argument
1407 uint_t tx_ring_size = dp->gc.gc_tx_ring_size; in sfe_tx_desc_init()
1411 tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot]; in sfe_tx_desc_init()
1417 here = ((uint32_t)dp->tx_ring_dma) + SFE_DESC_SIZE*slot; in sfe_tx_desc_init()
1420 &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot - 1, tx_ring_size)]; in sfe_tx_desc_init()
1425 sfe_rx_desc_init(struct gem_dev *dp, int slot) in sfe_rx_desc_init() argument
1427 uint_t rx_ring_size = dp->gc.gc_rx_ring_size; in sfe_rx_desc_init()
1431 rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot]; in sfe_rx_desc_init()
1437 here = ((uint32_t)dp->rx_ring_dma) + SFE_DESC_SIZE*slot; in sfe_rx_desc_init()
1440 &dp->rx_ring[SFE_DESC_SIZE * SLOT(slot - 1, rx_ring_size)]; in sfe_rx_desc_init()
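
The descriptor-init matches above compute the physical address of the current slot ("here") and store it into the previous slot, chaining the hardware ring. A sketch of the rx side; the d_link field name and the LE_32() little-endian conversion are assumptions not visible in the matches.

    static void
    rx_desc_link_sketch(struct gem_dev *dp, int slot)
    {
        uint_t          rx_ring_size = dp->gc.gc_rx_ring_size;
        struct sfe_desc *rdp;
        uint32_t        here;

        /* physical address of this descriptor */
        here = ((uint32_t)dp->rx_ring_dma) + SFE_DESC_SIZE * slot;

        /* patch the previous descriptor to point at it, closing the ring */
        rdp = (void *)
            &dp->rx_ring[SFE_DESC_SIZE * SLOT(slot - 1, rx_ring_size)];
        rdp->d_link = LE_32(here);      /* field name and byte-swap assumed */
    }
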
1445 sfe_tx_desc_clean(struct gem_dev *dp, int slot) in sfe_tx_desc_clean() argument
1449 tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot]; in sfe_tx_desc_clean()
1454 sfe_rx_desc_clean(struct gem_dev *dp, int slot) in sfe_rx_desc_clean() argument
1458 rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot]; in sfe_rx_desc_clean()
1466 sfe_interrupt(struct gem_dev *dp) in sfe_interrupt() argument
1468 uint_t rx_ring_size = dp->gc.gc_rx_ring_size; in sfe_interrupt()
1473 struct sfe_dev *lp = dp->private; in sfe_interrupt()
1476 isr = INL(dp, ISR); in sfe_interrupt()
1488 dp->name, ddi_get_lbolt(), __func__, in sfe_interrupt()
1489 isr, INTR_BITS, dp->rx_active_head)); in sfe_interrupt()
1491 if (!dp->mac_active) { in sfe_interrupt()
1501 (void) gem_receive(dp); in sfe_interrupt()
1506 dp->name, isr, INTR_BITS)); in sfe_interrupt()
1508 dp->stats.overflow++; in sfe_interrupt()
1514 dp->name, isr, INTR_BITS)); in sfe_interrupt()
1516 dp->stats.norcvbuf++; in sfe_interrupt()
1522 OUTL(dp, RXDP, dp->rx_ring_dma + in sfe_interrupt()
1524 SLOT(dp->rx_active_head, rx_ring_size)); in sfe_interrupt()
1527 OUTL(dp, CR, lp->cr | CR_RXE); in sfe_interrupt()
1534 if (gem_tx_done(dp)) { in sfe_interrupt()
1545 dp->name, isr, INTR_BITS); in sfe_interrupt()
1550 (void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF); in sfe_interrupt()
1555 dp->name, __func__, isr, INTR_BITS)); in sfe_interrupt()
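
The tail of the interrupt matches shows the receive-restart step taken after an rx-idle or no-buffer condition: point RXDP back at the first still-active descriptor and re-enable the receiver. A sketch; the SFE_DESC_SIZE scaling of the slot offset is an assumption consistent with the descriptor-init code above.

    static void
    restart_rx_sketch(struct gem_dev *dp)
    {
        uint_t          rx_ring_size = dp->gc.gc_rx_ring_size;
        struct sfe_dev  *lp = dp->private;

        /* reload RXDP with the physical address of the active slot */
        OUTL(dp, RXDP, dp->rx_ring_dma +
            SFE_DESC_SIZE * SLOT(dp->rx_active_head, rx_ring_size));

        /* kick the receiver again */
        OUTL(dp, CR, lp->cr | CR_RXE);
    }
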
1570 sfe_mii_sync_dp83815(struct gem_dev *dp) in sfe_mii_sync_dp83815() argument
1576 sfe_mii_read_dp83815(struct gem_dev *dp, uint_t offset) in sfe_mii_read_dp83815() argument
1579 dp->name, __func__, offset)); in sfe_mii_read_dp83815()
1580 return ((uint16_t)INL(dp, MII_REGS_BASE + offset*4)); in sfe_mii_read_dp83815()
1584 sfe_mii_write_dp83815(struct gem_dev *dp, uint_t offset, uint16_t val) in sfe_mii_write_dp83815() argument
1587 dp->name, __func__, offset, val)); in sfe_mii_write_dp83815()
1588 OUTL(dp, MII_REGS_BASE + offset*4, val); in sfe_mii_write_dp83815()
1592 sfe_mii_config_dp83815(struct gem_dev *dp) in sfe_mii_config_dp83815() argument
1596 srr = INL(dp, SRR) & SRR_REV; in sfe_mii_config_dp83815()
1599 dp->name, srr, in sfe_mii_config_dp83815()
1600 INW(dp, 0x00cc), /* PGSEL */ in sfe_mii_config_dp83815()
1601 INW(dp, 0x00e4), /* PMDCSR */ in sfe_mii_config_dp83815()
1602 INW(dp, 0x00fc), /* TSTDAT */ in sfe_mii_config_dp83815()
1603 INW(dp, 0x00f4), /* DSPCFG */ in sfe_mii_config_dp83815()
1604 INW(dp, 0x00f8))); /* SDCFG */ in sfe_mii_config_dp83815()
1613 OUTW(dp, 0x00cc, 0x0001); /* PGSEL */ in sfe_mii_config_dp83815()
1614 OUTW(dp, 0x00e4, 0x189c); /* PMDCSR */ in sfe_mii_config_dp83815()
1615 OUTW(dp, 0x00fc, 0x0000); /* TSTDAT */ in sfe_mii_config_dp83815()
1616 OUTW(dp, 0x00f4, 0x5040); /* DSPCFG */ in sfe_mii_config_dp83815()
1617 OUTW(dp, 0x00f8, 0x008c); /* SDCFG */ in sfe_mii_config_dp83815()
1618 OUTW(dp, 0x00cc, 0x0000); /* PGSEL */ in sfe_mii_config_dp83815()
1622 dp->name, in sfe_mii_config_dp83815()
1623 INW(dp, 0x00cc), /* PGSEL */ in sfe_mii_config_dp83815()
1624 INW(dp, 0x00e4), /* PMDCSR */ in sfe_mii_config_dp83815()
1625 INW(dp, 0x00fc), /* TSTDAT */ in sfe_mii_config_dp83815()
1626 INW(dp, 0x00f4), /* DSPCFG */ in sfe_mii_config_dp83815()
1627 INW(dp, 0x00f8))); /* SDCFG */ in sfe_mii_config_dp83815()
1633 OUTW(dp, 0x00cc, 0x0001); /* PGSEL */ in sfe_mii_config_dp83815()
1634 OUTW(dp, 0x00e4, 0x189c); /* PMDCSR */ in sfe_mii_config_dp83815()
1635 OUTW(dp, 0x00cc, 0x0000); /* PGSEL */ in sfe_mii_config_dp83815()
1639 dp->name, in sfe_mii_config_dp83815()
1640 INW(dp, 0x00cc), /* PGSEL */ in sfe_mii_config_dp83815()
1641 INW(dp, 0x00e4))); /* PMDCSR */ in sfe_mii_config_dp83815()
1644 return (gem_mii_config_default(dp)); in sfe_mii_config_dp83815()
1648 sfe_mii_probe_dp83815(struct gem_dev *dp) in sfe_mii_probe_dp83815() argument
1654 dp->name, __func__)); in sfe_mii_probe_dp83815()
1655 dp->mii_phy_addr = 0; in sfe_mii_probe_dp83815()
1656 dp->gc.gc_mii_sync = &sfe_mii_sync_sis900; in sfe_mii_probe_dp83815()
1657 dp->gc.gc_mii_read = &sfe_mii_read_sis900; in sfe_mii_probe_dp83815()
1658 dp->gc.gc_mii_write = &sfe_mii_write_sis900; in sfe_mii_probe_dp83815()
1660 val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG); in sfe_mii_probe_dp83815()
1661 OUTL(dp, CFG, val | CFG_EXT_PHY | CFG_PHY_DIS); in sfe_mii_probe_dp83815()
1663 if (gem_mii_probe_default(dp) == GEM_SUCCESS) { in sfe_mii_probe_dp83815()
1669 dp->name, __func__)); in sfe_mii_probe_dp83815()
1670 dp->mii_phy_addr = -1; in sfe_mii_probe_dp83815()
1671 dp->gc.gc_mii_sync = &sfe_mii_sync_dp83815; in sfe_mii_probe_dp83815()
1672 dp->gc.gc_mii_read = &sfe_mii_read_dp83815; in sfe_mii_probe_dp83815()
1673 dp->gc.gc_mii_write = &sfe_mii_write_dp83815; in sfe_mii_probe_dp83815()
1675 val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG); in sfe_mii_probe_dp83815()
1676 OUTL(dp, CFG, val | CFG_PAUSE_ADV | CFG_PHY_RST); in sfe_mii_probe_dp83815()
1678 OUTL(dp, CFG, val | CFG_PAUSE_ADV); in sfe_mii_probe_dp83815()
1683 return (gem_mii_probe_default(dp)); in sfe_mii_probe_dp83815()
1687 sfe_mii_init_dp83815(struct gem_dev *dp) in sfe_mii_init_dp83815() argument
1691 val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG); in sfe_mii_init_dp83815()
1693 if (dp->mii_phy_addr == -1) { in sfe_mii_init_dp83815()
1695 OUTL(dp, CFG, val | CFG_PAUSE_ADV); in sfe_mii_init_dp83815()
1698 OUTL(dp, CFG, val | CFG_EXT_PHY | CFG_PHY_DIS); in sfe_mii_init_dp83815()
1707 #define MDIO_DELAY(dp) {(void) INL(dp, MEAR); (void) INL(dp, MEAR); } argument
1709 sfe_mii_sync_sis900(struct gem_dev *dp) in sfe_mii_sync_sis900() argument
1715 OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO); in sfe_mii_sync_sis900()
1716 MDIO_DELAY(dp); in sfe_mii_sync_sis900()
1717 OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO | MEAR_MDC); in sfe_mii_sync_sis900()
1718 MDIO_DELAY(dp); in sfe_mii_sync_sis900()
1723 sfe_mii_config_sis900(struct gem_dev *dp) in sfe_mii_config_sis900() argument
1725 struct sfe_dev *lp = dp->private; in sfe_mii_config_sis900()
1728 if ((dp->mii_phy_id & PHY_MASK) == PHY_ICS1893) { in sfe_mii_config_sis900()
1730 gem_mii_write(dp, 0x0018, 0xD200); in sfe_mii_config_sis900()
1738 gem_mii_write(dp, MII_AN_ADVERT, 0x05e1); in sfe_mii_config_sis900()
1739 gem_mii_write(dp, MII_CONFIG1, 0x0022); in sfe_mii_config_sis900()
1740 gem_mii_write(dp, MII_CONFIG2, 0xff00); in sfe_mii_config_sis900()
1741 gem_mii_write(dp, MII_MASK, 0xffc0); in sfe_mii_config_sis900()
1743 sfe_set_eq_sis630(dp); in sfe_mii_config_sis900()
1745 return (gem_mii_config_default(dp)); in sfe_mii_config_sis900()
1749 sfe_mii_read_sis900(struct gem_dev *dp, uint_t reg) in sfe_mii_read_sis900() argument
1756 cmd = MII_READ_CMD(dp->mii_phy_addr, reg); in sfe_mii_read_sis900()
1760 OUTL(dp, MEAR, data | MEAR_MDDIR); in sfe_mii_read_sis900()
1761 MDIO_DELAY(dp); in sfe_mii_read_sis900()
1762 OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC); in sfe_mii_read_sis900()
1763 MDIO_DELAY(dp); in sfe_mii_read_sis900()
1767 OUTL(dp, MEAR, 0); in sfe_mii_read_sis900()
1768 MDIO_DELAY(dp); in sfe_mii_read_sis900()
1771 OUTL(dp, MEAR, MEAR_MDC); in sfe_mii_read_sis900()
1772 MDIO_DELAY(dp); in sfe_mii_read_sis900()
1774 OUTL(dp, MEAR, 0); in sfe_mii_read_sis900()
1776 (void) INL(dp, MEAR); /* delay */ in sfe_mii_read_sis900()
1777 if (INL(dp, MEAR) & MEAR_MDIO) { in sfe_mii_read_sis900()
1779 dp->name, dp->mii_phy_addr); in sfe_mii_read_sis900()
1782 MDIO_DELAY(dp); in sfe_mii_read_sis900()
1785 OUTL(dp, MEAR, MEAR_MDC); in sfe_mii_read_sis900()
1786 MDIO_DELAY(dp); in sfe_mii_read_sis900()
1790 OUTL(dp, MEAR, 0); in sfe_mii_read_sis900()
1791 (void) INL(dp, MEAR); /* delay */ in sfe_mii_read_sis900()
1792 ret = (ret << 1) | ((INL(dp, MEAR) >> MEAR_MDIO_SHIFT) & 1); in sfe_mii_read_sis900()
1793 OUTL(dp, MEAR, MEAR_MDC); in sfe_mii_read_sis900()
1794 MDIO_DELAY(dp); in sfe_mii_read_sis900()
1799 OUTL(dp, MEAR, 0); in sfe_mii_read_sis900()
1800 MDIO_DELAY(dp); in sfe_mii_read_sis900()
1801 OUTL(dp, MEAR, MEAR_MDC); in sfe_mii_read_sis900()
1802 MDIO_DELAY(dp); in sfe_mii_read_sis900()
1809 sfe_mii_write_sis900(struct gem_dev *dp, uint_t reg, uint16_t val) in sfe_mii_write_sis900() argument
1815 cmd = MII_WRITE_CMD(dp->mii_phy_addr, reg, val); in sfe_mii_write_sis900()
1819 OUTL(dp, MEAR, data | MEAR_MDDIR); in sfe_mii_write_sis900()
1820 MDIO_DELAY(dp); in sfe_mii_write_sis900()
1821 OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC); in sfe_mii_write_sis900()
1822 MDIO_DELAY(dp); in sfe_mii_write_sis900()
1827 OUTL(dp, MEAR, 0); in sfe_mii_write_sis900()
1828 MDIO_DELAY(dp); in sfe_mii_write_sis900()
1829 OUTL(dp, MEAR, MEAR_MDC); in sfe_mii_write_sis900()
1830 MDIO_DELAY(dp); in sfe_mii_write_sis900()
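
The SiS 900 MII read/write paths above bit-bang MDIO through MEAR: each bit is driven with MEAR_MDDIR set and then clocked with MEAR_MDC, using MDIO_DELAY() for timing. A sketch of one shift-out cycle; the real code builds the full command word with MII_READ_CMD()/MII_WRITE_CMD() and loops this pattern over it.

    /* drive one MDIO output bit and clock it out (one cycle of the loops above) */
    static void
    mdio_shift_out_bit_sketch(struct gem_dev *dp, uint_t bit)
    {
        uint32_t    data = bit ? MEAR_MDIO : 0;

        OUTL(dp, MEAR, data | MEAR_MDDIR);              /* set up the data bit */
        MDIO_DELAY(dp);
        OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC);   /* rising edge clocks it out */
        MDIO_DELAY(dp);
    }
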
1836 sfe_set_eq_sis630(struct gem_dev *dp) in sfe_set_eq_sis630() argument
1844 struct sfe_dev *lp = dp->private; in sfe_set_eq_sis630()
1854 if (dp->mii_state == MII_STATE_LINKUP) { in sfe_set_eq_sis630()
1855 reg14h = gem_mii_read(dp, MII_RESV); in sfe_set_eq_sis630()
1856 gem_mii_write(dp, MII_RESV, (0x2200 | reg14h) & 0xBFFF); in sfe_set_eq_sis630()
1858 eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3; in sfe_set_eq_sis630()
1861 eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3; in sfe_set_eq_sis630()
1894 reg14h = gem_mii_read(dp, MII_RESV) & ~0x02f8; in sfe_set_eq_sis630()
1896 gem_mii_write(dp, MII_RESV, reg14h); in sfe_set_eq_sis630()
1898 reg14h = (gem_mii_read(dp, MII_RESV) & ~0x4000) | 0x2000; in sfe_set_eq_sis630()
1905 gem_mii_write(dp, MII_RESV, reg14h); in sfe_set_eq_sis630()
1915 sfe_chipinfo_init_sis900(struct gem_dev *dp) in sfe_chipinfo_init_sis900() argument
1918 struct sfe_dev *lp = (struct sfe_dev *)dp->private; in sfe_chipinfo_init_sis900()
1943 dp->name); in sfe_chipinfo_init_sis900()
1949 dp->name); in sfe_chipinfo_init_sis900()
1960 sfe_attach_chip(struct gem_dev *dp) in sfe_attach_chip() argument
1962 struct sfe_dev *lp = (struct sfe_dev *)dp->private; in sfe_attach_chip()
1964 DPRINTF(4, (CE_CONT, CONS "!%s: %s called", dp->name, __func__)); in sfe_attach_chip()
1968 sfe_chipinfo_init_sis900(dp); in sfe_attach_chip()
1974 if (!(lp->get_mac_addr)(dp)) { in sfe_attach_chip()
1978 dp->name, __func__); in sfe_attach_chip()
1983 dp->mii_phy_addr = -1; /* no need to scan PHY */ in sfe_attach_chip()
1984 dp->misc_flag |= GEM_VLAN_SOFT; in sfe_attach_chip()
1985 dp->txthr += 4; /* VTAG_SIZE */ in sfe_attach_chip()
1987 dp->txthr = min(dp->txthr, TXFIFOSIZE - 2); in sfe_attach_chip()
2007 struct gem_dev *dp; in sfeattach() local
2218 dp = gem_do_attach(dip, 0, gcp, base, &regs_ha, in sfeattach()
2222 if (dp == NULL) { in sfeattach()
2265 struct gem_dev *dp; in sfe_quiesce() local
2268 dp = GEM_GET_DEV(dip); in sfe_quiesce()
2270 if (dp == NULL) in sfe_quiesce()
2273 ret = sfe_stop_chip_quiesce(dp); in sfe_quiesce()