144961713Sgirish /* 244961713Sgirish * CDDL HEADER START 344961713Sgirish * 444961713Sgirish * The contents of this file are subject to the terms of the 544961713Sgirish * Common Development and Distribution License (the "License"). 644961713Sgirish * You may not use this file except in compliance with the License. 744961713Sgirish * 844961713Sgirish * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 944961713Sgirish * or http://www.opensolaris.org/os/licensing. 1044961713Sgirish * See the License for the specific language governing permissions 1144961713Sgirish * and limitations under the License. 1244961713Sgirish * 1344961713Sgirish * When distributing Covered Code, include this CDDL HEADER in each 1444961713Sgirish * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 1544961713Sgirish * If applicable, add the following below this CDDL HEADER, with the 1644961713Sgirish * fields enclosed by brackets "[]" replaced with your own identifying 1744961713Sgirish * information: Portions Copyright [yyyy] [name of copyright owner] 1844961713Sgirish * 1944961713Sgirish * CDDL HEADER END 2044961713Sgirish */ 2144961713Sgirish /* 227b26d9ffSSantwona Behera * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 2344961713Sgirish * Use is subject to license terms. 
2444961713Sgirish */ 2544961713Sgirish 2644961713Sgirish #include <sys/nxge/nxge_impl.h> 2744961713Sgirish #include <sys/nxge/nxge_rxdma.h> 28678453a8Sspeer #include <sys/nxge/nxge_hio.h> 29678453a8Sspeer 30678453a8Sspeer #if !defined(_BIG_ENDIAN) 31678453a8Sspeer #include <npi_rx_rd32.h> 32678453a8Sspeer #endif 33678453a8Sspeer #include <npi_rx_rd64.h> 34678453a8Sspeer #include <npi_rx_wr64.h> 3544961713Sgirish 3644961713Sgirish #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \ 37678453a8Sspeer (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid) 3844961713Sgirish #define NXGE_ACTUAL_RDC(nxgep, rdc) \ 3944961713Sgirish (rdc + nxgep->pt_config.hw_config.start_rdc) 4044961713Sgirish 4144961713Sgirish /* 4244961713Sgirish * Globals: tunable parameters (/etc/system or adb) 4344961713Sgirish * 4444961713Sgirish */ 4544961713Sgirish extern uint32_t nxge_rbr_size; 4644961713Sgirish extern uint32_t nxge_rcr_size; 4744961713Sgirish extern uint32_t nxge_rbr_spare_size; 4844961713Sgirish 4944961713Sgirish extern uint32_t nxge_mblks_pending; 5044961713Sgirish 5144961713Sgirish /* 5244961713Sgirish * Tunable to reduce the amount of time spent in the 5344961713Sgirish * ISR doing Rx Processing. 5444961713Sgirish */ 5544961713Sgirish extern uint32_t nxge_max_rx_pkts; 5644961713Sgirish boolean_t nxge_jumbo_enable; 5744961713Sgirish 5844961713Sgirish /* 5944961713Sgirish * Tunables to manage the receive buffer blocks. 6044961713Sgirish * 6144961713Sgirish * nxge_rx_threshold_hi: copy all buffers. 6244961713Sgirish * nxge_rx_bcopy_size_type: receive buffer block size type. 6344961713Sgirish * nxge_rx_threshold_lo: copy only up to tunable block size type. 
6444961713Sgirish */ 6544961713Sgirish extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 6644961713Sgirish extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 6744961713Sgirish extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 6844961713Sgirish 69b4d05839Sml extern uint32_t nxge_cksum_offload; 70678453a8Sspeer 71678453a8Sspeer static nxge_status_t nxge_map_rxdma(p_nxge_t, int); 72678453a8Sspeer static void nxge_unmap_rxdma(p_nxge_t, int); 7344961713Sgirish 7444961713Sgirish static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 7544961713Sgirish 76678453a8Sspeer static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int); 77678453a8Sspeer static void nxge_rxdma_hw_stop(p_nxge_t, int); 7844961713Sgirish 7944961713Sgirish static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 8044961713Sgirish p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 8144961713Sgirish uint32_t, 8244961713Sgirish p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 8344961713Sgirish p_rx_mbox_t *); 8444961713Sgirish static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 8544961713Sgirish p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 8644961713Sgirish 8744961713Sgirish static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 8844961713Sgirish uint16_t, 8944961713Sgirish p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 9044961713Sgirish p_rx_rcr_ring_t *, p_rx_mbox_t *); 9144961713Sgirish static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 9244961713Sgirish p_rx_rcr_ring_t, p_rx_mbox_t); 9344961713Sgirish 9444961713Sgirish static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 9544961713Sgirish uint16_t, 9644961713Sgirish p_nxge_dma_common_t *, 9744961713Sgirish p_rx_rbr_ring_t *, uint32_t); 9844961713Sgirish static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 9944961713Sgirish p_rx_rbr_ring_t); 10044961713Sgirish 10144961713Sgirish static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 10244961713Sgirish p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 10344961713Sgirish 
static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 10444961713Sgirish 105678453a8Sspeer static mblk_t * 106678453a8Sspeer nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int); 10744961713Sgirish 10844961713Sgirish static void nxge_receive_packet(p_nxge_t, 10944961713Sgirish p_rx_rcr_ring_t, 11044961713Sgirish p_rcr_entry_t, 11144961713Sgirish boolean_t *, 11244961713Sgirish mblk_t **, mblk_t **); 11344961713Sgirish 11444961713Sgirish nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 11544961713Sgirish 11644961713Sgirish static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 11744961713Sgirish static void nxge_freeb(p_rx_msg_t); 118da14cebeSEric Cheng static mblk_t *nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t); 119678453a8Sspeer static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t); 12044961713Sgirish 12144961713Sgirish static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 12244961713Sgirish uint32_t, uint32_t); 12344961713Sgirish 12444961713Sgirish static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 12544961713Sgirish p_rx_rbr_ring_t); 12644961713Sgirish 12744961713Sgirish 12844961713Sgirish static nxge_status_t 12944961713Sgirish nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 13044961713Sgirish 13144961713Sgirish nxge_status_t 13244961713Sgirish nxge_rx_port_fatal_err_recover(p_nxge_t); 13344961713Sgirish 134678453a8Sspeer static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t); 135678453a8Sspeer 13644961713Sgirish nxge_status_t 13744961713Sgirish nxge_init_rxdma_channels(p_nxge_t nxgep) 13844961713Sgirish { 139e11f0814SMichael Speer nxge_grp_set_t *set = &nxgep->rx_set; 140da14cebeSEric Cheng int i, count, channel; 141e11f0814SMichael Speer nxge_grp_t *group; 142da14cebeSEric Cheng dc_map_t map; 143da14cebeSEric Cheng int dev_gindex; 14444961713Sgirish 14544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 14644961713Sgirish 
147678453a8Sspeer if (!isLDOMguest(nxgep)) { 148678453a8Sspeer if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) { 149678453a8Sspeer cmn_err(CE_NOTE, "hw_start_common"); 150678453a8Sspeer return (NXGE_ERROR); 151678453a8Sspeer } 152678453a8Sspeer } 153678453a8Sspeer 154678453a8Sspeer /* 155678453a8Sspeer * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8) 156678453a8Sspeer * We only have 8 hardware RDC tables, but we may have 157678453a8Sspeer * up to 16 logical (software-defined) groups of RDCS, 158678453a8Sspeer * if we make use of layer 3 & 4 hardware classification. 159678453a8Sspeer */ 160678453a8Sspeer for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 161678453a8Sspeer if ((1 << i) & set->lg.map) { 162e11f0814SMichael Speer group = set->group[i]; 163da14cebeSEric Cheng dev_gindex = 164da14cebeSEric Cheng nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 165da14cebeSEric Cheng map = nxgep->pt_config.rdc_grps[dev_gindex].map; 166678453a8Sspeer for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 167da14cebeSEric Cheng if ((1 << channel) & map) { 168678453a8Sspeer if ((nxge_grp_dc_add(nxgep, 1696920a987SMisaki Miyashita group, VP_BOUND_RX, channel))) 170e11f0814SMichael Speer goto init_rxdma_channels_exit; 171678453a8Sspeer } 172678453a8Sspeer } 173678453a8Sspeer } 174678453a8Sspeer if (++count == set->lg.count) 175678453a8Sspeer break; 17644961713Sgirish } 17744961713Sgirish 178678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 179678453a8Sspeer return (NXGE_OK); 180e11f0814SMichael Speer 181e11f0814SMichael Speer init_rxdma_channels_exit: 182e11f0814SMichael Speer for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 183e11f0814SMichael Speer if ((1 << i) & set->lg.map) { 184e11f0814SMichael Speer group = set->group[i]; 185da14cebeSEric Cheng dev_gindex = 186da14cebeSEric Cheng nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 187da14cebeSEric Cheng map = nxgep->pt_config.rdc_grps[dev_gindex].map; 
188da14cebeSEric Cheng for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 189da14cebeSEric Cheng if ((1 << channel) & map) { 190e11f0814SMichael Speer nxge_grp_dc_remove(nxgep, 191da14cebeSEric Cheng VP_BOUND_RX, channel); 192e11f0814SMichael Speer } 193e11f0814SMichael Speer } 194e11f0814SMichael Speer } 195e11f0814SMichael Speer if (++count == set->lg.count) 196e11f0814SMichael Speer break; 197e11f0814SMichael Speer } 198e11f0814SMichael Speer 199e11f0814SMichael Speer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 200e11f0814SMichael Speer return (NXGE_ERROR); 201678453a8Sspeer } 202678453a8Sspeer 203678453a8Sspeer nxge_status_t 204678453a8Sspeer nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 205678453a8Sspeer { 20608ac1c49SNicolas Droux nxge_status_t status; 207678453a8Sspeer 208678453a8Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 209678453a8Sspeer 210678453a8Sspeer status = nxge_map_rxdma(nxge, channel); 21144961713Sgirish if (status != NXGE_OK) { 212678453a8Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 213678453a8Sspeer "<== nxge_init_rxdma: status 0x%x", status)); 214678453a8Sspeer return (status); 21544961713Sgirish } 21644961713Sgirish 21708ac1c49SNicolas Droux #if defined(sun4v) 21808ac1c49SNicolas Droux if (isLDOMguest(nxge)) { 21908ac1c49SNicolas Droux /* set rcr_ring */ 22008ac1c49SNicolas Droux p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel]; 22108ac1c49SNicolas Droux 22208ac1c49SNicolas Droux status = nxge_hio_rxdma_bind_intr(nxge, ring, channel); 22308ac1c49SNicolas Droux if (status != NXGE_OK) { 22408ac1c49SNicolas Droux nxge_unmap_rxdma(nxge, channel); 22508ac1c49SNicolas Droux return (status); 22608ac1c49SNicolas Droux } 22708ac1c49SNicolas Droux } 22808ac1c49SNicolas Droux #endif 22908ac1c49SNicolas Droux 230678453a8Sspeer status = nxge_rxdma_hw_start(nxge, channel); 23144961713Sgirish if (status != NXGE_OK) { 232678453a8Sspeer nxge_unmap_rxdma(nxge, channel); 23344961713Sgirish 
} 23444961713Sgirish 235678453a8Sspeer if (!nxge->statsp->rdc_ksp[channel]) 236678453a8Sspeer nxge_setup_rdc_kstats(nxge, channel); 237678453a8Sspeer 238678453a8Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, 239678453a8Sspeer "<== nxge_init_rxdma_channel: status 0x%x", status)); 24044961713Sgirish 24144961713Sgirish return (status); 24244961713Sgirish } 24344961713Sgirish 24444961713Sgirish void 24544961713Sgirish nxge_uninit_rxdma_channels(p_nxge_t nxgep) 24644961713Sgirish { 247678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 248678453a8Sspeer int rdc; 249678453a8Sspeer 25044961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 25144961713Sgirish 252678453a8Sspeer if (set->owned.map == 0) { 253678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 254678453a8Sspeer "nxge_uninit_rxdma_channels: no channels")); 255678453a8Sspeer return; 256678453a8Sspeer } 25744961713Sgirish 258678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 259678453a8Sspeer if ((1 << rdc) & set->owned.map) { 260678453a8Sspeer nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 261678453a8Sspeer } 262678453a8Sspeer } 263678453a8Sspeer 264678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 265678453a8Sspeer } 266678453a8Sspeer 267678453a8Sspeer void 268678453a8Sspeer nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 269678453a8Sspeer { 270678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 271678453a8Sspeer 272678453a8Sspeer if (nxgep->statsp->rdc_ksp[channel]) { 273678453a8Sspeer kstat_delete(nxgep->statsp->rdc_ksp[channel]); 274678453a8Sspeer nxgep->statsp->rdc_ksp[channel] = 0; 275678453a8Sspeer } 276678453a8Sspeer 277678453a8Sspeer nxge_rxdma_hw_stop(nxgep, channel); 278678453a8Sspeer nxge_unmap_rxdma(nxgep, channel); 279678453a8Sspeer 280678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 28144961713Sgirish } 28244961713Sgirish 28344961713Sgirish nxge_status_t 
nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel"));

	/* Issue a reset of the RDC through the NPI layer. */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);

	/* Fold the NPI error code into the driver's status word. */
	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel"));

	return (status);
}

/*
 * nxge_rxdma_regs_dump_channels
 *
 *	Debug aid: dump the FZC (shared) RXDMA registers (service domain
 *	only) and then the per-RDC registers of every channel this
 *	instance owns that has an RBR ring attached.
 */
void
nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->rx_set;
	int rdc;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels"));

	/* FZC registers are only accessible from the service domain. */
	if (!isLDOMguest(nxgep)) {
		npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
		(void) npi_rxdma_dump_fzc_regs(handle);
	}

	if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "nxge_rxdma_regs_dump_channels: "
		    "NULL ring pointer(s)"));
		return;
	}

	if (set->owned.map == 0) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "nxge_rxdma_regs_dump_channels: no channels"));
		return;
	}

	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
		if ((1 << rdc) & set->owned.map) {
			rx_rbr_ring_t *ring =
			    nxgep->rx_rbr_rings->rbr_rings[rdc];
			/* Only dump channels that actually have a ring. */
			if (ring) {
				(void) nxge_dump_rxdma_channel(nxgep, rdc);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump"));
}

/*
 * nxge_dump_rxdma_channel
 *
 *	Debug aid: dump the per-RDC registers of one channel via the
 *	NPI layer.  Returns NXGE_OK or NXGE_ERROR | <npi status>.
 */
nxge_status_t
nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_rxdma_dump_rdc_regs(handle, channel);

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel"));
	return (status);
}

/*
 * nxge_init_rxdma_channel_event_mask
 *
 *	Write (OP_SET) the event mask register of one RDC.
 *	Returns NXGE_OK or NXGE_ERROR | <npi status>.
 */
nxge_status_t
nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
    p_rx_dma_ent_msk_t mask_p)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	/* NOTE(review): debug label says "<==" on entry; cosmetic only. */
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_init_rxdma_channel_event_mask"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p);
	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

/*
 * nxge_init_rxdma_channel_cntl_stat
 *
 *	Write (OP_SET) the control/status register of one RDC.
 *	Returns NXGE_OK or NXGE_ERROR | <npi status>.
 */
nxge_status_t
nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
    p_rx_dma_ctl_stat_t cs_p)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	/* NOTE(review): debug label says "<==" on entry; cosmetic only. */
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_init_rxdma_channel_cntl_stat"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p);

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

/*
 * nxge_rxdma_cfg_rdcgrp_default_rdc
 *
 * Set the default RDC for an RDC Group (Table)
 *
 * Arguments:
 * 	nxgep
 * 	rdcgrp	The group to modify
 * 	rdc	The new default RDC.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_rxdma_cfg_rdc_table_default_rdc()
 *
 * Registers accessed:
 *	RDC_TBL_REG: FZC_ZCP + 0x10000
 *
 * Context:
 *	Service domain
 */
nxge_status_t
nxge_rxdma_cfg_rdcgrp_default_rdc(
	p_nxge_t nxgep,
	uint8_t rdcgrp,
	uint8_t rdc)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	p_nxge_dma_pt_cfg_t p_dma_cfgp;
	p_nxge_rdc_grp_t rdc_grp_p;
	uint8_t actual_rdcgrp, actual_rdc;

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    " ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	/*
	 * This has to be rewritten.  Do we even allow this anymore?
	 */
	/* Record the new default in the software copy of the group. */
	rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp];
	RDC_MAP_IN(rdc_grp_p->map, rdc);
	rdc_grp_p->def_rdc = rdc;

	/*
	 * Translate the partition-relative group/RDC numbers into the
	 * device-absolute values the hardware table expects.
	 */
	actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp);
	actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);

	rs = npi_rxdma_cfg_rdc_table_default_rdc(
	    handle, actual_rdcgrp, actual_rdc);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}
	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    " <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
	return (NXGE_OK);
}

/*
 * nxge_rxdma_cfg_port_default_rdc
 *
 *	Set the port's default RDC (the channel that receives traffic
 *	which matches no classification entry).
 *	Returns NXGE_OK or NXGE_ERROR | <npi status>.
 */
nxge_status_t
nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc)
{
	npi_handle_t handle;

	uint8_t actual_rdc;
	npi_status_t rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    " ==> nxge_rxdma_cfg_port_default_rdc"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * NOTE(review): unlike the group variant above, no
	 * NXGE_ACTUAL_RDC() translation is applied here -- the original
	 * author flagged this as a hack; confirm rdc is already
	 * device-absolute at the call sites.
	 */
	actual_rdc = rdc;	/* XXX Hack! */
	rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc);


	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}
	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    " <== nxge_rxdma_cfg_port_default_rdc"));

	return (NXGE_OK);
}

/*
 * nxge_rxdma_cfg_rcr_threshold
 *
 *	Set the RCR packet-count threshold for one RDC: the number of
 *	completed packets after which the channel raises an interrupt.
 *	Returns NXGE_OK or NXGE_ERROR | <npi status>.
 */
nxge_status_t
nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel,
    uint16_t pkts)
{
	npi_status_t rs = NPI_SUCCESS;
	npi_handle_t handle;
	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    " ==> nxge_rxdma_cfg_rcr_threshold"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}
	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold"));
	return (NXGE_OK);
}

/*
 * nxge_rxdma_cfg_rcr_timeout
 *
 *	Set or disable the RCR timeout for one RDC.  When enable is 0
 *	the timeout interrupt is disabled; otherwise the timeout value
 *	tout is programmed.  Returns NXGE_OK or NXGE_ERROR | <npi status>.
 */
nxge_status_t
nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel,
    uint16_t tout, uint8_t enable)
{
	npi_status_t rs = NPI_SUCCESS;
	npi_handle_t handle;
	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	if (enable == 0) {
		rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel);
	} else {
		rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
		    tout);
	}

	if (rs !=
NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}
	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout"));
	return (NXGE_OK);
}

/*
 * nxge_enable_rxdma_channel
 *
 *	Program one RDC from the configuration composed at init time and
 *	bring it to life: write the ring descriptor (mailbox, RBR, RCR,
 *	buffer sizes), set the RCR threshold and timeout, enable the DMA
 *	(service domain only), kick the RBR with the initial buffer count,
 *	and clear the rbr-empty condition (service domain only).
 *
 * Arguments:
 *	nxgep	The nxge instance.
 *	channel	The channel to enable.
 *	rbr_p	The channel's receive block ring.
 *	rcr_p	The channel's receive completion ring.
 *	mbox_p	The channel's mailbox.
 *
 * Returns:
 *	NXGE_OK, or NXGE_ERROR | <npi status> from the first failing step.
 */
nxge_status_t
nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
{
	npi_handle_t handle;
	rdc_desc_cfg_t rdc_desc;
	p_rcrcfig_b_t cfgb_p;
	npi_status_t rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Use configuration data composed at init time.
	 * Write to hardware the receive ring configurations.
	 */
	rdc_desc.mbox_enable = 1;
	rdc_desc.mbox_addr = mbox_p->mbox_addr;
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
	    mbox_p->mbox_addr, rdc_desc.mbox_addr));

	rdc_desc.rbr_len = rbr_p->rbb_max;
	rdc_desc.rbr_addr = rbr_p->rbr_addr;

	/*
	 * NOTE(review): no default case -- page_size is left
	 * uninitialized if rx_bksize_code is ever outside the four
	 * RBR_BKSIZE_* values; presumably it is constrained at
	 * configuration time.  Confirm.
	 */
	switch (nxgep->rx_bksize_code) {
	case RBR_BKSIZE_4K:
		rdc_desc.page_size = SIZE_4KB;
		break;
	case RBR_BKSIZE_8K:
		rdc_desc.page_size = SIZE_8KB;
		break;
	case RBR_BKSIZE_16K:
		rdc_desc.page_size = SIZE_16KB;
		break;
	case RBR_BKSIZE_32K:
		rdc_desc.page_size = SIZE_32KB;
		break;
	}

	/* The three per-channel packet buffer sizes; all marked valid. */
	rdc_desc.size0 = rbr_p->npi_pkt_buf_size0;
	rdc_desc.valid0 = 1;

	rdc_desc.size1 = rbr_p->npi_pkt_buf_size1;
	rdc_desc.valid1 = 1;

	rdc_desc.size2 = rbr_p->npi_pkt_buf_size2;
	rdc_desc.valid2 = 1;

	rdc_desc.full_hdr = rcr_p->full_hdr_flag;
	rdc_desc.offset = rcr_p->sw_priv_hdr_len;

	rdc_desc.rcr_len = rcr_p->comp_size;
	rdc_desc.rcr_addr = rcr_p->rcr_addr;

	cfgb_p = &(rcr_p->rcr_cfgb);
	rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres;
	/* For now, disable this timeout in a guest domain. */
	if (isLDOMguest(nxgep)) {
		rdc_desc.rcr_timeout = 0;
		rdc_desc.rcr_timeout_enable = 0;
	} else {
		rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout;
		rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout;
	}

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
	    "rbr_len qlen %d pagesize code %d rcr_len %d",
	    rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
	    "size 0 %d size 1 %d size 2 %d",
	    rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
	    rbr_p->npi_pkt_buf_size2));

	/* Write the assembled descriptor to the hardware. */
	rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/*
	 * Enable the timeout and threshold.
	 */
	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
	    rdc_desc.rcr_threshold);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
	    rdc_desc.rcr_timeout);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/*
	 * Enabling the DMA is a service-domain-only step; presumably
	 * the service domain performs it on behalf of a guest.
	 */
	if (!isLDOMguest(nxgep)) {
		/* Enable the DMA */
		rs = npi_rxdma_cfg_rdc_enable(handle, channel);
		if (rs != NPI_SUCCESS) {
			return (NXGE_ERROR | rs);
		}
	}

	/* Kick the DMA engine. */
	npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);

	if (!isLDOMguest(nxgep)) {
		/* Clear the rbr empty bit */
		(void) npi_rxdma_channel_rbr_empty_clear(handle, channel);
	}

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel"));

	return (NXGE_OK);
}

/*
 * nxge_disable_rxdma_channel
 *
 *	Disable the DMA engine of one RDC via the NPI layer.
 *	Returns NXGE_OK or NXGE_ERROR | <npi status>.
 */
nxge_status_t
nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	/* disable the DMA */
	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_disable_rxdma_channel:failed (0x%x)",
		    rs));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel"));
	return (NXGE_OK);
}

/*
 * nxge_rxdma_channel_rcrflush
 *
 *	Force the hardware to flush its internal RCR state for one
 *	channel out to memory.  Always returns NXGE_OK (the NPI flush
 *	call has no status to propagate here).
 */
nxge_status_t
nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel)
{
	npi_handle_t handle;
	nxge_status_t status = NXGE_OK;

	/* NOTE(review): debug label says "<==" on entry; cosmetic only. */
	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_init_rxdma_channel_rcrflush"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	npi_rxdma_rdc_rcr_flush(handle, channel);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_init_rxdma_channel_rcrflsh"));
	return (status);

}

/*
 * Helpers for the binary search in nxge_rxbuf_pp_to_vp() below.
 * MID_INDEX computes the midpoint of [l, r]; the direction constants
 * encode which way the search must move, and NO_HINT marks an unset
 * per-buffer-size hint.
 */
#define	MID_INDEX(l, r) ((r + l + 1) >> 1)

#define	TO_LEFT -1
#define	TO_RIGHT 1
#define	BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
#define	BOTH_LEFT (TO_LEFT + TO_LEFT)
#define	IN_MIDDLE (TO_RIGHT + TO_LEFT)
#define	NO_HINT 0xffffffff

/*
 * nxge_rxbuf_pp_to_vp
 *
 *	Translate a packet-buffer physical address reported by the
 *	hardware (pkt_buf_addr_pp) into the corresponding virtual
 *	address, buffer offset and message index, by locating the
 *	buffer block that contains it in the ring's index table.
 *
 * Arguments:
 *	nxgep		The nxge instance.
 *	rbr_p		The receive block ring to search.
 *	pktbufsz_type	Buffer size class 0/1/2, or RCR_SINGLE_BLOCK.
 *	pkt_buf_addr_pp	The packet buffer physical address to look up.
 *	pkt_buf_addr_p	Out: the buffer's virtual address.
 *	bufoffset	Out: offset of the buffer within its block.
 *	msg_index	Out: index of the rx message block.
 *
 * Returns:
 *	NXGE_ERROR for an unknown pktbufsz_type; otherwise the result
 *	of the lookup (remainder of the function is outside this view).
 */
/*ARGSUSED*/
nxge_status_t
nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p,
	uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
	uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
{
	int bufsize;
	uint64_t pktbuf_pp;
	uint64_t dvma_addr;
	rxring_info_t *ring_info;
	int base_side, end_side;
	int r_index, l_index, anchor_index;
	int found, search_done;
	uint32_t offset, chunk_size, block_size, page_size_mask;
	uint32_t chunk_index, block_index, total_index;
	int max_iterations, iteration;
	rxbuf_index_info_t *bufinfo;

	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp"));

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
	    pkt_buf_addr_pp,
	    pktbufsz_type));
	/* On 32-bit x86 a pointer is 32 bits; widen in two steps. */
#if defined(__i386)
	pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp;
#else
	pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
#endif

	/* Map the size class to the ring's configured buffer size. */
	switch (pktbufsz_type) {
	case 0:
		bufsize = rbr_p->pkt_buf_size0;
		break;
	case 1:
		bufsize = rbr_p->pkt_buf_size1;
		break;
	case 2:
		bufsize = rbr_p->pkt_buf_size2;
		break;
	case RCR_SINGLE_BLOCK:
		bufsize = 0;
		anchor_index = 0;
		break;
	default:
		return (NXGE_ERROR);
	}

	/* With a single memory block there is nothing to search. */
	if (rbr_p->num_blocks == 1) {
		anchor_index = 0;
		ring_info = rbr_p->ring_info;
		bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
		    "buf_pp $%p btype %d anchor_index %d "
		    "bufinfo $%p",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index,
		    bufinfo));

		goto found_index;
	}

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: "
	    "buf_pp $%p btype %d anchor_index %d",
	    pkt_buf_addr_pp,
	    pktbufsz_type,
	    anchor_index));

	ring_info = rbr_p->ring_info;
	found = B_FALSE;
	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
	iteration = 0;
	max_iterations = ring_info->max_iterations;
	/*
	 * First check if this block has been seen
	 * recently. This is indicated by a hint which
	 * is initialized when the first buffer of the block
	 * is seen. The hint is reset when the last buffer of
	 * the block has been processed.
	 * As three block sizes are supported, three hints
	 * are kept. The idea behind the hints is that once
	 * the hardware uses a block for a buffer of that
	 * size, it will use it exclusively for that size
	 * and will use it until it is exhausted. It is assumed
	 * that there would a single block being used for the same
	 * buffer sizes at any given time.
78144961713Sgirish */ 78244961713Sgirish if (ring_info->hint[pktbufsz_type] != NO_HINT) { 78344961713Sgirish anchor_index = ring_info->hint[pktbufsz_type]; 78444961713Sgirish dvma_addr = bufinfo[anchor_index].dvma_addr; 78544961713Sgirish chunk_size = bufinfo[anchor_index].buf_size; 78644961713Sgirish if ((pktbuf_pp >= dvma_addr) && 78752ccf843Smisaki (pktbuf_pp < (dvma_addr + chunk_size))) { 78844961713Sgirish found = B_TRUE; 78944961713Sgirish /* 79044961713Sgirish * check if this is the last buffer in the block 79144961713Sgirish * If so, then reset the hint for the size; 79244961713Sgirish */ 79344961713Sgirish 79444961713Sgirish if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size)) 79544961713Sgirish ring_info->hint[pktbufsz_type] = NO_HINT; 79644961713Sgirish } 79744961713Sgirish } 79844961713Sgirish 79944961713Sgirish if (found == B_FALSE) { 80044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 80152ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (!found)" 80252ccf843Smisaki "buf_pp $%p btype %d anchor_index %d", 80352ccf843Smisaki pkt_buf_addr_pp, 80452ccf843Smisaki pktbufsz_type, 80552ccf843Smisaki anchor_index)); 80644961713Sgirish 80744961713Sgirish /* 80844961713Sgirish * This is the first buffer of the block of this 80944961713Sgirish * size. Need to search the whole information 81044961713Sgirish * array. 81144961713Sgirish * the search algorithm uses a binary tree search 81244961713Sgirish * algorithm. It assumes that the information is 81344961713Sgirish * already sorted with increasing order 81444961713Sgirish * info[0] < info[1] < info[2] .... 
< info[n-1] 81544961713Sgirish * where n is the size of the information array 81644961713Sgirish */ 81744961713Sgirish r_index = rbr_p->num_blocks - 1; 81844961713Sgirish l_index = 0; 81944961713Sgirish search_done = B_FALSE; 82044961713Sgirish anchor_index = MID_INDEX(r_index, l_index); 82144961713Sgirish while (search_done == B_FALSE) { 82244961713Sgirish if ((r_index == l_index) || 82352ccf843Smisaki (iteration >= max_iterations)) 82444961713Sgirish search_done = B_TRUE; 82544961713Sgirish end_side = TO_RIGHT; /* to the right */ 82644961713Sgirish base_side = TO_LEFT; /* to the left */ 82744961713Sgirish /* read the DVMA address information and sort it */ 82844961713Sgirish dvma_addr = bufinfo[anchor_index].dvma_addr; 82944961713Sgirish chunk_size = bufinfo[anchor_index].buf_size; 83044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 83152ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (searching)" 83252ccf843Smisaki "buf_pp $%p btype %d " 83352ccf843Smisaki "anchor_index %d chunk_size %d dvmaaddr $%p", 83452ccf843Smisaki pkt_buf_addr_pp, 83552ccf843Smisaki pktbufsz_type, 83652ccf843Smisaki anchor_index, 83752ccf843Smisaki chunk_size, 83852ccf843Smisaki dvma_addr)); 83944961713Sgirish 84044961713Sgirish if (pktbuf_pp >= dvma_addr) 84144961713Sgirish base_side = TO_RIGHT; /* to the right */ 84244961713Sgirish if (pktbuf_pp < (dvma_addr + chunk_size)) 84344961713Sgirish end_side = TO_LEFT; /* to the left */ 84444961713Sgirish 84544961713Sgirish switch (base_side + end_side) { 84652ccf843Smisaki case IN_MIDDLE: 84752ccf843Smisaki /* found */ 84852ccf843Smisaki found = B_TRUE; 84952ccf843Smisaki search_done = B_TRUE; 85052ccf843Smisaki if ((pktbuf_pp + bufsize) < 85152ccf843Smisaki (dvma_addr + chunk_size)) 85252ccf843Smisaki ring_info->hint[pktbufsz_type] = 85352ccf843Smisaki bufinfo[anchor_index].buf_index; 85452ccf843Smisaki break; 85552ccf843Smisaki case BOTH_RIGHT: 85652ccf843Smisaki /* not found: go to the right */ 85752ccf843Smisaki l_index = anchor_index + 1; 
85852ccf843Smisaki anchor_index = MID_INDEX(r_index, l_index); 85952ccf843Smisaki break; 86052ccf843Smisaki 86152ccf843Smisaki case BOTH_LEFT: 86252ccf843Smisaki /* not found: go to the left */ 86352ccf843Smisaki r_index = anchor_index - 1; 86452ccf843Smisaki anchor_index = MID_INDEX(r_index, l_index); 86552ccf843Smisaki break; 86652ccf843Smisaki default: /* should not come here */ 86752ccf843Smisaki return (NXGE_ERROR); 86844961713Sgirish } 86944961713Sgirish iteration++; 87044961713Sgirish } 87144961713Sgirish 87244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 87352ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (search done)" 87452ccf843Smisaki "buf_pp $%p btype %d anchor_index %d", 87552ccf843Smisaki pkt_buf_addr_pp, 87652ccf843Smisaki pktbufsz_type, 87752ccf843Smisaki anchor_index)); 87844961713Sgirish } 87944961713Sgirish 88044961713Sgirish if (found == B_FALSE) { 88144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 88252ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (search failed)" 88352ccf843Smisaki "buf_pp $%p btype %d anchor_index %d", 88452ccf843Smisaki pkt_buf_addr_pp, 88552ccf843Smisaki pktbufsz_type, 88652ccf843Smisaki anchor_index)); 88744961713Sgirish return (NXGE_ERROR); 88844961713Sgirish } 88944961713Sgirish 89044961713Sgirish found_index: 89144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 89252ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 89352ccf843Smisaki "buf_pp $%p btype %d bufsize %d anchor_index %d", 89452ccf843Smisaki pkt_buf_addr_pp, 89552ccf843Smisaki pktbufsz_type, 89652ccf843Smisaki bufsize, 89752ccf843Smisaki anchor_index)); 89844961713Sgirish 89944961713Sgirish /* index of the first block in this chunk */ 90044961713Sgirish chunk_index = bufinfo[anchor_index].start_index; 90144961713Sgirish dvma_addr = bufinfo[anchor_index].dvma_addr; 90244961713Sgirish page_size_mask = ring_info->block_size_mask; 90344961713Sgirish 90444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 90552ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 
90652ccf843Smisaki "buf_pp $%p btype %d bufsize %d " 90752ccf843Smisaki "anchor_index %d chunk_index %d dvma $%p", 90852ccf843Smisaki pkt_buf_addr_pp, 90952ccf843Smisaki pktbufsz_type, 91052ccf843Smisaki bufsize, 91152ccf843Smisaki anchor_index, 91252ccf843Smisaki chunk_index, 91352ccf843Smisaki dvma_addr)); 91444961713Sgirish 91544961713Sgirish offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 91644961713Sgirish block_size = rbr_p->block_size; /* System block(page) size */ 91744961713Sgirish 91844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 91952ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 92052ccf843Smisaki "buf_pp $%p btype %d bufsize %d " 92152ccf843Smisaki "anchor_index %d chunk_index %d dvma $%p " 92252ccf843Smisaki "offset %d block_size %d", 92352ccf843Smisaki pkt_buf_addr_pp, 92452ccf843Smisaki pktbufsz_type, 92552ccf843Smisaki bufsize, 92652ccf843Smisaki anchor_index, 92752ccf843Smisaki chunk_index, 92852ccf843Smisaki dvma_addr, 92952ccf843Smisaki offset, 93052ccf843Smisaki block_size)); 93144961713Sgirish 93244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 93344961713Sgirish 93444961713Sgirish block_index = (offset / block_size); /* index within chunk */ 93544961713Sgirish total_index = chunk_index + block_index; 93644961713Sgirish 93744961713Sgirish 93844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 93952ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: " 94052ccf843Smisaki "total_index %d dvma_addr $%p " 94152ccf843Smisaki "offset %d block_size %d " 94252ccf843Smisaki "block_index %d ", 94352ccf843Smisaki total_index, dvma_addr, 94452ccf843Smisaki offset, block_size, 94552ccf843Smisaki block_index)); 946adfcba55Sjoycey #if defined(__i386) 947adfcba55Sjoycey *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 94852ccf843Smisaki (uint32_t)offset); 949adfcba55Sjoycey #else 950adfcba55Sjoycey *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 95152ccf843Smisaki 
(uint64_t)offset); 952adfcba55Sjoycey #endif 95344961713Sgirish 95444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 95552ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: " 95652ccf843Smisaki "total_index %d dvma_addr $%p " 95752ccf843Smisaki "offset %d block_size %d " 95852ccf843Smisaki "block_index %d " 95952ccf843Smisaki "*pkt_buf_addr_p $%p", 96052ccf843Smisaki total_index, dvma_addr, 96152ccf843Smisaki offset, block_size, 96252ccf843Smisaki block_index, 96352ccf843Smisaki *pkt_buf_addr_p)); 96444961713Sgirish 96544961713Sgirish 96644961713Sgirish *msg_index = total_index; 96744961713Sgirish *bufoffset = (offset & page_size_mask); 96844961713Sgirish 96944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 97052ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: get msg index: " 97152ccf843Smisaki "msg_index %d bufoffset_index %d", 97252ccf843Smisaki *msg_index, 97352ccf843Smisaki *bufoffset)); 97444961713Sgirish 97544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 97644961713Sgirish 97744961713Sgirish return (NXGE_OK); 97844961713Sgirish } 97944961713Sgirish 98044961713Sgirish /* 98144961713Sgirish * used by quick sort (qsort) function 98244961713Sgirish * to perform comparison 98344961713Sgirish */ 98444961713Sgirish static int 98544961713Sgirish nxge_sort_compare(const void *p1, const void *p2) 98644961713Sgirish { 98744961713Sgirish 98844961713Sgirish rxbuf_index_info_t *a, *b; 98944961713Sgirish 99044961713Sgirish a = (rxbuf_index_info_t *)p1; 99144961713Sgirish b = (rxbuf_index_info_t *)p2; 99244961713Sgirish 99344961713Sgirish if (a->dvma_addr > b->dvma_addr) 99444961713Sgirish return (1); 99544961713Sgirish if (a->dvma_addr < b->dvma_addr) 99644961713Sgirish return (-1); 99744961713Sgirish return (0); 99844961713Sgirish } 99944961713Sgirish 100044961713Sgirish 100144961713Sgirish 100244961713Sgirish /* 100344961713Sgirish * grabbed this sort implementation from common/syscall/avl.c 100444961713Sgirish * 100544961713Sgirish */ 100644961713Sgirish /* 
100744961713Sgirish * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 100844961713Sgirish * v = Ptr to array/vector of objs 100944961713Sgirish * n = # objs in the array 101044961713Sgirish * s = size of each obj (must be multiples of a word size) 101144961713Sgirish * f = ptr to function to compare two objs 101244961713Sgirish * returns (-1 = less than, 0 = equal, 1 = greater than 101344961713Sgirish */ 101444961713Sgirish void 101544961713Sgirish nxge_ksort(caddr_t v, int n, int s, int (*f)()) 101644961713Sgirish { 101744961713Sgirish int g, i, j, ii; 101844961713Sgirish unsigned int *p1, *p2; 101944961713Sgirish unsigned int tmp; 102044961713Sgirish 102144961713Sgirish /* No work to do */ 102244961713Sgirish if (v == NULL || n <= 1) 102344961713Sgirish return; 102444961713Sgirish /* Sanity check on arguments */ 102544961713Sgirish ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 102644961713Sgirish ASSERT(s > 0); 102744961713Sgirish 102844961713Sgirish for (g = n / 2; g > 0; g /= 2) { 102944961713Sgirish for (i = g; i < n; i++) { 103044961713Sgirish for (j = i - g; j >= 0 && 103152ccf843Smisaki (*f)(v + j * s, v + (j + g) * s) == 1; 103252ccf843Smisaki j -= g) { 103344961713Sgirish p1 = (unsigned *)(v + j * s); 103444961713Sgirish p2 = (unsigned *)(v + (j + g) * s); 103544961713Sgirish for (ii = 0; ii < s / 4; ii++) { 103644961713Sgirish tmp = *p1; 103744961713Sgirish *p1++ = *p2; 103844961713Sgirish *p2++ = tmp; 103944961713Sgirish } 104044961713Sgirish } 104144961713Sgirish } 104244961713Sgirish } 104344961713Sgirish } 104444961713Sgirish 104544961713Sgirish /* 104644961713Sgirish * Initialize data structures required for rxdma 104744961713Sgirish * buffer dvma->vmem address lookup 104844961713Sgirish */ 104944961713Sgirish /*ARGSUSED*/ 105044961713Sgirish static nxge_status_t 105144961713Sgirish nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 105244961713Sgirish { 105344961713Sgirish 105444961713Sgirish int index; 
105544961713Sgirish rxring_info_t *ring_info; 105644961713Sgirish int max_iteration = 0, max_index = 0; 105744961713Sgirish 105844961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 105944961713Sgirish 106044961713Sgirish ring_info = rbrp->ring_info; 106144961713Sgirish ring_info->hint[0] = NO_HINT; 106244961713Sgirish ring_info->hint[1] = NO_HINT; 106344961713Sgirish ring_info->hint[2] = NO_HINT; 106444961713Sgirish max_index = rbrp->num_blocks; 106544961713Sgirish 106644961713Sgirish /* read the DVMA address information and sort it */ 106744961713Sgirish /* do init of the information array */ 106844961713Sgirish 106944961713Sgirish 107044961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 107152ccf843Smisaki " nxge_rxbuf_index_info_init Sort ptrs")); 107244961713Sgirish 107344961713Sgirish /* sort the array */ 107444961713Sgirish nxge_ksort((void *)ring_info->buffer, max_index, 107552ccf843Smisaki sizeof (rxbuf_index_info_t), nxge_sort_compare); 107644961713Sgirish 107744961713Sgirish 107844961713Sgirish 107944961713Sgirish for (index = 0; index < max_index; index++) { 108044961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 108152ccf843Smisaki " nxge_rxbuf_index_info_init: sorted chunk %d " 108252ccf843Smisaki " ioaddr $%p kaddr $%p size %x", 108352ccf843Smisaki index, ring_info->buffer[index].dvma_addr, 108452ccf843Smisaki ring_info->buffer[index].kaddr, 108552ccf843Smisaki ring_info->buffer[index].buf_size)); 108644961713Sgirish } 108744961713Sgirish 108844961713Sgirish max_iteration = 0; 108944961713Sgirish while (max_index >= (1ULL << max_iteration)) 109044961713Sgirish max_iteration++; 109144961713Sgirish ring_info->max_iterations = max_iteration + 1; 109244961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 109352ccf843Smisaki " nxge_rxbuf_index_info_init Find max iter %d", 109452ccf843Smisaki ring_info->max_iterations)); 109544961713Sgirish 109644961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 
109744961713Sgirish return (NXGE_OK); 109844961713Sgirish } 109944961713Sgirish 11000a8e077aSspeer /* ARGSUSED */ 110144961713Sgirish void 110244961713Sgirish nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 110344961713Sgirish { 110444961713Sgirish #ifdef NXGE_DEBUG 110544961713Sgirish 110644961713Sgirish uint32_t bptr; 110744961713Sgirish uint64_t pp; 110844961713Sgirish 110944961713Sgirish bptr = entry_p->bits.hdw.pkt_buf_addr; 111044961713Sgirish 111144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 111252ccf843Smisaki "\trcr entry $%p " 111352ccf843Smisaki "\trcr entry 0x%0llx " 111452ccf843Smisaki "\trcr entry 0x%08x " 111552ccf843Smisaki "\trcr entry 0x%08x " 111652ccf843Smisaki "\tvalue 0x%0llx\n" 111752ccf843Smisaki "\tmulti = %d\n" 111852ccf843Smisaki "\tpkt_type = 0x%x\n" 111952ccf843Smisaki "\tzero_copy = %d\n" 112052ccf843Smisaki "\tnoport = %d\n" 112152ccf843Smisaki "\tpromis = %d\n" 112252ccf843Smisaki "\terror = 0x%04x\n" 112352ccf843Smisaki "\tdcf_err = 0x%01x\n" 112452ccf843Smisaki "\tl2_len = %d\n" 112552ccf843Smisaki "\tpktbufsize = %d\n" 112652ccf843Smisaki "\tpkt_buf_addr = $%p\n" 112752ccf843Smisaki "\tpkt_buf_addr (<< 6) = $%p\n", 112852ccf843Smisaki entry_p, 112952ccf843Smisaki *(int64_t *)entry_p, 113052ccf843Smisaki *(int32_t *)entry_p, 113152ccf843Smisaki *(int32_t *)((char *)entry_p + 32), 113252ccf843Smisaki entry_p->value, 113352ccf843Smisaki entry_p->bits.hdw.multi, 113452ccf843Smisaki entry_p->bits.hdw.pkt_type, 113552ccf843Smisaki entry_p->bits.hdw.zero_copy, 113652ccf843Smisaki entry_p->bits.hdw.noport, 113752ccf843Smisaki entry_p->bits.hdw.promis, 113852ccf843Smisaki entry_p->bits.hdw.error, 113952ccf843Smisaki entry_p->bits.hdw.dcf_err, 114052ccf843Smisaki entry_p->bits.hdw.l2_len, 114152ccf843Smisaki entry_p->bits.hdw.pktbufsz, 114252ccf843Smisaki bptr, 114352ccf843Smisaki entry_p->bits.ldw.pkt_buf_addr)); 114444961713Sgirish 114544961713Sgirish pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 114652ccf843Smisaki 
RCR_PKT_BUF_ADDR_SHIFT; 114744961713Sgirish 114844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 114952ccf843Smisaki pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 115044961713Sgirish #endif 115144961713Sgirish } 115244961713Sgirish 115344961713Sgirish void 115444961713Sgirish nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 115544961713Sgirish { 115644961713Sgirish npi_handle_t handle; 115744961713Sgirish rbr_stat_t rbr_stat; 115844961713Sgirish addr44_t hd_addr; 115944961713Sgirish addr44_t tail_addr; 116044961713Sgirish uint16_t qlen; 116144961713Sgirish 116244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 116352ccf843Smisaki "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 116444961713Sgirish 116544961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 116644961713Sgirish 116744961713Sgirish /* RBR head */ 116844961713Sgirish hd_addr.addr = 0; 116944961713Sgirish (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1170adfcba55Sjoycey #if defined(__i386) 117153f3d8ecSyc printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 117252ccf843Smisaki (void *)(uint32_t)hd_addr.addr); 1173adfcba55Sjoycey #else 117453f3d8ecSyc printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 117552ccf843Smisaki (void *)hd_addr.addr); 1176adfcba55Sjoycey #endif 117744961713Sgirish 117844961713Sgirish /* RBR stats */ 117944961713Sgirish (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 118044961713Sgirish printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 118144961713Sgirish 118244961713Sgirish /* RCR tail */ 118344961713Sgirish tail_addr.addr = 0; 118444961713Sgirish (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1185adfcba55Sjoycey #if defined(__i386) 118653f3d8ecSyc printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 118752ccf843Smisaki (void *)(uint32_t)tail_addr.addr); 1188adfcba55Sjoycey #else 118953f3d8ecSyc printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 119052ccf843Smisaki (void *)tail_addr.addr); 1191adfcba55Sjoycey 
#endif 119244961713Sgirish 119344961713Sgirish /* RCR qlen */ 119444961713Sgirish (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 119544961713Sgirish printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 119644961713Sgirish 119744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 119852ccf843Smisaki "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 119944961713Sgirish } 120044961713Sgirish 120144961713Sgirish nxge_status_t 120244961713Sgirish nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 120344961713Sgirish { 1204678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 1205678453a8Sspeer nxge_status_t status; 1206678453a8Sspeer npi_status_t rs; 1207678453a8Sspeer int rdc; 120844961713Sgirish 120944961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 121052ccf843Smisaki "==> nxge_rxdma_hw_mode: mode %d", enable)); 121144961713Sgirish 121244961713Sgirish if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 121344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1214678453a8Sspeer "<== nxge_rxdma_mode: not initialized")); 121544961713Sgirish return (NXGE_ERROR); 121644961713Sgirish } 121744961713Sgirish 1218678453a8Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1219678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1220678453a8Sspeer "<== nxge_tx_port_fatal_err_recover: " 1221678453a8Sspeer "NULL ring pointer(s)")); 122244961713Sgirish return (NXGE_ERROR); 122344961713Sgirish } 122444961713Sgirish 1225678453a8Sspeer if (set->owned.map == 0) { 122644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1227678453a8Sspeer "nxge_rxdma_regs_dump_channels: no channels")); 1228678453a8Sspeer return (NULL); 122944961713Sgirish } 123044961713Sgirish 1231678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1232678453a8Sspeer if ((1 << rdc) & set->owned.map) { 1233678453a8Sspeer rx_rbr_ring_t *ring = 1234678453a8Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 1235678453a8Sspeer npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 1236678453a8Sspeer if (ring) { 
1237678453a8Sspeer if (enable) { 1238678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1239678453a8Sspeer "==> nxge_rxdma_hw_mode: " 1240678453a8Sspeer "channel %d (enable)", rdc)); 1241678453a8Sspeer rs = npi_rxdma_cfg_rdc_enable 1242678453a8Sspeer (handle, rdc); 1243678453a8Sspeer } else { 1244678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1245678453a8Sspeer "==> nxge_rxdma_hw_mode: " 1246678453a8Sspeer "channel %d disable)", rdc)); 1247678453a8Sspeer rs = npi_rxdma_cfg_rdc_disable 1248678453a8Sspeer (handle, rdc); 1249678453a8Sspeer } 1250678453a8Sspeer } 125144961713Sgirish } 125244961713Sgirish } 125344961713Sgirish 125444961713Sgirish status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 125544961713Sgirish 125644961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 125752ccf843Smisaki "<== nxge_rxdma_hw_mode: status 0x%x", status)); 125844961713Sgirish 125944961713Sgirish return (status); 126044961713Sgirish } 126144961713Sgirish 126244961713Sgirish void 126344961713Sgirish nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 126444961713Sgirish { 126544961713Sgirish npi_handle_t handle; 126644961713Sgirish 126744961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, 126852ccf843Smisaki "==> nxge_rxdma_enable_channel: channel %d", channel)); 126944961713Sgirish 127044961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 127144961713Sgirish (void) npi_rxdma_cfg_rdc_enable(handle, channel); 127244961713Sgirish 127344961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 127444961713Sgirish } 127544961713Sgirish 127644961713Sgirish void 127744961713Sgirish nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 127844961713Sgirish { 127944961713Sgirish npi_handle_t handle; 128044961713Sgirish 128144961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, 128252ccf843Smisaki "==> nxge_rxdma_disable_channel: channel %d", channel)); 128344961713Sgirish 128444961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 128544961713Sgirish (void) 
npi_rxdma_cfg_rdc_disable(handle, channel); 128644961713Sgirish 128744961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 128844961713Sgirish } 128944961713Sgirish 129044961713Sgirish void 129144961713Sgirish nxge_hw_start_rx(p_nxge_t nxgep) 129244961713Sgirish { 129344961713Sgirish NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 129444961713Sgirish 129544961713Sgirish (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 129644961713Sgirish (void) nxge_rx_mac_enable(nxgep); 129744961713Sgirish 129844961713Sgirish NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 129944961713Sgirish } 130044961713Sgirish 130144961713Sgirish /*ARGSUSED*/ 130244961713Sgirish void 130344961713Sgirish nxge_fixup_rxdma_rings(p_nxge_t nxgep) 130444961713Sgirish { 1305678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 1306678453a8Sspeer int rdc; 130744961713Sgirish 130844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 130944961713Sgirish 1310678453a8Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1311678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1312678453a8Sspeer "<== nxge_tx_port_fatal_err_recover: " 1313678453a8Sspeer "NULL ring pointer(s)")); 131444961713Sgirish return; 131544961713Sgirish } 131644961713Sgirish 1317678453a8Sspeer if (set->owned.map == 0) { 131844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1319678453a8Sspeer "nxge_rxdma_regs_dump_channels: no channels")); 132044961713Sgirish return; 132144961713Sgirish } 132244961713Sgirish 1323678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1324678453a8Sspeer if ((1 << rdc) & set->owned.map) { 1325678453a8Sspeer rx_rbr_ring_t *ring = 1326678453a8Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 1327678453a8Sspeer if (ring) { 1328678453a8Sspeer nxge_rxdma_hw_stop(nxgep, rdc); 1329678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 133052ccf843Smisaki "==> nxge_fixup_rxdma_rings: " 133152ccf843Smisaki "channel %d ring $%px", 
133252ccf843Smisaki rdc, ring)); 1333678453a8Sspeer (void) nxge_rxdma_fixup_channel 1334678453a8Sspeer (nxgep, rdc, rdc); 1335678453a8Sspeer } 1336678453a8Sspeer } 133744961713Sgirish } 133844961713Sgirish 133944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 134044961713Sgirish } 134144961713Sgirish 134244961713Sgirish void 134344961713Sgirish nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 134444961713Sgirish { 134544961713Sgirish int i; 134644961713Sgirish 134744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 134844961713Sgirish i = nxge_rxdma_get_ring_index(nxgep, channel); 134944961713Sgirish if (i < 0) { 135044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 135152ccf843Smisaki "<== nxge_rxdma_fix_channel: no entry found")); 135244961713Sgirish return; 135344961713Sgirish } 135444961713Sgirish 135544961713Sgirish nxge_rxdma_fixup_channel(nxgep, channel, i); 135644961713Sgirish 1357678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel")); 135844961713Sgirish } 135944961713Sgirish 136044961713Sgirish void 136144961713Sgirish nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry) 136244961713Sgirish { 136344961713Sgirish int ndmas; 136444961713Sgirish p_rx_rbr_rings_t rx_rbr_rings; 136544961713Sgirish p_rx_rbr_ring_t *rbr_rings; 136644961713Sgirish p_rx_rcr_rings_t rx_rcr_rings; 136744961713Sgirish p_rx_rcr_ring_t *rcr_rings; 136844961713Sgirish p_rx_mbox_areas_t rx_mbox_areas_p; 136944961713Sgirish p_rx_mbox_t *rx_mbox_p; 137044961713Sgirish p_nxge_dma_pool_t dma_buf_poolp; 137144961713Sgirish p_nxge_dma_pool_t dma_cntl_poolp; 137244961713Sgirish p_rx_rbr_ring_t rbrp; 137344961713Sgirish p_rx_rcr_ring_t rcrp; 137444961713Sgirish p_rx_mbox_t mboxp; 137544961713Sgirish p_nxge_dma_common_t dmap; 137644961713Sgirish nxge_status_t status = NXGE_OK; 137744961713Sgirish 137844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel")); 
137944961713Sgirish 138044961713Sgirish (void) nxge_rxdma_stop_channel(nxgep, channel); 138144961713Sgirish 138244961713Sgirish dma_buf_poolp = nxgep->rx_buf_pool_p; 138344961713Sgirish dma_cntl_poolp = nxgep->rx_cntl_pool_p; 138444961713Sgirish 138544961713Sgirish if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 138644961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 138752ccf843Smisaki "<== nxge_rxdma_fixup_channel: buf not allocated")); 138844961713Sgirish return; 138944961713Sgirish } 139044961713Sgirish 139144961713Sgirish ndmas = dma_buf_poolp->ndmas; 139244961713Sgirish if (!ndmas) { 139344961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 139452ccf843Smisaki "<== nxge_rxdma_fixup_channel: no dma allocated")); 139544961713Sgirish return; 139644961713Sgirish } 139744961713Sgirish 1398a3c5bd6dSspeer rx_rbr_rings = nxgep->rx_rbr_rings; 139944961713Sgirish rx_rcr_rings = nxgep->rx_rcr_rings; 140044961713Sgirish rbr_rings = rx_rbr_rings->rbr_rings; 140144961713Sgirish rcr_rings = rx_rcr_rings->rcr_rings; 140244961713Sgirish rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 140344961713Sgirish rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 140444961713Sgirish 140544961713Sgirish /* Reinitialize the receive block and completion rings */ 140644961713Sgirish rbrp = (p_rx_rbr_ring_t)rbr_rings[entry], 140752ccf843Smisaki rcrp = (p_rx_rcr_ring_t)rcr_rings[entry], 140852ccf843Smisaki mboxp = (p_rx_mbox_t)rx_mbox_p[entry]; 140944961713Sgirish 141044961713Sgirish 141144961713Sgirish rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 141244961713Sgirish rbrp->rbr_rd_index = 0; 141344961713Sgirish rcrp->comp_rd_index = 0; 141444961713Sgirish rcrp->comp_wt_index = 0; 141544961713Sgirish 141644961713Sgirish dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 141744961713Sgirish bzero((caddr_t)dmap->kaddrp, dmap->alength); 141844961713Sgirish 141944961713Sgirish status = nxge_rxdma_start_channel(nxgep, channel, 142052ccf843Smisaki rbrp, rcrp, mboxp); 142144961713Sgirish if (status != 
NXGE_OK) { 142244961713Sgirish goto nxge_rxdma_fixup_channel_fail; 142344961713Sgirish } 142444961713Sgirish if (status != NXGE_OK) { 142544961713Sgirish goto nxge_rxdma_fixup_channel_fail; 142644961713Sgirish } 142744961713Sgirish 142844961713Sgirish nxge_rxdma_fixup_channel_fail: 142944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 143052ccf843Smisaki "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status)); 143144961713Sgirish 143244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel")); 143344961713Sgirish } 143444961713Sgirish 1435da14cebeSEric Cheng /* 1436da14cebeSEric Cheng * Convert an absolute RDC number to a Receive Buffer Ring index. That is, 1437da14cebeSEric Cheng * map <channel> to an index into nxgep->rx_rbr_rings. 1438da14cebeSEric Cheng * (device ring index -> port ring index) 1439da14cebeSEric Cheng */ 144044961713Sgirish int 144144961713Sgirish nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel) 144244961713Sgirish { 1443da14cebeSEric Cheng int i, ndmas; 1444da14cebeSEric Cheng uint16_t rdc; 1445da14cebeSEric Cheng p_rx_rbr_rings_t rx_rbr_rings; 1446da14cebeSEric Cheng p_rx_rbr_ring_t *rbr_rings; 1447da14cebeSEric Cheng 1448da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 1449da14cebeSEric Cheng "==> nxge_rxdma_get_ring_index: channel %d", channel)); 1450da14cebeSEric Cheng 1451da14cebeSEric Cheng rx_rbr_rings = nxgep->rx_rbr_rings; 1452da14cebeSEric Cheng if (rx_rbr_rings == NULL) { 1453da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 1454da14cebeSEric Cheng "<== nxge_rxdma_get_ring_index: NULL ring pointer")); 1455da14cebeSEric Cheng return (-1); 1456da14cebeSEric Cheng } 1457da14cebeSEric Cheng ndmas = rx_rbr_rings->ndmas; 1458da14cebeSEric Cheng if (!ndmas) { 1459da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 1460da14cebeSEric Cheng "<== nxge_rxdma_get_ring_index: no channel")); 1461da14cebeSEric Cheng return (-1); 1462da14cebeSEric Cheng } 1463da14cebeSEric Cheng 1464da14cebeSEric Cheng 
NXGE_DEBUG_MSG((nxgep, RX_CTL, 1465da14cebeSEric Cheng "==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas)); 1466da14cebeSEric Cheng 1467da14cebeSEric Cheng rbr_rings = rx_rbr_rings->rbr_rings; 1468da14cebeSEric Cheng for (i = 0; i < ndmas; i++) { 1469da14cebeSEric Cheng rdc = rbr_rings[i]->rdc; 1470da14cebeSEric Cheng if (channel == rdc) { 1471da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 1472da14cebeSEric Cheng "==> nxge_rxdma_get_rbr_ring: channel %d " 1473da14cebeSEric Cheng "(index %d) ring %d", channel, i, rbr_rings[i])); 1474da14cebeSEric Cheng return (i); 1475da14cebeSEric Cheng } 1476da14cebeSEric Cheng } 1477da14cebeSEric Cheng 1478da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 1479da14cebeSEric Cheng "<== nxge_rxdma_get_rbr_ring_index: not found")); 1480da14cebeSEric Cheng 1481da14cebeSEric Cheng return (-1); 148244961713Sgirish } 148344961713Sgirish 148444961713Sgirish p_rx_rbr_ring_t 148544961713Sgirish nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 148644961713Sgirish { 1487678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 1488678453a8Sspeer nxge_channel_t rdc; 148944961713Sgirish 149044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 149152ccf843Smisaki "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 149244961713Sgirish 1493678453a8Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1494678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1495678453a8Sspeer "<== nxge_rxdma_get_rbr_ring: " 1496678453a8Sspeer "NULL ring pointer(s)")); 149744961713Sgirish return (NULL); 149844961713Sgirish } 1499678453a8Sspeer 1500678453a8Sspeer if (set->owned.map == 0) { 150144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1502678453a8Sspeer "<== nxge_rxdma_get_rbr_ring: no channels")); 150344961713Sgirish return (NULL); 150444961713Sgirish } 150544961713Sgirish 1506678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1507678453a8Sspeer if ((1 << rdc) & set->owned.map) { 1508678453a8Sspeer rx_rbr_ring_t *ring = 
1509678453a8Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 1510678453a8Sspeer if (ring) { 1511678453a8Sspeer if (channel == ring->rdc) { 1512678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 1513678453a8Sspeer "==> nxge_rxdma_get_rbr_ring: " 1514678453a8Sspeer "channel %d ring $%p", rdc, ring)); 1515678453a8Sspeer return (ring); 1516678453a8Sspeer } 1517678453a8Sspeer } 151844961713Sgirish } 151944961713Sgirish } 152044961713Sgirish 152144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 152252ccf843Smisaki "<== nxge_rxdma_get_rbr_ring: not found")); 152344961713Sgirish 152444961713Sgirish return (NULL); 152544961713Sgirish } 152644961713Sgirish 152744961713Sgirish p_rx_rcr_ring_t 152844961713Sgirish nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 152944961713Sgirish { 1530678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 1531678453a8Sspeer nxge_channel_t rdc; 153244961713Sgirish 153344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 153452ccf843Smisaki "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 153544961713Sgirish 1536678453a8Sspeer if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) { 1537678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1538678453a8Sspeer "<== nxge_rxdma_get_rcr_ring: " 1539678453a8Sspeer "NULL ring pointer(s)")); 154044961713Sgirish return (NULL); 154144961713Sgirish } 1542678453a8Sspeer 1543678453a8Sspeer if (set->owned.map == 0) { 154444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1545678453a8Sspeer "<== nxge_rxdma_get_rbr_ring: no channels")); 154644961713Sgirish return (NULL); 154744961713Sgirish } 154844961713Sgirish 1549678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1550678453a8Sspeer if ((1 << rdc) & set->owned.map) { 1551678453a8Sspeer rx_rcr_ring_t *ring = 1552678453a8Sspeer nxgep->rx_rcr_rings->rcr_rings[rdc]; 1553678453a8Sspeer if (ring) { 1554678453a8Sspeer if (channel == ring->rdc) { 1555678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 1556678453a8Sspeer "==> nxge_rxdma_get_rcr_ring: " 
1557678453a8Sspeer "channel %d ring $%p", rdc, ring)); 1558678453a8Sspeer return (ring); 1559678453a8Sspeer } 1560678453a8Sspeer } 156144961713Sgirish } 156244961713Sgirish } 156344961713Sgirish 156444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 156552ccf843Smisaki "<== nxge_rxdma_get_rcr_ring: not found")); 156644961713Sgirish 156744961713Sgirish return (NULL); 156844961713Sgirish } 156944961713Sgirish 157044961713Sgirish /* 157144961713Sgirish * Static functions start here. 157244961713Sgirish */ 157344961713Sgirish static p_rx_msg_t 157444961713Sgirish nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 157544961713Sgirish { 157644961713Sgirish p_rx_msg_t nxge_mp = NULL; 157744961713Sgirish p_nxge_dma_common_t dmamsg_p; 157844961713Sgirish uchar_t *buffer; 157944961713Sgirish 158044961713Sgirish nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 158144961713Sgirish if (nxge_mp == NULL) { 158256d930aeSspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 158352ccf843Smisaki "Allocation of a rx msg failed.")); 158444961713Sgirish goto nxge_allocb_exit; 158544961713Sgirish } 158644961713Sgirish 158744961713Sgirish nxge_mp->use_buf_pool = B_FALSE; 158844961713Sgirish if (dmabuf_p) { 158944961713Sgirish nxge_mp->use_buf_pool = B_TRUE; 159044961713Sgirish dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 159144961713Sgirish *dmamsg_p = *dmabuf_p; 159244961713Sgirish dmamsg_p->nblocks = 1; 159344961713Sgirish dmamsg_p->block_size = size; 159444961713Sgirish dmamsg_p->alength = size; 159544961713Sgirish buffer = (uchar_t *)dmabuf_p->kaddrp; 159644961713Sgirish 159744961713Sgirish dmabuf_p->kaddrp = (void *) 159852ccf843Smisaki ((char *)dmabuf_p->kaddrp + size); 159944961713Sgirish dmabuf_p->ioaddr_pp = (void *) 160052ccf843Smisaki ((char *)dmabuf_p->ioaddr_pp + size); 160144961713Sgirish dmabuf_p->alength -= size; 160244961713Sgirish dmabuf_p->offset += size; 160344961713Sgirish dmabuf_p->dma_cookie.dmac_laddress += size; 160444961713Sgirish 
/*
 * Allocate one receive message descriptor (rx_msg_t) and its data buffer,
 * and wrap the buffer in an mblk via desballoc() so that nxge_freeb() runs
 * when the upper layer frees it.
 *
 * If dmabuf_p is non-NULL, a <size>-byte slice is carved out of the front
 * of that pre-allocated DMA region (the region's kaddrp/ioaddr_pp/cookie
 * are advanced past the slice and its remaining length reduced).
 * Otherwise a plain kernel buffer is allocated.
 *
 * Returns the new rx_msg_t (ref_cnt 1, free B_TRUE), or NULL on failure.
 * NOTE(review): on the desballoc-failure path with a pooled buffer, the
 * carve-out bookkeeping on dmabuf_p is not rolled back — presumably the
 * whole pool is torn down together; confirm against the callers.
 */
static p_rx_msg_t
nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p)
{
	p_rx_msg_t nxge_mp = NULL;
	p_nxge_dma_common_t dmamsg_p;
	uchar_t *buffer;

	nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
	if (nxge_mp == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "Allocation of a rx msg failed."));
		goto nxge_allocb_exit;
	}

	nxge_mp->use_buf_pool = B_FALSE;
	if (dmabuf_p) {
		/* Carve this message's buffer out of the shared DMA region. */
		nxge_mp->use_buf_pool = B_TRUE;
		dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma;
		*dmamsg_p = *dmabuf_p;
		dmamsg_p->nblocks = 1;
		dmamsg_p->block_size = size;
		dmamsg_p->alength = size;
		buffer = (uchar_t *)dmabuf_p->kaddrp;

		/* Advance the region past the slice we just took. */
		dmabuf_p->kaddrp = (void *)
		    ((char *)dmabuf_p->kaddrp + size);
		dmabuf_p->ioaddr_pp = (void *)
		    ((char *)dmabuf_p->ioaddr_pp + size);
		dmabuf_p->alength -= size;
		dmabuf_p->offset += size;
		dmabuf_p->dma_cookie.dmac_laddress += size;
		dmabuf_p->dma_cookie.dmac_size -= size;

	} else {
		/* No pool: allocate a standalone kernel buffer. */
		buffer = KMEM_ALLOC(size, KM_NOSLEEP);
		if (buffer == NULL) {
			NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
			    "Allocation of a receive page failed."));
			goto nxge_allocb_fail1;
		}
	}

	/* Wrap the buffer; nxge_freeb is invoked on freemsg of the mblk. */
	nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb);
	if (nxge_mp->rx_mblk_p == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed."));
		goto nxge_allocb_fail2;
	}

	nxge_mp->buffer = buffer;
	nxge_mp->block_size = size;
	nxge_mp->freeb.free_func = (void (*)())nxge_freeb;
	nxge_mp->freeb.free_arg = (caddr_t)nxge_mp;
	nxge_mp->ref_cnt = 1;
	nxge_mp->free = B_TRUE;
	nxge_mp->rx_use_bcopy = B_FALSE;

	atomic_inc_32(&nxge_mblks_pending);

	goto nxge_allocb_exit;

nxge_allocb_fail2:
	/* Only standalone buffers are freed here; pooled space stays put. */
	if (!nxge_mp->use_buf_pool) {
		KMEM_FREE(buffer, size);
	}

nxge_allocb_fail1:
	KMEM_FREE(nxge_mp, sizeof (rx_msg_t));
	nxge_mp = NULL;

nxge_allocb_exit:
	return (nxge_mp);
}
"nxge_mp = $%p " 165352ccf843Smisaki "offset = 0x%08X " 165452ccf843Smisaki "size = 0x%08X", 165552ccf843Smisaki nxge_mp, offset, size)); 165644961713Sgirish 165744961713Sgirish mp = desballoc(&nxge_mp->buffer[offset], size, 165852ccf843Smisaki 0, &nxge_mp->freeb); 165944961713Sgirish if (mp == NULL) { 166044961713Sgirish NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 166144961713Sgirish goto nxge_dupb_exit; 166244961713Sgirish } 166344961713Sgirish atomic_inc_32(&nxge_mp->ref_cnt); 166444961713Sgirish 166544961713Sgirish 166644961713Sgirish nxge_dupb_exit: 166744961713Sgirish NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 166852ccf843Smisaki nxge_mp)); 166944961713Sgirish return (mp); 167044961713Sgirish } 167144961713Sgirish 167244961713Sgirish p_mblk_t 167344961713Sgirish nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 167444961713Sgirish { 167544961713Sgirish p_mblk_t mp; 167644961713Sgirish uchar_t *dp; 167744961713Sgirish 167844961713Sgirish mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 167944961713Sgirish if (mp == NULL) { 168044961713Sgirish NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 168144961713Sgirish goto nxge_dupb_bcopy_exit; 168244961713Sgirish } 168344961713Sgirish dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 168444961713Sgirish bcopy((void *)&nxge_mp->buffer[offset], dp, size); 168544961713Sgirish mp->b_wptr = dp + size; 168644961713Sgirish 168744961713Sgirish nxge_dupb_bcopy_exit: 168844961713Sgirish NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 168952ccf843Smisaki nxge_mp)); 169044961713Sgirish return (mp); 169144961713Sgirish } 169244961713Sgirish 169344961713Sgirish void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 169444961713Sgirish p_rx_msg_t rx_msg_p); 169544961713Sgirish 169644961713Sgirish void 169744961713Sgirish nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 169844961713Sgirish { 169944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> 
/*
 * Return a receive buffer to the hardware: reset the descriptor's usage
 * state, advance the RBR write index under post_lock, store the buffer's
 * shifted DMA address in the descriptor ring, and kick the RDC so the
 * hardware sees one new buffer.
 */
void
nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
{
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page"));

	/* Reuse this buffer */
	rx_msg_p->free = B_FALSE;
	rx_msg_p->cur_usage_cnt = 0;
	rx_msg_p->max_usage_cnt = 0;
	rx_msg_p->pkt_buf_size = 0;

	/* A bcopy-loaned buffer is back; it no longer counts as consumed. */
	if (rx_rbr_p->rbr_use_bcopy) {
		rx_msg_p->rx_use_bcopy = B_FALSE;
		atomic_dec_32(&rx_rbr_p->rbr_consumed);
	}

	/*
	 * Get the rbr header pointer and its offset index.
	 */
	MUTEX_ENTER(&rx_rbr_p->post_lock);
	rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
	    rx_rbr_p->rbr_wrap_mask);
	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
	MUTEX_EXIT(&rx_rbr_p->post_lock);
	/* Kick happens outside the lock; only the index update is guarded. */
	npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep),
	    rx_rbr_p->rdc, 1);

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "<== nxge_post_page (channel %d post_next_index %d)",
	    rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
}
/*
 * Free-side callback for a receive buffer's mblk (installed by
 * nxge_allocb as freeb.free_func). Drops one reference on the rx_msg_t.
 * When the count reaches zero the buffer and descriptor are freed, and
 * the owning RBR's refcount is dropped — possibly freeing the whole ring
 * if it is already RBR_UNMAPPED. When the count drops to one and the
 * buffer was marked free, it is reposted to the hardware instead.
 */
void
nxge_freeb(p_rx_msg_t rx_msg_p)
{
	size_t size;
	uchar_t *buffer = NULL;
	int ref_cnt;
	boolean_t free_state = B_FALSE;

	rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p;

	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
	NXGE_DEBUG_MSG((NULL, MEM2_CTL,
	    "nxge_freeb:rx_msg_p = $%p (block pending %d)",
	    rx_msg_p, nxge_mblks_pending));

	/*
	 * First we need to get the free state, then
	 * atomic decrement the reference count to prevent
	 * the race condition with the interrupt thread that
	 * is processing a loaned up buffer block.
	 */
	free_state = rx_msg_p->free;
	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
	if (!ref_cnt) {
		/* Last reference: tear the descriptor and buffer down. */
		atomic_dec_32(&nxge_mblks_pending);
		buffer = rx_msg_p->buffer;
		size = rx_msg_p->block_size;
		NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
		    "will free: rx_msg_p = $%p (block pending %d)",
		    rx_msg_p, nxge_mblks_pending));

		/* Pooled buffers belong to the DMA region, not kmem. */
		if (!rx_msg_p->use_buf_pool) {
			KMEM_FREE(buffer, size);
		}

		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));

		if (ring) {
			/*
			 * Decrement the receive buffer ring's reference
			 * count, too.
			 */
			atomic_dec_32(&ring->rbr_ref_cnt);

			/*
			 * Free the receive buffer ring, if
			 * 1. all the receive buffers have been freed
			 * 2. and we are in the proper state (that is,
			 *    we are not UNMAPPING).
			 */
			if (ring->rbr_ref_cnt == 0 &&
			    ring->rbr_state == RBR_UNMAPPED) {
				/*
				 * Free receive data buffers,
				 * buffer index information
				 * (rxring_info) and
				 * the message block ring.
				 * NOTE(review): rx_msg_p is already freed
				 * above; the debug line below only prints
				 * its (stale) pointer value.
				 */
				NXGE_DEBUG_MSG((NULL, RX_CTL,
				    "nxge_freeb:rx_msg_p = $%p "
				    "(block pending %d) free buffers",
				    rx_msg_p, nxge_mblks_pending));
				nxge_rxdma_databuf_free(ring);
				if (ring->ring_info) {
					KMEM_FREE(ring->ring_info,
					    sizeof (rxring_info_t));
				}

				if (ring->rx_msg_ring) {
					KMEM_FREE(ring->rx_msg_ring,
					    ring->tnblocks *
					    sizeof (p_rx_msg_t));
				}
				KMEM_FREE(ring, sizeof (*ring));
			}
		}
		return;
	}

	/*
	 * Repost buffer.
	 */
	if (free_state && (ref_cnt == 1) && ring) {
		NXGE_DEBUG_MSG((NULL, RX_CTL,
		    "nxge_freeb: post page $%p:", rx_msg_p));
		/* Only repost while the ring is actively posting. */
		if (ring->rbr_state == RBR_POSTING)
			nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p);
	}

	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
}
/*
 * Receive interrupt service routine for one logical device (RX DMA
 * channel). arg1 is the p_nxge_ldv_t for the device, arg2 the driver
 * instance. Under the RCR ring lock: reads the channel's control/status
 * register, drains completed packets via nxge_rx_pkts_vring(), handles
 * error events, acks the W1C status bits, and either disarms (polling
 * mode) or rearms the logical group. Any packet chain is handed to the
 * MAC layer (or the vio callback in an LDOMs guest) after the lock is
 * dropped. Always returns DDI_INTR_CLAIMED.
 */
uint_t
nxge_rx_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t nxgep = (p_nxge_t)arg2;
	p_nxge_ldg_t ldgp;
	uint8_t channel;
	npi_handle_t handle;
	rx_dma_ctl_stat_t cs;
	p_rx_rcr_ring_t rcr_ring;
	mblk_t *mp;

	if (ldvp == NULL) {
		NXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== nxge_rx_intr: arg2 $%p arg1 $%p",
		    nxgep, ldvp));
		return (DDI_INTR_CLAIMED);
	}

	/* Trust the ldv's back-pointer over arg2 if they disagree. */
	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
		nxgep = ldvp->nxgep;
	}

	/* Ignore interrupts that arrive before the interface is up. */
	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_rx_intr: interface not started or intialized"));
		return (DDI_INTR_CLAIMED);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_intr: arg2 $%p arg1 $%p",
	    nxgep, ldvp));

	/*
	 * Get the PIO handle.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	/*
	 * Get the ring to enable us to process packets.
	 */
	rcr_ring = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index];

	/*
	 * The RCR ring lock must be held when packets
	 * are being processed and the hardware registers are
	 * being read or written to prevent race condition
	 * among the interrupt thread, the polling thread
	 * (will cause fatal errors such as rcrincon bit set)
	 * and the setting of the poll_flag.
	 */
	MUTEX_ENTER(&rcr_ring->lock);

	/*
	 * Get the control and status for this channel.
	 */
	channel = ldvp->channel;
	ldgp = ldvp->ldgp;

	if (!isLDOMguest(nxgep) && (!nxgep->rx_channel_started[channel])) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_rx_intr: channel is not started"));

		/*
		 * We received an interrupt before the ring is started.
		 * Ack the status bits and leave mex armed so the next
		 * event still interrupts.
		 */
		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
		    &cs.value);
		cs.value &= RX_DMA_CTL_STAT_WR1C;
		cs.bits.hdw.mex = 1;
		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
		    cs.value);

		/*
		 * Rearm this logical group if this is a single device
		 * group.
		 */
		if (ldgp->nldvs == 1) {
			if (isLDOMguest(nxgep)) {
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				ldgimgm_t mgm;

				mgm.value = 0;
				mgm.bits.ldw.arm = 1;
				mgm.bits.ldw.timer = ldgp->ldg_timer;

				NXGE_REG_WR64(handle,
				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
				    mgm.value);
			}
		}
		MUTEX_EXIT(&rcr_ring->lock);
		return (DDI_INTR_CLAIMED);
	}

	ASSERT(rcr_ring->ldgp == ldgp);
	ASSERT(rcr_ring->ldvp == ldvp);

	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
	    "cs 0x%016llx rcrto 0x%x rcrthres %x",
	    channel,
	    cs.value,
	    cs.bits.hdw.rcrto,
	    cs.bits.hdw.rcrthres));

	/* Drain completed packets; mp is the chain for the upper layer. */
	mp = nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs);

	/* error events. */
	if (cs.value & RX_DMA_CTL_STAT_ERROR) {
		(void) nxge_rx_err_evnts(nxgep, channel, cs);
	}

	/*
	 * Enable the mailbox update interrupt if we want
	 * to use mailbox. We probably don't need to use
	 * mailbox as it only saves us one pio read.
	 * Also write 1 to rcrthres and rcrto to clear
	 * these two edge triggered bits.
	 */
	cs.value &= RX_DMA_CTL_STAT_WR1C;
	cs.bits.hdw.mex = rcr_ring->poll_flag ? 0 : 1;
	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
	    cs.value);

	/*
	 * If the polling mode is enabled, disable the interrupt.
	 */
	if (rcr_ring->poll_flag) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p "
		    "(disabling interrupts)", channel, ldgp, ldvp));
		/*
		 * Disarm this logical group if this is a single device
		 * group.
		 */
		if (ldgp->nldvs == 1) {
			ldgimgm_t mgm;
			mgm.value = 0;
			mgm.bits.ldw.arm = 0;
			NXGE_REG_WR64(handle,
			    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value);
		}
	} else {
		/*
		 * Rearm this logical group if this is a single device
		 * group.
		 */
		if (ldgp->nldvs == 1) {
			if (isLDOMguest(nxgep)) {
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				ldgimgm_t mgm;

				mgm.value = 0;
				mgm.bits.ldw.arm = 1;
				mgm.bits.ldw.timer = ldgp->ldg_timer;

				NXGE_REG_WR64(handle,
				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
				    mgm.value);
			}
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_intr: rdc %d ldgp $%p "
		    "exiting ISR (and call mac_rx_ring)", channel, ldgp));
	}
	MUTEX_EXIT(&rcr_ring->lock);

	/* Deliver outside the ring lock to avoid holding it into the MAC. */
	if (mp) {
		if (!isLDOMguest(nxgep))
			mac_rx_ring(nxgep->mach, rcr_ring->rcr_mac_handle, mp,
			    rcr_ring->rcr_gen_num);
#if defined(sun4v)
		else {	/* isLDOMguest(nxgep) */
			nxge_hio_data_t *nhd = (nxge_hio_data_t *)
			    nxgep->nxge_hw_p->hio;
			nx_vio_fp_t *vio = &nhd->hio.vio;

			if (vio->cb.vio_net_rx_cb) {
				(*vio->cb.vio_net_rx_cb)
				    (nxgep->hio_vr->vhp, mp);
			}
		}
#endif
	}
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED"));
	return (DDI_INTR_CLAIMED);
}
/*
 * Process the packets received in the specified logical device
 * and pass up a chain of message blocks to the upper layer.
 * The RCR ring lock must be held before calling this function.
 *
 * Thin wrapper: looks up the RCR ring for <vindex> and calls
 * nxge_rx_pkts() with no byte limit (-1). Returns the mblk chain,
 * or NULL when no packets were available.
 */
static mblk_t *
nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs)
{
	p_mblk_t mp;
	p_rx_rcr_ring_t rcrp;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring"));
	rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex];

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_rx_pkts_vring: (calling nxge_rx_pkts)rdc %d "
	    "rcr_mac_handle $%p ", rcrp->rdc, rcrp->rcr_mac_handle));
	/* -1: no byte quota — drain up to the packet-count limit. */
	if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rx_pkts_vring: no mp"));
		return (NULL);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p",
	    mp));

#ifdef NXGE_DEBUG
	/* Debug builds: dump the head, continuation, and next packets. */
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts_vring:calling mac_rx "
	    "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p "
	    "mac_handle $%p",
	    mp->b_wptr - mp->b_rptr,
	    mp, mp->b_cont, mp->b_next,
	    rcrp, rcrp->rcr_mac_handle));

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts_vring: dump packets "
	    "(mp $%p b_rptr $%p b_wptr $%p):\n %s",
	    mp,
	    mp->b_rptr,
	    mp->b_wptr,
	    nxge_dump_packet((char *)mp->b_rptr,
	    mp->b_wptr - mp->b_rptr)));
	if (mp->b_cont) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts_vring: dump b_cont packets "
		    "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s",
		    mp->b_cont,
		    mp->b_cont->b_rptr,
		    mp->b_cont->b_wptr,
		    nxge_dump_packet((char *)mp->b_cont->b_rptr,
		    mp->b_cont->b_wptr - mp->b_cont->b_rptr)));
	}
	if (mp->b_next) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts_vring: dump next packets "
		    "(b_rptr $%p): %s",
		    mp->b_next->b_rptr,
		    nxge_dump_packet((char *)mp->b_next->b_rptr,
		    mp->b_next->b_wptr - mp->b_next->b_rptr)));
	}
#endif
	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_rx_pkts_vring: returning rdc %d rcr_mac_handle $%p ",
	    rcrp->rdc, rcrp->rcr_mac_handle));

	return (mp);
}
This chain of message blocks will be returned and 209644961713Sgirish * a hardware control status register will be updated with the number of 209744961713Sgirish * packets were removed from the hardware queue. 209844961713Sgirish * 2099da14cebeSEric Cheng * The RCR ring lock is held when entering this function. 210044961713Sgirish */ 2101678453a8Sspeer static mblk_t * 2102678453a8Sspeer nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs, 2103678453a8Sspeer int bytes_to_pickup) 210444961713Sgirish { 210544961713Sgirish npi_handle_t handle; 210644961713Sgirish uint8_t channel; 210744961713Sgirish uint32_t comp_rd_index; 210844961713Sgirish p_rcr_entry_t rcr_desc_rd_head_p; 210944961713Sgirish p_rcr_entry_t rcr_desc_rd_head_pp; 211044961713Sgirish p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 211144961713Sgirish uint16_t qlen, nrcr_read, npkt_read; 2112678453a8Sspeer uint32_t qlen_hw; 211344961713Sgirish boolean_t multi; 2114678453a8Sspeer rcrcfig_b_t rcr_cfg_b; 2115678453a8Sspeer int totallen = 0; 2116a3c5bd6dSspeer #if defined(_BIG_ENDIAN) 211744961713Sgirish npi_status_t rs = NPI_SUCCESS; 211844961713Sgirish #endif 211944961713Sgirish 2120da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: " 212152ccf843Smisaki "channel %d", rcr_p->rdc)); 212244961713Sgirish 212344961713Sgirish if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 212444961713Sgirish return (NULL); 212544961713Sgirish } 212644961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 212744961713Sgirish channel = rcr_p->rdc; 212844961713Sgirish 212944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 213052ccf843Smisaki "==> nxge_rx_pkts: START: rcr channel %d " 213152ccf843Smisaki "head_p $%p head_pp $%p index %d ", 213252ccf843Smisaki channel, rcr_p->rcr_desc_rd_head_p, 213352ccf843Smisaki rcr_p->rcr_desc_rd_head_pp, 213452ccf843Smisaki rcr_p->comp_rd_index)); 213544961713Sgirish 213644961713Sgirish 2137a3c5bd6dSspeer #if !defined(_BIG_ENDIAN) 213844961713Sgirish qlen = 
RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 213944961713Sgirish #else 214044961713Sgirish rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 214144961713Sgirish if (rs != NPI_SUCCESS) { 2142678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 214344961713Sgirish "channel %d, get qlen failed 0x%08x", 214452ccf843Smisaki channel, rs)); 214544961713Sgirish return (NULL); 214644961713Sgirish } 214744961713Sgirish #endif 214844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 214952ccf843Smisaki "qlen %d", channel, qlen)); 215044961713Sgirish 215144961713Sgirish 215244961713Sgirish 215344961713Sgirish if (!qlen) { 2154da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 215552ccf843Smisaki "==> nxge_rx_pkts:rcr channel %d " 215652ccf843Smisaki "qlen %d (no pkts)", channel, qlen)); 215744961713Sgirish 215844961713Sgirish return (NULL); 215944961713Sgirish } 216044961713Sgirish 216144961713Sgirish comp_rd_index = rcr_p->comp_rd_index; 216244961713Sgirish 216344961713Sgirish rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 216444961713Sgirish rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 216544961713Sgirish nrcr_read = npkt_read = 0; 216644961713Sgirish 216744961713Sgirish /* 216844961713Sgirish * Number of packets queued 216944961713Sgirish * (The jumbo or multi packet will be counted as only one 217044961713Sgirish * packets and it may take up more than one completion entry). 217144961713Sgirish */ 217244961713Sgirish qlen_hw = (qlen < nxge_max_rx_pkts) ? 
217352ccf843Smisaki qlen : nxge_max_rx_pkts; 217444961713Sgirish head_mp = NULL; 217544961713Sgirish tail_mp = &head_mp; 217644961713Sgirish nmp = mp_cont = NULL; 217744961713Sgirish multi = B_FALSE; 217844961713Sgirish 2179a3c5bd6dSspeer while (qlen_hw) { 218044961713Sgirish 218144961713Sgirish #ifdef NXGE_DEBUG 218244961713Sgirish nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 218344961713Sgirish #endif 218444961713Sgirish /* 218544961713Sgirish * Process one completion ring entry. 218644961713Sgirish */ 218744961713Sgirish nxge_receive_packet(nxgep, 218852ccf843Smisaki rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 218944961713Sgirish 219044961713Sgirish /* 219144961713Sgirish * message chaining modes 219244961713Sgirish */ 219314ea4bb7Ssd if (nmp) { 219444961713Sgirish nmp->b_next = NULL; 219514ea4bb7Ssd if (!multi && !mp_cont) { /* frame fits a partition */ 219614ea4bb7Ssd *tail_mp = nmp; 219714ea4bb7Ssd tail_mp = &nmp->b_next; 2198678453a8Sspeer totallen += MBLKL(nmp); 219914ea4bb7Ssd nmp = NULL; 220014ea4bb7Ssd } else if (multi && !mp_cont) { /* first segment */ 220114ea4bb7Ssd *tail_mp = nmp; 220214ea4bb7Ssd tail_mp = &nmp->b_cont; 2203678453a8Sspeer totallen += MBLKL(nmp); 220414ea4bb7Ssd } else if (multi && mp_cont) { /* mid of multi segs */ 220514ea4bb7Ssd *tail_mp = mp_cont; 220614ea4bb7Ssd tail_mp = &mp_cont->b_cont; 2207678453a8Sspeer totallen += MBLKL(mp_cont); 220814ea4bb7Ssd } else if (!multi && mp_cont) { /* last segment */ 2209a3c5bd6dSspeer *tail_mp = mp_cont; 221014ea4bb7Ssd tail_mp = &nmp->b_next; 2211678453a8Sspeer totallen += MBLKL(mp_cont); 221214ea4bb7Ssd nmp = NULL; 221314ea4bb7Ssd } 221444961713Sgirish } 221544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 221652ccf843Smisaki "==> nxge_rx_pkts: loop: rcr channel %d " 221752ccf843Smisaki "before updating: multi %d " 221852ccf843Smisaki "nrcr_read %d " 221952ccf843Smisaki "npk read %d " 222052ccf843Smisaki "head_pp $%p index %d ", 222152ccf843Smisaki channel, 222252ccf843Smisaki multi, 
222352ccf843Smisaki nrcr_read, npkt_read, rcr_desc_rd_head_pp, 222452ccf843Smisaki comp_rd_index)); 222544961713Sgirish 222644961713Sgirish if (!multi) { 222744961713Sgirish qlen_hw--; 222844961713Sgirish npkt_read++; 222944961713Sgirish } 223044961713Sgirish 223144961713Sgirish /* 223244961713Sgirish * Update the next read entry. 223344961713Sgirish */ 223444961713Sgirish comp_rd_index = NEXT_ENTRY(comp_rd_index, 223552ccf843Smisaki rcr_p->comp_wrap_mask); 223644961713Sgirish 223744961713Sgirish rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 223852ccf843Smisaki rcr_p->rcr_desc_first_p, 223952ccf843Smisaki rcr_p->rcr_desc_last_p); 224044961713Sgirish 224144961713Sgirish nrcr_read++; 224244961713Sgirish 224344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 224452ccf843Smisaki "<== nxge_rx_pkts: (SAM, process one packet) " 224552ccf843Smisaki "nrcr_read %d", 224652ccf843Smisaki nrcr_read)); 224744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 224852ccf843Smisaki "==> nxge_rx_pkts: loop: rcr channel %d " 224952ccf843Smisaki "multi %d " 225052ccf843Smisaki "nrcr_read %d " 225152ccf843Smisaki "npk read %d " 225252ccf843Smisaki "head_pp $%p index %d ", 225352ccf843Smisaki channel, 225452ccf843Smisaki multi, 225552ccf843Smisaki nrcr_read, npkt_read, rcr_desc_rd_head_pp, 225652ccf843Smisaki comp_rd_index)); 225744961713Sgirish 2258678453a8Sspeer if ((bytes_to_pickup != -1) && 2259678453a8Sspeer (totallen >= bytes_to_pickup)) { 2260678453a8Sspeer break; 2261678453a8Sspeer } 226244961713Sgirish } 226344961713Sgirish 226444961713Sgirish rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 226544961713Sgirish rcr_p->comp_rd_index = comp_rd_index; 226644961713Sgirish rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 226714ea4bb7Ssd if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 226852ccf843Smisaki (nxgep->intr_threshold != rcr_p->intr_threshold)) { 22697b26d9ffSSantwona Behera 22707b26d9ffSSantwona Behera rcr_p->intr_timeout = (nxgep->intr_timeout < 22717b26d9ffSSantwona 
Behera NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 22727b26d9ffSSantwona Behera nxgep->intr_timeout; 22737b26d9ffSSantwona Behera 22747b26d9ffSSantwona Behera rcr_p->intr_threshold = (nxgep->intr_threshold < 22757b26d9ffSSantwona Behera NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 22767b26d9ffSSantwona Behera nxgep->intr_threshold; 22777b26d9ffSSantwona Behera 227814ea4bb7Ssd rcr_cfg_b.value = 0x0ULL; 22797b26d9ffSSantwona Behera rcr_cfg_b.bits.ldw.entout = 1; 228014ea4bb7Ssd rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 228114ea4bb7Ssd rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 22827b26d9ffSSantwona Behera 228314ea4bb7Ssd RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 228452ccf843Smisaki channel, rcr_cfg_b.value); 228514ea4bb7Ssd } 228644961713Sgirish 228744961713Sgirish cs.bits.ldw.pktread = npkt_read; 228844961713Sgirish cs.bits.ldw.ptrread = nrcr_read; 228944961713Sgirish RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 229052ccf843Smisaki channel, cs.value); 229144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 229252ccf843Smisaki "==> nxge_rx_pkts: EXIT: rcr channel %d " 229352ccf843Smisaki "head_pp $%p index %016llx ", 229452ccf843Smisaki channel, 229552ccf843Smisaki rcr_p->rcr_desc_rd_head_pp, 229652ccf843Smisaki rcr_p->comp_rd_index)); 229744961713Sgirish /* 229844961713Sgirish * Update RCR buffer pointer read and number of packets 229944961713Sgirish * read. 
230044961713Sgirish */ 230144961713Sgirish 2302da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return" 2303da14cebeSEric Cheng "channel %d", rcr_p->rdc)); 2304da14cebeSEric Cheng 230544961713Sgirish return (head_mp); 230644961713Sgirish } 230744961713Sgirish 230844961713Sgirish void 230944961713Sgirish nxge_receive_packet(p_nxge_t nxgep, 231044961713Sgirish p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 231144961713Sgirish boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 231244961713Sgirish { 231344961713Sgirish p_mblk_t nmp = NULL; 231444961713Sgirish uint64_t multi; 231544961713Sgirish uint64_t dcf_err; 231644961713Sgirish uint8_t channel; 231744961713Sgirish 231844961713Sgirish boolean_t first_entry = B_TRUE; 231944961713Sgirish boolean_t is_tcp_udp = B_FALSE; 232044961713Sgirish boolean_t buffer_free = B_FALSE; 232144961713Sgirish boolean_t error_send_up = B_FALSE; 232244961713Sgirish uint8_t error_type; 232344961713Sgirish uint16_t l2_len; 232444961713Sgirish uint16_t skip_len; 232544961713Sgirish uint8_t pktbufsz_type; 232644961713Sgirish uint64_t rcr_entry; 232744961713Sgirish uint64_t *pkt_buf_addr_pp; 232844961713Sgirish uint64_t *pkt_buf_addr_p; 232944961713Sgirish uint32_t buf_offset; 233044961713Sgirish uint32_t bsize; 233144961713Sgirish uint32_t error_disp_cnt; 233244961713Sgirish uint32_t msg_index; 233344961713Sgirish p_rx_rbr_ring_t rx_rbr_p; 233444961713Sgirish p_rx_msg_t *rx_msg_ring_p; 233544961713Sgirish p_rx_msg_t rx_msg_p; 233644961713Sgirish uint16_t sw_offset_bytes = 0, hdr_size = 0; 233744961713Sgirish nxge_status_t status = NXGE_OK; 233844961713Sgirish boolean_t is_valid = B_FALSE; 233944961713Sgirish p_nxge_rx_ring_stats_t rdc_stats; 2340a3c5bd6dSspeer uint32_t bytes_read; 2341a3c5bd6dSspeer uint64_t pkt_type; 2342a3c5bd6dSspeer uint64_t frag; 23434202ea4bSsbehera boolean_t pkt_too_long_err = B_FALSE; 234444961713Sgirish #ifdef NXGE_DEBUG 234544961713Sgirish int dump_len; 234644961713Sgirish #endif 
234744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 234844961713Sgirish first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 234944961713Sgirish 235044961713Sgirish rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 235144961713Sgirish 235244961713Sgirish multi = (rcr_entry & RCR_MULTI_MASK); 235344961713Sgirish dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 235444961713Sgirish pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 235544961713Sgirish 235644961713Sgirish error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 235744961713Sgirish frag = (rcr_entry & RCR_FRAG_MASK); 235844961713Sgirish 235944961713Sgirish l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 236044961713Sgirish 236144961713Sgirish pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 236252ccf843Smisaki RCR_PKTBUFSZ_SHIFT); 2363adfcba55Sjoycey #if defined(__i386) 2364adfcba55Sjoycey pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 236552ccf843Smisaki RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2366adfcba55Sjoycey #else 236744961713Sgirish pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 236852ccf843Smisaki RCR_PKT_BUF_ADDR_SHIFT); 2369adfcba55Sjoycey #endif 237044961713Sgirish 237144961713Sgirish channel = rcr_p->rdc; 237244961713Sgirish 237344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 237452ccf843Smisaki "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 237552ccf843Smisaki "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 237652ccf843Smisaki "error_type 0x%x pkt_type 0x%x " 237752ccf843Smisaki "pktbufsz_type %d ", 237852ccf843Smisaki rcr_desc_rd_head_p, 237952ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 238052ccf843Smisaki multi, 238152ccf843Smisaki error_type, 238252ccf843Smisaki pkt_type, 238352ccf843Smisaki pktbufsz_type)); 238444961713Sgirish 238544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 238652ccf843Smisaki "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 238752ccf843Smisaki "pkt_buf_addr_pp $%p l2_len %d 
multi 0x%llx " 238852ccf843Smisaki "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 238952ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 239052ccf843Smisaki multi, 239152ccf843Smisaki error_type, 239252ccf843Smisaki pkt_type)); 239344961713Sgirish 239444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 239552ccf843Smisaki "==> (rbr) nxge_receive_packet: entry 0x%0llx " 239652ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 239752ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 239844961713Sgirish 239944961713Sgirish /* get the stats ptr */ 240044961713Sgirish rdc_stats = rcr_p->rdc_stats; 240144961713Sgirish 240244961713Sgirish if (!l2_len) { 240344961713Sgirish 240444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 240552ccf843Smisaki "<== nxge_receive_packet: failed: l2 length is 0.")); 240644961713Sgirish return; 240744961713Sgirish } 240844961713Sgirish 24094202ea4bSsbehera /* 2410da14cebeSEric Cheng * Software workaround for BMAC hardware limitation that allows 24114202ea4bSsbehera * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 24124202ea4bSsbehera * instead of 0x2400 for jumbo. 24134202ea4bSsbehera */ 24144202ea4bSsbehera if (l2_len > nxgep->mac.maxframesize) { 24154202ea4bSsbehera pkt_too_long_err = B_TRUE; 24164202ea4bSsbehera } 24174202ea4bSsbehera 241856d930aeSspeer /* Hardware sends us 4 bytes of CRC as no stripping is done. 
*/ 241956d930aeSspeer l2_len -= ETHERFCSL; 242056d930aeSspeer 242144961713Sgirish /* shift 6 bits to get the full io address */ 2422adfcba55Sjoycey #if defined(__i386) 2423adfcba55Sjoycey pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 242452ccf843Smisaki RCR_PKT_BUF_ADDR_SHIFT_FULL); 2425adfcba55Sjoycey #else 242644961713Sgirish pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 242752ccf843Smisaki RCR_PKT_BUF_ADDR_SHIFT_FULL); 2428adfcba55Sjoycey #endif 242944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 243052ccf843Smisaki "==> (rbr) nxge_receive_packet: entry 0x%0llx " 243152ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 243252ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 243344961713Sgirish 243444961713Sgirish rx_rbr_p = rcr_p->rx_rbr_p; 243544961713Sgirish rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 243644961713Sgirish 243744961713Sgirish if (first_entry) { 243844961713Sgirish hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 243952ccf843Smisaki RXDMA_HDR_SIZE_DEFAULT); 244044961713Sgirish 244144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 244252ccf843Smisaki "==> nxge_receive_packet: first entry 0x%016llx " 244352ccf843Smisaki "pkt_buf_addr_pp $%p l2_len %d hdr %d", 244452ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 244552ccf843Smisaki hdr_size)); 244644961713Sgirish } 244744961713Sgirish 244844961713Sgirish MUTEX_ENTER(&rx_rbr_p->lock); 244944961713Sgirish 245044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 245152ccf843Smisaki "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 245252ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 245352ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 245444961713Sgirish 245544961713Sgirish /* 245644961713Sgirish * Packet buffer address in the completion entry points 245744961713Sgirish * to the starting buffer address (offset 0). 245844961713Sgirish * Use the starting buffer address to locate the corresponding 245944961713Sgirish * kernel address. 
246044961713Sgirish */ 246144961713Sgirish status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 246252ccf843Smisaki pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 246352ccf843Smisaki &buf_offset, 246452ccf843Smisaki &msg_index); 246544961713Sgirish 246644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 246752ccf843Smisaki "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 246852ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 246952ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 247044961713Sgirish 247144961713Sgirish if (status != NXGE_OK) { 247244961713Sgirish MUTEX_EXIT(&rx_rbr_p->lock); 247344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 247452ccf843Smisaki "<== nxge_receive_packet: found vaddr failed %d", 247552ccf843Smisaki status)); 247644961713Sgirish return; 247744961713Sgirish } 247844961713Sgirish 247944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 248052ccf843Smisaki "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 248152ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 248252ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 248344961713Sgirish 248444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 248552ccf843Smisaki "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 248652ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 248752ccf843Smisaki msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 248844961713Sgirish 248944961713Sgirish rx_msg_p = rx_msg_ring_p[msg_index]; 249044961713Sgirish 249144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 249252ccf843Smisaki "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 249352ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 249452ccf843Smisaki msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 249544961713Sgirish 249644961713Sgirish switch (pktbufsz_type) { 249744961713Sgirish case RCR_PKTBUFSZ_0: 249844961713Sgirish bsize = rx_rbr_p->pkt_buf_size0_bytes; 249944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 250052ccf843Smisaki "==> nxge_receive_packet: 0 buf %d", bsize)); 250144961713Sgirish 
break; 250244961713Sgirish case RCR_PKTBUFSZ_1: 250344961713Sgirish bsize = rx_rbr_p->pkt_buf_size1_bytes; 250444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 250552ccf843Smisaki "==> nxge_receive_packet: 1 buf %d", bsize)); 250644961713Sgirish break; 250744961713Sgirish case RCR_PKTBUFSZ_2: 250844961713Sgirish bsize = rx_rbr_p->pkt_buf_size2_bytes; 250944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 251052ccf843Smisaki "==> nxge_receive_packet: 2 buf %d", bsize)); 251144961713Sgirish break; 251244961713Sgirish case RCR_SINGLE_BLOCK: 251344961713Sgirish bsize = rx_msg_p->block_size; 251444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 251552ccf843Smisaki "==> nxge_receive_packet: single %d", bsize)); 251644961713Sgirish 251744961713Sgirish break; 251844961713Sgirish default: 251944961713Sgirish MUTEX_EXIT(&rx_rbr_p->lock); 252044961713Sgirish return; 252144961713Sgirish } 252244961713Sgirish 252344961713Sgirish DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 252452ccf843Smisaki (buf_offset + sw_offset_bytes), 252552ccf843Smisaki (hdr_size + l2_len), 252652ccf843Smisaki DDI_DMA_SYNC_FORCPU); 252744961713Sgirish 252844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 252952ccf843Smisaki "==> nxge_receive_packet: after first dump:usage count")); 253044961713Sgirish 253144961713Sgirish if (rx_msg_p->cur_usage_cnt == 0) { 253244961713Sgirish if (rx_rbr_p->rbr_use_bcopy) { 253344961713Sgirish atomic_inc_32(&rx_rbr_p->rbr_consumed); 253444961713Sgirish if (rx_rbr_p->rbr_consumed < 253552ccf843Smisaki rx_rbr_p->rbr_threshold_hi) { 253644961713Sgirish if (rx_rbr_p->rbr_threshold_lo == 0 || 253752ccf843Smisaki ((rx_rbr_p->rbr_consumed >= 253852ccf843Smisaki rx_rbr_p->rbr_threshold_lo) && 253952ccf843Smisaki (rx_rbr_p->rbr_bufsize_type >= 254052ccf843Smisaki pktbufsz_type))) { 254144961713Sgirish rx_msg_p->rx_use_bcopy = B_TRUE; 254244961713Sgirish } 254344961713Sgirish } else { 254444961713Sgirish rx_msg_p->rx_use_bcopy = B_TRUE; 254544961713Sgirish } 254644961713Sgirish } 
254744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 254852ccf843Smisaki "==> nxge_receive_packet: buf %d (new block) ", 254952ccf843Smisaki bsize)); 255044961713Sgirish 255144961713Sgirish rx_msg_p->pkt_buf_size_code = pktbufsz_type; 255244961713Sgirish rx_msg_p->pkt_buf_size = bsize; 255344961713Sgirish rx_msg_p->cur_usage_cnt = 1; 255444961713Sgirish if (pktbufsz_type == RCR_SINGLE_BLOCK) { 255544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 255652ccf843Smisaki "==> nxge_receive_packet: buf %d " 255752ccf843Smisaki "(single block) ", 255852ccf843Smisaki bsize)); 255944961713Sgirish /* 256044961713Sgirish * Buffer can be reused once the free function 256144961713Sgirish * is called. 256244961713Sgirish */ 256344961713Sgirish rx_msg_p->max_usage_cnt = 1; 256444961713Sgirish buffer_free = B_TRUE; 256544961713Sgirish } else { 256644961713Sgirish rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 256744961713Sgirish if (rx_msg_p->max_usage_cnt == 1) { 256844961713Sgirish buffer_free = B_TRUE; 256944961713Sgirish } 257044961713Sgirish } 257144961713Sgirish } else { 257244961713Sgirish rx_msg_p->cur_usage_cnt++; 257344961713Sgirish if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 257444961713Sgirish buffer_free = B_TRUE; 257544961713Sgirish } 257644961713Sgirish } 257744961713Sgirish 257844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 257944961713Sgirish "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 258052ccf843Smisaki msg_index, l2_len, 258152ccf843Smisaki rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 258244961713Sgirish 25834202ea4bSsbehera if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 258444961713Sgirish rdc_stats->ierrors++; 258544961713Sgirish if (dcf_err) { 258644961713Sgirish rdc_stats->dcf_err++; 258744961713Sgirish #ifdef NXGE_DEBUG 258844961713Sgirish if (!rdc_stats->dcf_err) { 258944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 259044961713Sgirish "nxge_receive_packet: channel %d dcf_err rcr" 259144961713Sgirish " 
0x%llx", channel, rcr_entry)); 259244961713Sgirish } 259344961713Sgirish #endif 259444961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 259552ccf843Smisaki NXGE_FM_EREPORT_RDMC_DCF_ERR); 25964202ea4bSsbehera } else if (pkt_too_long_err) { 25974202ea4bSsbehera rdc_stats->pkt_too_long_err++; 25984202ea4bSsbehera NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 25994202ea4bSsbehera " channel %d packet length [%d] > " 26004202ea4bSsbehera "maxframesize [%d]", channel, l2_len + ETHERFCSL, 26014202ea4bSsbehera nxgep->mac.maxframesize)); 260244961713Sgirish } else { 260344961713Sgirish /* Update error stats */ 260444961713Sgirish error_disp_cnt = NXGE_ERROR_SHOW_MAX; 260544961713Sgirish rdc_stats->errlog.compl_err_type = error_type; 260644961713Sgirish 260744961713Sgirish switch (error_type) { 2608f6485eecSyc /* 2609f6485eecSyc * Do not send FMA ereport for RCR_L2_ERROR and 2610f6485eecSyc * RCR_L4_CSUM_ERROR because most likely they indicate 2611f6485eecSyc * back pressure rather than HW failures. 
2612f6485eecSyc */ 261353f3d8ecSyc case RCR_L2_ERROR: 261453f3d8ecSyc rdc_stats->l2_err++; 261553f3d8ecSyc if (rdc_stats->l2_err < 261653f3d8ecSyc error_disp_cnt) { 261744961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 261853f3d8ecSyc " nxge_receive_packet:" 261953f3d8ecSyc " channel %d RCR L2_ERROR", 262053f3d8ecSyc channel)); 262153f3d8ecSyc } 262253f3d8ecSyc break; 262353f3d8ecSyc case RCR_L4_CSUM_ERROR: 262453f3d8ecSyc error_send_up = B_TRUE; 262553f3d8ecSyc rdc_stats->l4_cksum_err++; 262653f3d8ecSyc if (rdc_stats->l4_cksum_err < 262753f3d8ecSyc error_disp_cnt) { 262853f3d8ecSyc NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 262953f3d8ecSyc " nxge_receive_packet:" 263053f3d8ecSyc " channel %d" 263153f3d8ecSyc " RCR L4_CSUM_ERROR", channel)); 263253f3d8ecSyc } 263353f3d8ecSyc break; 2634f6485eecSyc /* 2635f6485eecSyc * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 2636f6485eecSyc * RCR_ZCP_SOFT_ERROR because they reflect the same 2637f6485eecSyc * FFLP and ZCP errors that have been reported by 2638f6485eecSyc * nxge_fflp.c and nxge_zcp.c. 
2639f6485eecSyc */ 264053f3d8ecSyc case RCR_FFLP_SOFT_ERROR: 264153f3d8ecSyc error_send_up = B_TRUE; 264253f3d8ecSyc rdc_stats->fflp_soft_err++; 264353f3d8ecSyc if (rdc_stats->fflp_soft_err < 264453f3d8ecSyc error_disp_cnt) { 264553f3d8ecSyc NXGE_ERROR_MSG((nxgep, 264653f3d8ecSyc NXGE_ERR_CTL, 264753f3d8ecSyc " nxge_receive_packet:" 264853f3d8ecSyc " channel %d" 264953f3d8ecSyc " RCR FFLP_SOFT_ERROR", channel)); 265053f3d8ecSyc } 265153f3d8ecSyc break; 265253f3d8ecSyc case RCR_ZCP_SOFT_ERROR: 265353f3d8ecSyc error_send_up = B_TRUE; 265453f3d8ecSyc rdc_stats->fflp_soft_err++; 265553f3d8ecSyc if (rdc_stats->zcp_soft_err < 265653f3d8ecSyc error_disp_cnt) 265753f3d8ecSyc NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 265853f3d8ecSyc " nxge_receive_packet: Channel %d" 265953f3d8ecSyc " RCR ZCP_SOFT_ERROR", channel)); 266053f3d8ecSyc break; 266153f3d8ecSyc default: 266253f3d8ecSyc rdc_stats->rcr_unknown_err++; 266353f3d8ecSyc if (rdc_stats->rcr_unknown_err 266453f3d8ecSyc < error_disp_cnt) { 266553f3d8ecSyc NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 266653f3d8ecSyc " nxge_receive_packet: Channel %d" 266753f3d8ecSyc " RCR entry 0x%llx error 0x%x", 266853f3d8ecSyc rcr_entry, channel, error_type)); 266953f3d8ecSyc } 267053f3d8ecSyc break; 267144961713Sgirish } 267244961713Sgirish } 267344961713Sgirish 267444961713Sgirish /* 267544961713Sgirish * Update and repost buffer block if max usage 267644961713Sgirish * count is reached. 
267744961713Sgirish */ 267844961713Sgirish if (error_send_up == B_FALSE) { 2679958cea9eSml atomic_inc_32(&rx_msg_p->ref_cnt); 268044961713Sgirish if (buffer_free == B_TRUE) { 268144961713Sgirish rx_msg_p->free = B_TRUE; 268244961713Sgirish } 268344961713Sgirish 268444961713Sgirish MUTEX_EXIT(&rx_rbr_p->lock); 268544961713Sgirish nxge_freeb(rx_msg_p); 268644961713Sgirish return; 268744961713Sgirish } 268844961713Sgirish } 268944961713Sgirish 269044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 269152ccf843Smisaki "==> nxge_receive_packet: DMA sync second ")); 269244961713Sgirish 269353f3d8ecSyc bytes_read = rcr_p->rcvd_pkt_bytes; 269444961713Sgirish skip_len = sw_offset_bytes + hdr_size; 269544961713Sgirish if (!rx_msg_p->rx_use_bcopy) { 2696958cea9eSml /* 2697958cea9eSml * For loaned up buffers, the driver reference count 2698958cea9eSml * will be incremented first and then the free state. 2699958cea9eSml */ 270053f3d8ecSyc if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 270114ea4bb7Ssd if (first_entry) { 270214ea4bb7Ssd nmp->b_rptr = &nmp->b_rptr[skip_len]; 270353f3d8ecSyc if (l2_len < bsize - skip_len) { 270414ea4bb7Ssd nmp->b_wptr = &nmp->b_rptr[l2_len]; 270553f3d8ecSyc } else { 270653f3d8ecSyc nmp->b_wptr = &nmp->b_rptr[bsize 270753f3d8ecSyc - skip_len]; 270853f3d8ecSyc } 270914ea4bb7Ssd } else { 271053f3d8ecSyc if (l2_len - bytes_read < bsize) { 271114ea4bb7Ssd nmp->b_wptr = 271214ea4bb7Ssd &nmp->b_rptr[l2_len - bytes_read]; 271353f3d8ecSyc } else { 271453f3d8ecSyc nmp->b_wptr = &nmp->b_rptr[bsize]; 271553f3d8ecSyc } 271614ea4bb7Ssd } 271744961713Sgirish } 271853f3d8ecSyc } else { 271953f3d8ecSyc if (first_entry) { 272053f3d8ecSyc nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 272153f3d8ecSyc l2_len < bsize - skip_len ? 272253f3d8ecSyc l2_len : bsize - skip_len); 272353f3d8ecSyc } else { 272453f3d8ecSyc nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 272553f3d8ecSyc l2_len - bytes_read < bsize ? 
272653f3d8ecSyc l2_len - bytes_read : bsize); 272753f3d8ecSyc } 272853f3d8ecSyc } 272953f3d8ecSyc if (nmp != NULL) { 2730f720bc57Syc if (first_entry) { 2731f720bc57Syc /* 2732f720bc57Syc * Jumbo packets may be received with more than one 2733f720bc57Syc * buffer, increment ipackets for the first entry only. 2734f720bc57Syc */ 2735f720bc57Syc rdc_stats->ipackets++; 2736f720bc57Syc 2737f720bc57Syc /* Update ibytes for kstat. */ 2738f720bc57Syc rdc_stats->ibytes += skip_len 2739f720bc57Syc + l2_len < bsize ? l2_len : bsize; 2740f720bc57Syc /* 2741f720bc57Syc * Update the number of bytes read so far for the 2742f720bc57Syc * current frame. 2743f720bc57Syc */ 274453f3d8ecSyc bytes_read = nmp->b_wptr - nmp->b_rptr; 2745f720bc57Syc } else { 2746f720bc57Syc rdc_stats->ibytes += l2_len - bytes_read < bsize ? 2747f720bc57Syc l2_len - bytes_read : bsize; 274853f3d8ecSyc bytes_read += nmp->b_wptr - nmp->b_rptr; 2749f720bc57Syc } 275053f3d8ecSyc 275153f3d8ecSyc NXGE_DEBUG_MSG((nxgep, RX_CTL, 275253f3d8ecSyc "==> nxge_receive_packet after dupb: " 275353f3d8ecSyc "rbr consumed %d " 275453f3d8ecSyc "pktbufsz_type %d " 275553f3d8ecSyc "nmp $%p rptr $%p wptr $%p " 275653f3d8ecSyc "buf_offset %d bzise %d l2_len %d skip_len %d", 275753f3d8ecSyc rx_rbr_p->rbr_consumed, 275853f3d8ecSyc pktbufsz_type, 275953f3d8ecSyc nmp, nmp->b_rptr, nmp->b_wptr, 276053f3d8ecSyc buf_offset, bsize, l2_len, skip_len)); 276144961713Sgirish } else { 276244961713Sgirish cmn_err(CE_WARN, "!nxge_receive_packet: " 276352ccf843Smisaki "update stats (error)"); 27642e59129aSraghus atomic_inc_32(&rx_msg_p->ref_cnt); 27652e59129aSraghus if (buffer_free == B_TRUE) { 27662e59129aSraghus rx_msg_p->free = B_TRUE; 27672e59129aSraghus } 27682e59129aSraghus MUTEX_EXIT(&rx_rbr_p->lock); 27692e59129aSraghus nxge_freeb(rx_msg_p); 27702e59129aSraghus return; 277144961713Sgirish } 2772ee5416c9Syc 277344961713Sgirish if (buffer_free == B_TRUE) { 277444961713Sgirish rx_msg_p->free = B_TRUE; 277544961713Sgirish } 2776f720bc57Syc 
277744961713Sgirish is_valid = (nmp != NULL); 277853f3d8ecSyc 277953f3d8ecSyc rcr_p->rcvd_pkt_bytes = bytes_read; 278053f3d8ecSyc 278144961713Sgirish MUTEX_EXIT(&rx_rbr_p->lock); 278244961713Sgirish 278344961713Sgirish if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 278444961713Sgirish atomic_inc_32(&rx_msg_p->ref_cnt); 278544961713Sgirish nxge_freeb(rx_msg_p); 278644961713Sgirish } 278744961713Sgirish 278844961713Sgirish if (is_valid) { 2789a3c5bd6dSspeer nmp->b_cont = NULL; 279044961713Sgirish if (first_entry) { 279144961713Sgirish *mp = nmp; 279244961713Sgirish *mp_cont = NULL; 279353f3d8ecSyc } else { 279444961713Sgirish *mp_cont = nmp; 279553f3d8ecSyc } 279644961713Sgirish } 279744961713Sgirish 279844961713Sgirish /* 2799f720bc57Syc * ERROR, FRAG and PKT_TYPE are only reported in the first entry. 2800f720bc57Syc * If a packet is not fragmented and no error bit is set, then 2801f720bc57Syc * L4 checksum is OK. 280244961713Sgirish */ 2803f720bc57Syc 280444961713Sgirish if (is_valid && !multi) { 2805678453a8Sspeer /* 2806b4d05839Sml * If the checksum flag nxge_chksum_offload 2807b4d05839Sml * is 1, TCP and UDP packets can be sent 2808678453a8Sspeer * up with good checksum. If the checksum flag 2809b4d05839Sml * is set to 0, checksum reporting will apply to 2810678453a8Sspeer * TCP packets only (workaround for a hardware bug). 2811b4d05839Sml * If the checksum flag nxge_cksum_offload is 2812b4d05839Sml * greater than 1, both TCP and UDP packets 2813b4d05839Sml * will not be reported its hardware checksum results. 2814678453a8Sspeer */ 2815b4d05839Sml if (nxge_cksum_offload == 1) { 2816678453a8Sspeer is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 281752ccf843Smisaki pkt_type == RCR_PKT_IS_UDP) ? 281852ccf843Smisaki B_TRUE: B_FALSE); 2819b4d05839Sml } else if (!nxge_cksum_offload) { 2820678453a8Sspeer /* TCP checksum only. */ 2821678453a8Sspeer is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ? 
282252ccf843Smisaki B_TRUE: B_FALSE); 2823678453a8Sspeer } 282444961713Sgirish 282544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 282652ccf843Smisaki "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 282752ccf843Smisaki is_valid, multi, is_tcp_udp, frag, error_type)); 282844961713Sgirish 282944961713Sgirish if (is_tcp_udp && !frag && !error_type) { 283044961713Sgirish (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 283152ccf843Smisaki HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 283244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 283352ccf843Smisaki "==> nxge_receive_packet: Full tcp/udp cksum " 283452ccf843Smisaki "is_valid 0x%x multi 0x%llx pkt %d frag %d " 283552ccf843Smisaki "error %d", 283652ccf843Smisaki is_valid, multi, is_tcp_udp, frag, error_type)); 283744961713Sgirish } 283844961713Sgirish } 283944961713Sgirish 284044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 284152ccf843Smisaki "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 284244961713Sgirish 284344961713Sgirish *multi_p = (multi == RCR_MULTI_MASK); 284444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 284552ccf843Smisaki "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 284652ccf843Smisaki *multi_p, nmp, *mp, *mp_cont)); 284744961713Sgirish } 284844961713Sgirish 2849da14cebeSEric Cheng /* 2850da14cebeSEric Cheng * Enable polling for a ring. Interrupt for the ring is disabled when 2851da14cebeSEric Cheng * the nxge interrupt comes (see nxge_rx_intr). 
2852da14cebeSEric Cheng */ 2853da14cebeSEric Cheng int 2854da14cebeSEric Cheng nxge_enable_poll(void *arg) 2855da14cebeSEric Cheng { 2856da14cebeSEric Cheng p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2857da14cebeSEric Cheng p_rx_rcr_ring_t ringp; 2858da14cebeSEric Cheng p_nxge_t nxgep; 2859da14cebeSEric Cheng p_nxge_ldg_t ldgp; 2860da14cebeSEric Cheng uint32_t channel; 2861da14cebeSEric Cheng 2862da14cebeSEric Cheng if (ring_handle == NULL) { 2863da14cebeSEric Cheng return (0); 2864da14cebeSEric Cheng } 2865da14cebeSEric Cheng 2866da14cebeSEric Cheng nxgep = ring_handle->nxgep; 2867da14cebeSEric Cheng channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2868da14cebeSEric Cheng ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2869da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2870da14cebeSEric Cheng "==> nxge_enable_poll: rdc %d ", ringp->rdc)); 2871da14cebeSEric Cheng ldgp = ringp->ldgp; 2872da14cebeSEric Cheng if (ldgp == NULL) { 2873da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2874da14cebeSEric Cheng "==> nxge_enable_poll: rdc %d NULL ldgp: no change", 2875da14cebeSEric Cheng ringp->rdc)); 2876da14cebeSEric Cheng return (0); 2877da14cebeSEric Cheng } 2878da14cebeSEric Cheng 2879da14cebeSEric Cheng MUTEX_ENTER(&ringp->lock); 2880da14cebeSEric Cheng /* enable polling */ 2881da14cebeSEric Cheng if (ringp->poll_flag == 0) { 2882da14cebeSEric Cheng ringp->poll_flag = 1; 2883da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2884da14cebeSEric Cheng "==> nxge_enable_poll: rdc %d set poll flag to 1", 2885da14cebeSEric Cheng ringp->rdc)); 2886da14cebeSEric Cheng } 2887da14cebeSEric Cheng 2888da14cebeSEric Cheng MUTEX_EXIT(&ringp->lock); 2889da14cebeSEric Cheng return (0); 2890da14cebeSEric Cheng } 2891da14cebeSEric Cheng /* 2892da14cebeSEric Cheng * Disable polling for a ring and enable its interrupt. 
 */
int
nxge_disable_poll(void *arg)
{
	p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
	p_rx_rcr_ring_t ringp;
	p_nxge_t nxgep;
	uint32_t channel;

	if (ring_handle == NULL) {
		return (0);
	}

	/* Map the handle's ring index onto the hardware RDC channel. */
	nxgep = ring_handle->nxgep;
	channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
	ringp = nxgep->rx_rcr_rings->rcr_rings[channel];

	/*
	 * NOTE(review): the format string has two %d conversions but only
	 * one argument (ringp->rdc) is supplied; poll_flag is never passed.
	 */
	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc));

	MUTEX_ENTER(&ringp->lock);

	/* disable polling: enable interrupt */
	if (ringp->poll_flag) {
		npi_handle_t handle;
		rx_dma_ctl_stat_t cs;
		uint8_t channel;	/* shadows the outer 'channel' above */
		p_nxge_ldg_t ldgp;

		/*
		 * Get the control and status for this channel.
		 */
		handle = NXGE_DEV_NPI_HANDLE(nxgep);
		channel = ringp->rdc;
		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG,
		    channel, &cs.value);

		/*
		 * Enable mailbox update
		 * Since packets were not read and the hardware uses
		 * bits pktread and ptrread to update the queue
		 * length, we need to set both bits to 0.
		 */
		cs.bits.ldw.pktread = 0;
		cs.bits.ldw.ptrread = 0;
		cs.bits.hdw.mex = 1;
		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
		    cs.value);

		/*
		 * Rearm this logical group if this is a single device
		 * group.
		 */
		ldgp = ringp->ldgp;
		if (ldgp == NULL) {
			/* No group to re-arm; just leave polled mode. */
			ringp->poll_flag = 0;
			MUTEX_EXIT(&ringp->lock);
			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_disable_poll: no ldgp rdc %d "
			    "(still set poll to 0", ringp->rdc));
			return (0);
		}
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)",
		    ringp->rdc, ldgp));
		if (ldgp->nldvs == 1) {
			/* Re-arm the group interrupt with its timer value. */
			ldgimgm_t mgm;
			mgm.value = 0;
			mgm.bits.ldw.arm = 1;
			mgm.bits.ldw.timer = ldgp->ldg_timer;
			NXGE_REG_WR64(handle,
			    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value);
		}
		ringp->poll_flag = 0;
	}

	MUTEX_EXIT(&ringp->lock);
	return (0);
}

/*
 * Poll 'bytes_to_pickup' bytes of message from the rx ring.
2975da14cebeSEric Cheng */ 2976da14cebeSEric Cheng mblk_t * 2977da14cebeSEric Cheng nxge_rx_poll(void *arg, int bytes_to_pickup) 2978da14cebeSEric Cheng { 2979da14cebeSEric Cheng p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2980da14cebeSEric Cheng p_rx_rcr_ring_t rcr_p; 2981da14cebeSEric Cheng p_nxge_t nxgep; 2982da14cebeSEric Cheng npi_handle_t handle; 2983da14cebeSEric Cheng rx_dma_ctl_stat_t cs; 2984da14cebeSEric Cheng mblk_t *mblk; 2985da14cebeSEric Cheng p_nxge_ldv_t ldvp; 2986da14cebeSEric Cheng uint32_t channel; 2987da14cebeSEric Cheng 2988da14cebeSEric Cheng nxgep = ring_handle->nxgep; 2989da14cebeSEric Cheng 2990da14cebeSEric Cheng /* 2991da14cebeSEric Cheng * Get the control and status for this channel. 2992da14cebeSEric Cheng */ 2993da14cebeSEric Cheng handle = NXGE_DEV_NPI_HANDLE(nxgep); 2994da14cebeSEric Cheng channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2995da14cebeSEric Cheng rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel]; 2996da14cebeSEric Cheng MUTEX_ENTER(&rcr_p->lock); 2997da14cebeSEric Cheng ASSERT(rcr_p->poll_flag == 1); 2998da14cebeSEric Cheng 2999da14cebeSEric Cheng RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value); 3000da14cebeSEric Cheng 3001da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3002da14cebeSEric Cheng "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d", 3003da14cebeSEric Cheng rcr_p->rdc, rcr_p->poll_flag)); 3004da14cebeSEric Cheng mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup); 3005da14cebeSEric Cheng 3006da14cebeSEric Cheng ldvp = rcr_p->ldvp; 3007da14cebeSEric Cheng /* error events. 
*/ 3008da14cebeSEric Cheng if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) { 3009da14cebeSEric Cheng (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs); 3010da14cebeSEric Cheng } 3011da14cebeSEric Cheng 3012da14cebeSEric Cheng MUTEX_EXIT(&rcr_p->lock); 3013da14cebeSEric Cheng 3014da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3015da14cebeSEric Cheng "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk)); 3016da14cebeSEric Cheng return (mblk); 3017da14cebeSEric Cheng } 3018da14cebeSEric Cheng 3019da14cebeSEric Cheng 302044961713Sgirish /*ARGSUSED*/ 302144961713Sgirish static nxge_status_t 3022678453a8Sspeer nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs) 302344961713Sgirish { 302444961713Sgirish p_nxge_rx_ring_stats_t rdc_stats; 302544961713Sgirish npi_handle_t handle; 302644961713Sgirish npi_status_t rs; 302744961713Sgirish boolean_t rxchan_fatal = B_FALSE; 302844961713Sgirish boolean_t rxport_fatal = B_FALSE; 302944961713Sgirish uint8_t portn; 303044961713Sgirish nxge_status_t status = NXGE_OK; 303144961713Sgirish uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 303244961713Sgirish NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 303344961713Sgirish 303444961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 303544961713Sgirish portn = nxgep->mac.portnum; 3036678453a8Sspeer rdc_stats = &nxgep->statsp->rdc_stats[channel]; 303744961713Sgirish 303844961713Sgirish if (cs.bits.hdw.rbr_tmout) { 303944961713Sgirish rdc_stats->rx_rbr_tmout++; 304044961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 304152ccf843Smisaki NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 304244961713Sgirish rxchan_fatal = B_TRUE; 304344961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 304452ccf843Smisaki "==> nxge_rx_err_evnts: rx_rbr_timeout")); 304544961713Sgirish } 304644961713Sgirish if (cs.bits.hdw.rsp_cnt_err) { 304744961713Sgirish rdc_stats->rsp_cnt_err++; 304844961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 304952ccf843Smisaki 
NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 305044961713Sgirish rxchan_fatal = B_TRUE; 305144961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 305252ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 305352ccf843Smisaki "rsp_cnt_err", channel)); 305444961713Sgirish } 305544961713Sgirish if (cs.bits.hdw.byte_en_bus) { 305644961713Sgirish rdc_stats->byte_en_bus++; 305744961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 305852ccf843Smisaki NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 305944961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 306052ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 306152ccf843Smisaki "fatal error: byte_en_bus", channel)); 306244961713Sgirish rxchan_fatal = B_TRUE; 306344961713Sgirish } 306444961713Sgirish if (cs.bits.hdw.rsp_dat_err) { 306544961713Sgirish rdc_stats->rsp_dat_err++; 306644961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 306752ccf843Smisaki NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 306844961713Sgirish rxchan_fatal = B_TRUE; 306944961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 307052ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 307152ccf843Smisaki "fatal error: rsp_dat_err", channel)); 307244961713Sgirish } 307344961713Sgirish if (cs.bits.hdw.rcr_ack_err) { 307444961713Sgirish rdc_stats->rcr_ack_err++; 307544961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 307652ccf843Smisaki NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 307744961713Sgirish rxchan_fatal = B_TRUE; 307844961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 307952ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 308052ccf843Smisaki "fatal error: rcr_ack_err", channel)); 308144961713Sgirish } 308244961713Sgirish if (cs.bits.hdw.dc_fifo_err) { 308344961713Sgirish rdc_stats->dc_fifo_err++; 308444961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 308552ccf843Smisaki NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 308644961713Sgirish /* This is not a fatal error! 
*/ 308744961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 308852ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 308952ccf843Smisaki "dc_fifo_err", channel)); 309044961713Sgirish rxport_fatal = B_TRUE; 309144961713Sgirish } 309244961713Sgirish if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 309344961713Sgirish if ((rs = npi_rxdma_ring_perr_stat_get(handle, 309452ccf843Smisaki &rdc_stats->errlog.pre_par, 309552ccf843Smisaki &rdc_stats->errlog.sha_par)) 309652ccf843Smisaki != NPI_SUCCESS) { 309744961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 309852ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 309952ccf843Smisaki "rcr_sha_par: get perr", channel)); 310044961713Sgirish return (NXGE_ERROR | rs); 310144961713Sgirish } 310244961713Sgirish if (cs.bits.hdw.rcr_sha_par) { 310344961713Sgirish rdc_stats->rcr_sha_par++; 310444961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 310552ccf843Smisaki NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 310644961713Sgirish rxchan_fatal = B_TRUE; 310744961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 310852ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 310952ccf843Smisaki "fatal error: rcr_sha_par", channel)); 311044961713Sgirish } 311144961713Sgirish if (cs.bits.hdw.rbr_pre_par) { 311244961713Sgirish rdc_stats->rbr_pre_par++; 311344961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 311452ccf843Smisaki NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 311544961713Sgirish rxchan_fatal = B_TRUE; 311644961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 311752ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 311852ccf843Smisaki "fatal error: rbr_pre_par", channel)); 311944961713Sgirish } 312044961713Sgirish } 312163e23a19Syc /* 312263e23a19Syc * The Following 4 status bits are for information, the system 312363e23a19Syc * is running fine. There is no need to send FMA ereports or 312463e23a19Syc * log messages. 
312563e23a19Syc */ 312644961713Sgirish if (cs.bits.hdw.port_drop_pkt) { 312744961713Sgirish rdc_stats->port_drop_pkt++; 312844961713Sgirish } 312944961713Sgirish if (cs.bits.hdw.wred_drop) { 313044961713Sgirish rdc_stats->wred_drop++; 313144961713Sgirish } 313244961713Sgirish if (cs.bits.hdw.rbr_pre_empty) { 313344961713Sgirish rdc_stats->rbr_pre_empty++; 313444961713Sgirish } 313544961713Sgirish if (cs.bits.hdw.rcr_shadow_full) { 313644961713Sgirish rdc_stats->rcr_shadow_full++; 313744961713Sgirish } 313844961713Sgirish if (cs.bits.hdw.config_err) { 313944961713Sgirish rdc_stats->config_err++; 314044961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 314152ccf843Smisaki NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 314244961713Sgirish rxchan_fatal = B_TRUE; 314344961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 314452ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 314552ccf843Smisaki "config error", channel)); 314644961713Sgirish } 314744961713Sgirish if (cs.bits.hdw.rcrincon) { 314844961713Sgirish rdc_stats->rcrincon++; 314944961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 315052ccf843Smisaki NXGE_FM_EREPORT_RDMC_RCRINCON); 315144961713Sgirish rxchan_fatal = B_TRUE; 315244961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 315352ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 315452ccf843Smisaki "fatal error: rcrincon error", channel)); 315544961713Sgirish } 315644961713Sgirish if (cs.bits.hdw.rcrfull) { 315744961713Sgirish rdc_stats->rcrfull++; 315844961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 315952ccf843Smisaki NXGE_FM_EREPORT_RDMC_RCRFULL); 316044961713Sgirish rxchan_fatal = B_TRUE; 316144961713Sgirish if (rdc_stats->rcrfull < error_disp_cnt) 316244961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 316352ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 316452ccf843Smisaki "fatal error: rcrfull error", channel)); 316544961713Sgirish } 316644961713Sgirish if (cs.bits.hdw.rbr_empty) { 316763e23a19Syc /* 316863e23a19Syc * This bit is 
for information, there is no need 316963e23a19Syc * send FMA ereport or log a message. 317063e23a19Syc */ 317144961713Sgirish rdc_stats->rbr_empty++; 317244961713Sgirish } 317344961713Sgirish if (cs.bits.hdw.rbrfull) { 317444961713Sgirish rdc_stats->rbrfull++; 317544961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 317652ccf843Smisaki NXGE_FM_EREPORT_RDMC_RBRFULL); 317744961713Sgirish rxchan_fatal = B_TRUE; 317844961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 317952ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 318052ccf843Smisaki "fatal error: rbr_full error", channel)); 318144961713Sgirish } 318244961713Sgirish if (cs.bits.hdw.rbrlogpage) { 318344961713Sgirish rdc_stats->rbrlogpage++; 318444961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 318552ccf843Smisaki NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 318644961713Sgirish rxchan_fatal = B_TRUE; 318744961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 318852ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 318952ccf843Smisaki "fatal error: rbr logical page error", channel)); 319044961713Sgirish } 319144961713Sgirish if (cs.bits.hdw.cfiglogpage) { 319244961713Sgirish rdc_stats->cfiglogpage++; 319344961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 319452ccf843Smisaki NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 319544961713Sgirish rxchan_fatal = B_TRUE; 319644961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 319752ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 319852ccf843Smisaki "fatal error: cfig logical page error", channel)); 319944961713Sgirish } 320044961713Sgirish 320144961713Sgirish if (rxport_fatal) { 320244961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3203678453a8Sspeer " nxge_rx_err_evnts: fatal error on Port #%d\n", 3204678453a8Sspeer portn)); 3205678453a8Sspeer if (isLDOMguest(nxgep)) { 3206678453a8Sspeer status = NXGE_ERROR; 3207678453a8Sspeer } else { 3208678453a8Sspeer status = nxge_ipp_fatal_err_recover(nxgep); 3209678453a8Sspeer if (status == NXGE_OK) { 3210678453a8Sspeer 
FM_SERVICE_RESTORED(nxgep); 3211678453a8Sspeer } 321244961713Sgirish } 321344961713Sgirish } 321444961713Sgirish 321544961713Sgirish if (rxchan_fatal) { 321644961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3217678453a8Sspeer " nxge_rx_err_evnts: fatal error on Channel #%d\n", 3218678453a8Sspeer channel)); 3219678453a8Sspeer if (isLDOMguest(nxgep)) { 3220678453a8Sspeer status = NXGE_ERROR; 3221678453a8Sspeer } else { 3222678453a8Sspeer status = nxge_rxdma_fatal_err_recover(nxgep, channel); 3223678453a8Sspeer if (status == NXGE_OK) { 3224678453a8Sspeer FM_SERVICE_RESTORED(nxgep); 3225678453a8Sspeer } 322644961713Sgirish } 322744961713Sgirish } 322844961713Sgirish 322944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 323044961713Sgirish 323144961713Sgirish return (status); 323244961713Sgirish } 323344961713Sgirish 3234678453a8Sspeer /* 3235678453a8Sspeer * nxge_rdc_hvio_setup 3236678453a8Sspeer * 3237678453a8Sspeer * This code appears to setup some Hypervisor variables. 3238678453a8Sspeer * 3239678453a8Sspeer * Arguments: 3240678453a8Sspeer * nxgep 3241678453a8Sspeer * channel 3242678453a8Sspeer * 3243678453a8Sspeer * Notes: 3244678453a8Sspeer * What does NIU_LP_WORKAROUND mean? 
 *
 * NPI/NXGE function calls:
 *	na
 *
 * Context:
 *	Any domain
 */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
static void
nxge_rdc_hvio_setup(
	nxge_t *nxgep, int channel)
{
	nxge_dma_common_t	*dma_common;
	nxge_dma_common_t	*dma_control;
	rx_rbr_ring_t		*ring;

	ring = nxgep->rx_rbr_rings->rbr_rings[channel];
	dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];

	/* Not yet published to the hypervisor; set elsewhere after binding. */
	ring->hv_set = B_FALSE;

	/* Record the data buffer region (I/O address and size). */
	ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)
	    dma_common->orig_ioaddr_pp;
	ring->hv_rx_buf_ioaddr_size = (uint64_t)
	    dma_common->orig_alength;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
	    "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)",
	    channel, ring->hv_rx_buf_base_ioaddr_pp,
	    dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size,
	    dma_common->orig_alength, dma_common->orig_alength));

	dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];

	/* Record the control (descriptor/mailbox) region likewise. */
	ring->hv_rx_cntl_base_ioaddr_pp =
	    (uint64_t)dma_control->orig_ioaddr_pp;
	ring->hv_rx_cntl_ioaddr_size =
	    (uint64_t)dma_control->orig_alength;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
	    "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)",
	    channel, ring->hv_rx_cntl_base_ioaddr_pp,
	    dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size,
	    dma_control->orig_alength, dma_control->orig_alength));
}
#endif

/*
 * nxge_map_rxdma
 *
 * Map an RDC into our kernel space.
 *
 * Arguments:
 * 	nxgep
 * 	channel	The channel to map.
 *
 * Notes:
 *	1. Allocate & initialise a memory pool, if necessary.
 *	2. Allocate however many receive buffers are required.
 *	3. Setup buffers, descriptors, and mailbox.
 *
 * NPI/NXGE function calls:
 *	nxge_alloc_rx_mem_pool()
 *	nxge_alloc_rbb()
 *	nxge_map_rxdma_channel()
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 */
static nxge_status_t
nxge_map_rxdma(p_nxge_t nxgep, int channel)
{
	nxge_dma_common_t	**data;
	nxge_dma_common_t	**control;
	rx_rbr_ring_t		**rbr_ring;
	rx_rcr_ring_t		**rcr_ring;
	rx_mbox_t		**mailbox;
	uint32_t		chunks;

	nxge_status_t		status;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma"));

	/* Lazily create the shared rx memory pool on first use. */
	if (!nxgep->rx_buf_pool_p) {
		if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_map_rxdma: buf not allocated"));
			return (NXGE_ERROR);
		}
	}

	if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK)
		return (NXGE_ERROR);

	/*
	 * Map descriptors from the buffer pools for each dma channel.
	 */

	/*
	 * Set up and prepare buffer blocks, descriptors
	 * and mailbox.
	 */
	data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
	rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel];
	chunks = nxgep->rx_buf_pool_p->num_chunks[channel];

	control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
	rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel];

	mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel];

	status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring,
	    chunks, control, rcr_ring, mailbox);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) "
		    "returned 0x%x",
		    channel, status));
		return (status);
	}
	nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel;
	nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel;
	nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats =
	    &nxgep->statsp->rdc_stats[channel];

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	/* Service domains record buffer regions for the hypervisor. */
	if (!isLDOMguest(nxgep))
		nxge_rdc_hvio_setup(nxgep, channel);
#endif

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel));

	return (status);
}

/*
 * nxge_unmap_rxdma
 *
 * Undo nxge_map_rxdma() for one channel: tear down the channel's rings,
 * mailbox, and receive buffers.  Safe to call if mapping never happened
 * (all lookups are NULL-checked).
 */
static void
nxge_unmap_rxdma(p_nxge_t nxgep, int channel)
{
	rx_rbr_ring_t	*rbr_ring;
	rx_rcr_ring_t	*rcr_ring;
	rx_mbox_t	*mailbox;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel));

	if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings ||
	    !nxgep->rx_mbox_areas_p)
		return;

	rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel];
	rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel];
	mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel];

	if (!rbr_ring || !rcr_ring || !mailbox)
		return;

	(void) nxge_unmap_rxdma_channel(
	    nxgep, channel, rbr_ring, rcr_ring, mailbox);

	nxge_free_rxb(nxgep, channel);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma"));
}

/*
 * nxge_map_rxdma_channel
 *
 * Map one channel's buffer ring, completion ring, and mailbox; on partial
 * failure, unwind whatever was already mapped (see the labels below).
 */
nxge_status_t
nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
    p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
    uint32_t num_chunks,
    p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p,
    p_rx_mbox_t *rx_mbox_p)
{
	int	status = NXGE_OK;

	/*
	 * Set up and prepare buffer blocks, descriptors
	 * and mailbox.
	 */
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel (channel %d)", channel));
	/*
	 * Receive buffer blocks
	 */
	status = nxge_map_rxdma_channel_buf_ring(nxgep, channel,
	    dma_buf_p, rbr_p, num_chunks);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_map_rxdma_channel (channel %d): "
		    "map buffer failed 0x%x", channel, status));
		goto nxge_map_rxdma_channel_exit;
	}

	/*
	 * Receive block ring, completion ring and mailbox.
	 */
	status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel,
	    dma_cntl_p, rbr_p, rcr_p, rx_mbox_p);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_map_rxdma_channel (channel %d): "
		    "map config failed 0x%x", channel, status));
		goto nxge_map_rxdma_channel_fail2;
	}

	goto nxge_map_rxdma_channel_exit;

	/*
	 * NOTE(review): no visible goto targets this label; it looks like a
	 * leftover from an earlier, longer setup sequence — confirm before
	 * removing.
	 */
nxge_map_rxdma_channel_fail3:
	/* Free rbr, rcr */
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_map_rxdma_channel: free rbr/rcr "
	    "(status 0x%x channel %d)",
	    status, channel));
	nxge_unmap_rxdma_channel_cfg_ring(nxgep,
	    *rcr_p, *rx_mbox_p);

nxge_map_rxdma_channel_fail2:
	/* Free buffer blocks */
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_map_rxdma_channel: free rx buffers"
	    "(nxgep 0x%x status 0x%x channel %d)",
	    nxgep, status, channel));
	nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p);

	status = NXGE_ERROR;

nxge_map_rxdma_channel_exit:
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_map_rxdma_channel: "
	    "(nxgep 0x%x status 0x%x channel %d)",
	    nxgep, status, channel));

	return (status);
}

/*
 * nxge_unmap_rxdma_channel
 *
 * Inverse of nxge_map_rxdma_channel(): release the completion ring and
 * mailbox, then the buffer ring, for one channel.
 */
/*ARGSUSED*/
static void
nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
{
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_unmap_rxdma_channel (channel %d)", channel));

	/*
	 * unmap receive block ring, completion ring and mailbox.
	 */
	(void) nxge_unmap_rxdma_channel_cfg_ring(nxgep,
	    rcr_p, rx_mbox_p);

	/* unmap buffer blocks */
	(void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel"));
}

/*
 * nxge_map_rxdma_channel_cfg_ring
 *
 * Build the per-channel hardware configuration: RBR descriptor ring,
 * RCR completion ring, and mailbox.  (Continues below.)
 */
/*ARGSUSED*/
static nxge_status_t
nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p,
    p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
{
	p_rx_rbr_ring_t		rbrp;
	p_rx_rcr_ring_t		rcrp;
	p_rx_mbox_t		mboxp;
	p_nxge_dma_common_t	cntl_dmap;
	p_nxge_dma_common_t	dmap;
	p_rx_msg_t		*rx_msg_ring;
	p_rx_msg_t		rx_msg_p;
	p_rbr_cfig_a_t		rcfga_p;
	p_rbr_cfig_b_t		rcfgb_p;
	p_rcrcfig_a_t		cfga_p;
	p_rcrcfig_b_t		cfgb_p;
	p_rxdma_cfig1_t		cfig1_p;
	p_rxdma_cfig2_t		cfig2_p;
	p_rbr_kick_t		kick_p;
	uint32_t		dmaaddrp;
	uint32_t		*rbr_vaddrp;
	uint32_t		bkaddr;
	nxge_status_t		status = NXGE_OK;
	int			i;
	uint32_t		nxge_port_rcr_size;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_cfg_ring"));

	cntl_dmap = *dma_cntl_p;

	/* Map in the receive block ring */
	rbrp = *rbr_p;
	dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc;
353544961713Sgirish nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 353644961713Sgirish /* 353744961713Sgirish * Zero out buffer block ring descriptors. 353844961713Sgirish */ 353944961713Sgirish bzero((caddr_t)dmap->kaddrp, dmap->alength); 354044961713Sgirish 354144961713Sgirish rcfga_p = &(rbrp->rbr_cfga); 354244961713Sgirish rcfgb_p = &(rbrp->rbr_cfgb); 354344961713Sgirish kick_p = &(rbrp->rbr_kick); 354444961713Sgirish rcfga_p->value = 0; 354544961713Sgirish rcfgb_p->value = 0; 354644961713Sgirish kick_p->value = 0; 354744961713Sgirish rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 354844961713Sgirish rcfga_p->value = (rbrp->rbr_addr & 354952ccf843Smisaki (RBR_CFIG_A_STDADDR_MASK | 355052ccf843Smisaki RBR_CFIG_A_STDADDR_BASE_MASK)); 355144961713Sgirish rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 355244961713Sgirish 355344961713Sgirish rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 355444961713Sgirish rcfgb_p->bits.ldw.vld0 = 1; 355544961713Sgirish rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 355644961713Sgirish rcfgb_p->bits.ldw.vld1 = 1; 355744961713Sgirish rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 355844961713Sgirish rcfgb_p->bits.ldw.vld2 = 1; 355944961713Sgirish rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 356044961713Sgirish 356144961713Sgirish /* 356244961713Sgirish * For each buffer block, enter receive block address to the ring. 
356344961713Sgirish */ 356444961713Sgirish rbr_vaddrp = (uint32_t *)dmap->kaddrp; 356544961713Sgirish rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 356644961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 356752ccf843Smisaki "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 356852ccf843Smisaki "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 356944961713Sgirish 357044961713Sgirish rx_msg_ring = rbrp->rx_msg_ring; 357144961713Sgirish for (i = 0; i < rbrp->tnblocks; i++) { 357244961713Sgirish rx_msg_p = rx_msg_ring[i]; 357344961713Sgirish rx_msg_p->nxgep = nxgep; 357444961713Sgirish rx_msg_p->rx_rbr_p = rbrp; 357544961713Sgirish bkaddr = (uint32_t) 357652ccf843Smisaki ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 357752ccf843Smisaki >> RBR_BKADDR_SHIFT)); 357844961713Sgirish rx_msg_p->free = B_FALSE; 357944961713Sgirish rx_msg_p->max_usage_cnt = 0xbaddcafe; 358044961713Sgirish 358144961713Sgirish *rbr_vaddrp++ = bkaddr; 358244961713Sgirish } 358344961713Sgirish 358444961713Sgirish kick_p->bits.ldw.bkadd = rbrp->rbb_max; 358544961713Sgirish rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 358644961713Sgirish 358744961713Sgirish rbrp->rbr_rd_index = 0; 358844961713Sgirish 358944961713Sgirish rbrp->rbr_consumed = 0; 359044961713Sgirish rbrp->rbr_use_bcopy = B_TRUE; 359144961713Sgirish rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 359244961713Sgirish /* 359344961713Sgirish * Do bcopy on packets greater than bcopy size once 359444961713Sgirish * the lo threshold is reached. 359544961713Sgirish * This lo threshold should be less than the hi threshold. 359644961713Sgirish * 359744961713Sgirish * Do bcopy on every packet once the hi threshold is reached. 
359844961713Sgirish */ 359944961713Sgirish if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 360044961713Sgirish /* default it to use hi */ 360144961713Sgirish nxge_rx_threshold_lo = nxge_rx_threshold_hi; 360244961713Sgirish } 360344961713Sgirish 360444961713Sgirish if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 360544961713Sgirish nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 360644961713Sgirish } 360744961713Sgirish rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 360844961713Sgirish 360944961713Sgirish switch (nxge_rx_threshold_hi) { 361044961713Sgirish default: 361144961713Sgirish case NXGE_RX_COPY_NONE: 361244961713Sgirish /* Do not do bcopy at all */ 361344961713Sgirish rbrp->rbr_use_bcopy = B_FALSE; 361444961713Sgirish rbrp->rbr_threshold_hi = rbrp->rbb_max; 361544961713Sgirish break; 361644961713Sgirish 361744961713Sgirish case NXGE_RX_COPY_1: 361844961713Sgirish case NXGE_RX_COPY_2: 361944961713Sgirish case NXGE_RX_COPY_3: 362044961713Sgirish case NXGE_RX_COPY_4: 362144961713Sgirish case NXGE_RX_COPY_5: 362244961713Sgirish case NXGE_RX_COPY_6: 362344961713Sgirish case NXGE_RX_COPY_7: 362444961713Sgirish rbrp->rbr_threshold_hi = 362552ccf843Smisaki rbrp->rbb_max * 362652ccf843Smisaki (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 362744961713Sgirish break; 362844961713Sgirish 362944961713Sgirish case NXGE_RX_COPY_ALL: 363044961713Sgirish rbrp->rbr_threshold_hi = 0; 363144961713Sgirish break; 363244961713Sgirish } 363344961713Sgirish 363444961713Sgirish switch (nxge_rx_threshold_lo) { 363544961713Sgirish default: 363644961713Sgirish case NXGE_RX_COPY_NONE: 363744961713Sgirish /* Do not do bcopy at all */ 363844961713Sgirish if (rbrp->rbr_use_bcopy) { 363944961713Sgirish rbrp->rbr_use_bcopy = B_FALSE; 364044961713Sgirish } 364144961713Sgirish rbrp->rbr_threshold_lo = rbrp->rbb_max; 364244961713Sgirish break; 364344961713Sgirish 364444961713Sgirish case NXGE_RX_COPY_1: 364544961713Sgirish case NXGE_RX_COPY_2: 364644961713Sgirish case NXGE_RX_COPY_3: 364744961713Sgirish 
case NXGE_RX_COPY_4: 364844961713Sgirish case NXGE_RX_COPY_5: 364944961713Sgirish case NXGE_RX_COPY_6: 365044961713Sgirish case NXGE_RX_COPY_7: 365144961713Sgirish rbrp->rbr_threshold_lo = 365252ccf843Smisaki rbrp->rbb_max * 365352ccf843Smisaki (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 365444961713Sgirish break; 365544961713Sgirish 365644961713Sgirish case NXGE_RX_COPY_ALL: 365744961713Sgirish rbrp->rbr_threshold_lo = 0; 365844961713Sgirish break; 365944961713Sgirish } 366044961713Sgirish 366144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 366252ccf843Smisaki "nxge_map_rxdma_channel_cfg_ring: channel %d " 366352ccf843Smisaki "rbb_max %d " 366452ccf843Smisaki "rbrp->rbr_bufsize_type %d " 366552ccf843Smisaki "rbb_threshold_hi %d " 366652ccf843Smisaki "rbb_threshold_lo %d", 366752ccf843Smisaki dma_channel, 366852ccf843Smisaki rbrp->rbb_max, 366952ccf843Smisaki rbrp->rbr_bufsize_type, 367052ccf843Smisaki rbrp->rbr_threshold_hi, 367152ccf843Smisaki rbrp->rbr_threshold_lo)); 367244961713Sgirish 367344961713Sgirish rbrp->page_valid.value = 0; 367444961713Sgirish rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 367544961713Sgirish rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 367644961713Sgirish rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 367744961713Sgirish rbrp->page_hdl.value = 0; 367844961713Sgirish 367944961713Sgirish rbrp->page_valid.bits.ldw.page0 = 1; 368044961713Sgirish rbrp->page_valid.bits.ldw.page1 = 1; 368144961713Sgirish 368244961713Sgirish /* Map in the receive completion ring */ 368344961713Sgirish rcrp = (p_rx_rcr_ring_t) 368452ccf843Smisaki KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 368544961713Sgirish rcrp->rdc = dma_channel; 368644961713Sgirish 368744961713Sgirish nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 368844961713Sgirish rcrp->comp_size = nxge_port_rcr_size; 368944961713Sgirish rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 369044961713Sgirish 369144961713Sgirish rcrp->max_receive_pkts = nxge_max_rx_pkts; 
369244961713Sgirish 369344961713Sgirish dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 369444961713Sgirish nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 369552ccf843Smisaki sizeof (rcr_entry_t)); 369644961713Sgirish rcrp->comp_rd_index = 0; 369744961713Sgirish rcrp->comp_wt_index = 0; 369844961713Sgirish rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 369952ccf843Smisaki (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3700adfcba55Sjoycey #if defined(__i386) 370152ccf843Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 370252ccf843Smisaki (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3703adfcba55Sjoycey #else 370452ccf843Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 370552ccf843Smisaki (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3706adfcba55Sjoycey #endif 370744961713Sgirish 370844961713Sgirish rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 370952ccf843Smisaki (nxge_port_rcr_size - 1); 371044961713Sgirish rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 371152ccf843Smisaki (nxge_port_rcr_size - 1); 371244961713Sgirish 371344961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 371452ccf843Smisaki "==> nxge_map_rxdma_channel_cfg_ring: " 371552ccf843Smisaki "channel %d " 371652ccf843Smisaki "rbr_vaddrp $%p " 371752ccf843Smisaki "rcr_desc_rd_head_p $%p " 371852ccf843Smisaki "rcr_desc_rd_head_pp $%p " 371952ccf843Smisaki "rcr_desc_rd_last_p $%p " 372052ccf843Smisaki "rcr_desc_rd_last_pp $%p ", 372152ccf843Smisaki dma_channel, 372252ccf843Smisaki rbr_vaddrp, 372352ccf843Smisaki rcrp->rcr_desc_rd_head_p, 372452ccf843Smisaki rcrp->rcr_desc_rd_head_pp, 372552ccf843Smisaki rcrp->rcr_desc_last_p, 372652ccf843Smisaki rcrp->rcr_desc_last_pp)); 372744961713Sgirish 372844961713Sgirish /* 372944961713Sgirish * Zero out buffer block ring descriptors. 
373044961713Sgirish */ 373144961713Sgirish bzero((caddr_t)dmap->kaddrp, dmap->alength); 37327b26d9ffSSantwona Behera 37337b26d9ffSSantwona Behera rcrp->intr_timeout = (nxgep->intr_timeout < 37347b26d9ffSSantwona Behera NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 37357b26d9ffSSantwona Behera nxgep->intr_timeout; 37367b26d9ffSSantwona Behera 37377b26d9ffSSantwona Behera rcrp->intr_threshold = (nxgep->intr_threshold < 37387b26d9ffSSantwona Behera NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 37397b26d9ffSSantwona Behera nxgep->intr_threshold; 37407b26d9ffSSantwona Behera 374144961713Sgirish rcrp->full_hdr_flag = B_FALSE; 374244961713Sgirish rcrp->sw_priv_hdr_len = 0; 374344961713Sgirish 374444961713Sgirish cfga_p = &(rcrp->rcr_cfga); 374544961713Sgirish cfgb_p = &(rcrp->rcr_cfgb); 374644961713Sgirish cfga_p->value = 0; 374744961713Sgirish cfgb_p->value = 0; 374844961713Sgirish rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 374944961713Sgirish cfga_p->value = (rcrp->rcr_addr & 375052ccf843Smisaki (RCRCFIG_A_STADDR_MASK | 375152ccf843Smisaki RCRCFIG_A_STADDR_BASE_MASK)); 375244961713Sgirish 375344961713Sgirish rcfga_p->value |= ((uint64_t)rcrp->comp_size << 375452ccf843Smisaki RCRCFIG_A_LEN_SHIF); 375544961713Sgirish 375644961713Sgirish /* 375744961713Sgirish * Timeout should be set based on the system clock divider. 37587b26d9ffSSantwona Behera * A timeout value of 1 assumes that the 375944961713Sgirish * granularity (1000) is 3 microseconds running at 300MHz. 
376044961713Sgirish */ 376114ea4bb7Ssd cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 376214ea4bb7Ssd cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 376344961713Sgirish cfgb_p->bits.ldw.entout = 1; 376444961713Sgirish 376544961713Sgirish /* Map in the mailbox */ 376644961713Sgirish mboxp = (p_rx_mbox_t) 376752ccf843Smisaki KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 376844961713Sgirish dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 376944961713Sgirish nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 377044961713Sgirish cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 377144961713Sgirish cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 377244961713Sgirish cfig1_p->value = cfig2_p->value = 0; 377344961713Sgirish 377444961713Sgirish mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 377544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 377652ccf843Smisaki "==> nxge_map_rxdma_channel_cfg_ring: " 377752ccf843Smisaki "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 377852ccf843Smisaki dma_channel, cfig1_p->value, cfig2_p->value, 377952ccf843Smisaki mboxp->mbox_addr)); 378044961713Sgirish 378144961713Sgirish dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 378252ccf843Smisaki & 0xfff); 378344961713Sgirish cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 378444961713Sgirish 378544961713Sgirish 378644961713Sgirish dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 378744961713Sgirish dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 378852ccf843Smisaki RXDMA_CFIG2_MBADDR_L_MASK); 378944961713Sgirish 379044961713Sgirish cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 379144961713Sgirish 379244961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 379352ccf843Smisaki "==> nxge_map_rxdma_channel_cfg_ring: " 379452ccf843Smisaki "channel %d damaddrp $%p " 379552ccf843Smisaki "cfg1 0x%016llx cfig2 0x%016llx", 379652ccf843Smisaki dma_channel, dmaaddrp, 379752ccf843Smisaki cfig1_p->value, cfig2_p->value)); 379844961713Sgirish 
379944961713Sgirish cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 380044961713Sgirish cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 380144961713Sgirish 380244961713Sgirish rbrp->rx_rcr_p = rcrp; 380344961713Sgirish rcrp->rx_rbr_p = rbrp; 380444961713Sgirish *rcr_p = rcrp; 380544961713Sgirish *rx_mbox_p = mboxp; 380644961713Sgirish 380744961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 380852ccf843Smisaki "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 380944961713Sgirish 381044961713Sgirish return (status); 381144961713Sgirish } 381244961713Sgirish 381344961713Sgirish /*ARGSUSED*/ 381444961713Sgirish static void 381544961713Sgirish nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 381644961713Sgirish p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 381744961713Sgirish { 381844961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 381952ccf843Smisaki "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 382052ccf843Smisaki rcr_p->rdc)); 382144961713Sgirish 382244961713Sgirish KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 382344961713Sgirish KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 382444961713Sgirish 382544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 382652ccf843Smisaki "<== nxge_unmap_rxdma_channel_cfg_ring")); 382744961713Sgirish } 382844961713Sgirish 382944961713Sgirish static nxge_status_t 383044961713Sgirish nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 383144961713Sgirish p_nxge_dma_common_t *dma_buf_p, 383244961713Sgirish p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 383344961713Sgirish { 383444961713Sgirish p_rx_rbr_ring_t rbrp; 383544961713Sgirish p_nxge_dma_common_t dma_bufp, tmp_bufp; 383644961713Sgirish p_rx_msg_t *rx_msg_ring; 383744961713Sgirish p_rx_msg_t rx_msg_p; 383844961713Sgirish p_mblk_t mblk_p; 383944961713Sgirish 384044961713Sgirish rxring_info_t *ring_info; 384144961713Sgirish nxge_status_t status = NXGE_OK; 384244961713Sgirish int i, j, index; 384344961713Sgirish uint32_t size, bsize, nblocks, nmsgs; 
384444961713Sgirish 384544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 384652ccf843Smisaki "==> nxge_map_rxdma_channel_buf_ring: channel %d", 384752ccf843Smisaki channel)); 384844961713Sgirish 384944961713Sgirish dma_bufp = tmp_bufp = *dma_buf_p; 385044961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 385152ccf843Smisaki " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 385252ccf843Smisaki "chunks bufp 0x%016llx", 385352ccf843Smisaki channel, num_chunks, dma_bufp)); 385444961713Sgirish 385544961713Sgirish nmsgs = 0; 385644961713Sgirish for (i = 0; i < num_chunks; i++, tmp_bufp++) { 385744961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 385852ccf843Smisaki "==> nxge_map_rxdma_channel_buf_ring: channel %d " 385952ccf843Smisaki "bufp 0x%016llx nblocks %d nmsgs %d", 386052ccf843Smisaki channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 386144961713Sgirish nmsgs += tmp_bufp->nblocks; 386244961713Sgirish } 386344961713Sgirish if (!nmsgs) { 386456d930aeSspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 386552ccf843Smisaki "<== nxge_map_rxdma_channel_buf_ring: channel %d " 386652ccf843Smisaki "no msg blocks", 386752ccf843Smisaki channel)); 386844961713Sgirish status = NXGE_ERROR; 386944961713Sgirish goto nxge_map_rxdma_channel_buf_ring_exit; 387044961713Sgirish } 387144961713Sgirish 3872007969e0Stm rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 387344961713Sgirish 387444961713Sgirish size = nmsgs * sizeof (p_rx_msg_t); 387544961713Sgirish rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 387644961713Sgirish ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 387752ccf843Smisaki KM_SLEEP); 387844961713Sgirish 387944961713Sgirish MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 388052ccf843Smisaki (void *)nxgep->interrupt_cookie); 388144961713Sgirish MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 388252ccf843Smisaki (void *)nxgep->interrupt_cookie); 388344961713Sgirish rbrp->rdc = channel; 388444961713Sgirish rbrp->num_blocks = num_chunks; 388544961713Sgirish 
rbrp->tnblocks = nmsgs; 388644961713Sgirish rbrp->rbb_max = nmsgs; 388744961713Sgirish rbrp->rbr_max_size = nmsgs; 388844961713Sgirish rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 388944961713Sgirish 389044961713Sgirish /* 389144961713Sgirish * Buffer sizes suggested by NIU architect. 389244961713Sgirish * 256, 512 and 2K. 389344961713Sgirish */ 389444961713Sgirish 389544961713Sgirish rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 389644961713Sgirish rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 389744961713Sgirish rbrp->npi_pkt_buf_size0 = SIZE_256B; 389844961713Sgirish 389944961713Sgirish rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 390044961713Sgirish rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 390144961713Sgirish rbrp->npi_pkt_buf_size1 = SIZE_1KB; 390244961713Sgirish 390344961713Sgirish rbrp->block_size = nxgep->rx_default_block_size; 390444961713Sgirish 390514ea4bb7Ssd if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 390644961713Sgirish rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 390744961713Sgirish rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 390844961713Sgirish rbrp->npi_pkt_buf_size2 = SIZE_2KB; 390944961713Sgirish } else { 391044961713Sgirish if (rbrp->block_size >= 0x2000) { 391144961713Sgirish rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 391244961713Sgirish rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 391344961713Sgirish rbrp->npi_pkt_buf_size2 = SIZE_8KB; 391444961713Sgirish } else { 391544961713Sgirish rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 391644961713Sgirish rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 391744961713Sgirish rbrp->npi_pkt_buf_size2 = SIZE_4KB; 391844961713Sgirish } 391944961713Sgirish } 392044961713Sgirish 392144961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 392252ccf843Smisaki "==> nxge_map_rxdma_channel_buf_ring: channel %d " 392352ccf843Smisaki "actual rbr max %d rbb_max %d nmsgs %d " 392452ccf843Smisaki "rbrp->block_size %d default_block_size %d " 392552ccf843Smisaki "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 
392652ccf843Smisaki channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 392752ccf843Smisaki rbrp->block_size, nxgep->rx_default_block_size, 392852ccf843Smisaki nxge_rbr_size, nxge_rbr_spare_size)); 392944961713Sgirish 393044961713Sgirish /* Map in buffers from the buffer pool. */ 393144961713Sgirish index = 0; 393244961713Sgirish for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 393344961713Sgirish bsize = dma_bufp->block_size; 393444961713Sgirish nblocks = dma_bufp->nblocks; 3935adfcba55Sjoycey #if defined(__i386) 3936adfcba55Sjoycey ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3937adfcba55Sjoycey #else 393844961713Sgirish ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3939adfcba55Sjoycey #endif 394044961713Sgirish ring_info->buffer[i].buf_index = i; 394144961713Sgirish ring_info->buffer[i].buf_size = dma_bufp->alength; 394244961713Sgirish ring_info->buffer[i].start_index = index; 3943adfcba55Sjoycey #if defined(__i386) 3944adfcba55Sjoycey ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3945adfcba55Sjoycey #else 394644961713Sgirish ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3947adfcba55Sjoycey #endif 394844961713Sgirish 394944961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 395052ccf843Smisaki " nxge_map_rxdma_channel_buf_ring: map channel %d " 395152ccf843Smisaki "chunk %d" 395252ccf843Smisaki " nblocks %d chunk_size %x block_size 0x%x " 395352ccf843Smisaki "dma_bufp $%p", channel, i, 395452ccf843Smisaki dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 395552ccf843Smisaki dma_bufp)); 395644961713Sgirish 395744961713Sgirish for (j = 0; j < nblocks; j++) { 395844961713Sgirish if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 395952ccf843Smisaki dma_bufp)) == NULL) { 396056d930aeSspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 396152ccf843Smisaki "allocb failed (index %d i %d j %d)", 396252ccf843Smisaki index, i, j)); 396356d930aeSspeer goto nxge_map_rxdma_channel_buf_ring_fail1; 396444961713Sgirish } 
396544961713Sgirish rx_msg_ring[index] = rx_msg_p; 396644961713Sgirish rx_msg_p->block_index = index; 396744961713Sgirish rx_msg_p->shifted_addr = (uint32_t) 396852ccf843Smisaki ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 396952ccf843Smisaki RBR_BKADDR_SHIFT)); 397044961713Sgirish 397144961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 397252ccf843Smisaki "index %d j %d rx_msg_p $%p mblk %p", 397352ccf843Smisaki index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 397444961713Sgirish 397544961713Sgirish mblk_p = rx_msg_p->rx_mblk_p; 397644961713Sgirish mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3977007969e0Stm 3978007969e0Stm rbrp->rbr_ref_cnt++; 397944961713Sgirish index++; 398044961713Sgirish rx_msg_p->buf_dma.dma_channel = channel; 398144961713Sgirish } 3982678453a8Sspeer 3983678453a8Sspeer rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 3984678453a8Sspeer if (dma_bufp->contig_alloc_type) { 3985678453a8Sspeer rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 3986678453a8Sspeer } 3987678453a8Sspeer 3988678453a8Sspeer if (dma_bufp->kmem_alloc_type) { 3989678453a8Sspeer rbrp->rbr_alloc_type = KMEM_ALLOC; 3990678453a8Sspeer } 3991678453a8Sspeer 3992678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3993678453a8Sspeer " nxge_map_rxdma_channel_buf_ring: map channel %d " 3994678453a8Sspeer "chunk %d" 3995678453a8Sspeer " nblocks %d chunk_size %x block_size 0x%x " 3996678453a8Sspeer "dma_bufp $%p", 3997678453a8Sspeer channel, i, 3998678453a8Sspeer dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3999678453a8Sspeer dma_bufp)); 400044961713Sgirish } 400144961713Sgirish if (i < rbrp->num_blocks) { 400244961713Sgirish goto nxge_map_rxdma_channel_buf_ring_fail1; 400344961713Sgirish } 400444961713Sgirish 400544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 400652ccf843Smisaki "nxge_map_rxdma_channel_buf_ring: done buf init " 400752ccf843Smisaki "channel %d msg block entries %d", 400852ccf843Smisaki channel, index)); 400944961713Sgirish ring_info->block_size_mask = bsize - 1; 401044961713Sgirish 
rbrp->rx_msg_ring = rx_msg_ring; 401144961713Sgirish rbrp->dma_bufp = dma_buf_p; 401244961713Sgirish rbrp->ring_info = ring_info; 401344961713Sgirish 401444961713Sgirish status = nxge_rxbuf_index_info_init(nxgep, rbrp); 401544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 401652ccf843Smisaki " nxge_map_rxdma_channel_buf_ring: " 401752ccf843Smisaki "channel %d done buf info init", channel)); 401844961713Sgirish 4019007969e0Stm /* 4020007969e0Stm * Finally, permit nxge_freeb() to call nxge_post_page(). 4021007969e0Stm */ 4022007969e0Stm rbrp->rbr_state = RBR_POSTING; 4023007969e0Stm 402444961713Sgirish *rbr_p = rbrp; 402544961713Sgirish goto nxge_map_rxdma_channel_buf_ring_exit; 402644961713Sgirish 402744961713Sgirish nxge_map_rxdma_channel_buf_ring_fail1: 402844961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 402952ccf843Smisaki " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 403052ccf843Smisaki channel, status)); 403144961713Sgirish 403244961713Sgirish index--; 403344961713Sgirish for (; index >= 0; index--) { 403444961713Sgirish rx_msg_p = rx_msg_ring[index]; 403544961713Sgirish if (rx_msg_p != NULL) { 403614ea4bb7Ssd freeb(rx_msg_p->rx_mblk_p); 403744961713Sgirish rx_msg_ring[index] = NULL; 403844961713Sgirish } 403944961713Sgirish } 404044961713Sgirish nxge_map_rxdma_channel_buf_ring_fail: 404144961713Sgirish MUTEX_DESTROY(&rbrp->post_lock); 404244961713Sgirish MUTEX_DESTROY(&rbrp->lock); 404344961713Sgirish KMEM_FREE(ring_info, sizeof (rxring_info_t)); 404444961713Sgirish KMEM_FREE(rx_msg_ring, size); 404544961713Sgirish KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 404644961713Sgirish 404756d930aeSspeer status = NXGE_ERROR; 404856d930aeSspeer 404944961713Sgirish nxge_map_rxdma_channel_buf_ring_exit: 405044961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 405152ccf843Smisaki "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 405244961713Sgirish 405344961713Sgirish return (status); 405444961713Sgirish } 405544961713Sgirish 405644961713Sgirish 
/*ARGSUSED*/ 405744961713Sgirish static void 405844961713Sgirish nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 405944961713Sgirish p_rx_rbr_ring_t rbr_p) 406044961713Sgirish { 406144961713Sgirish p_rx_msg_t *rx_msg_ring; 406244961713Sgirish p_rx_msg_t rx_msg_p; 406344961713Sgirish rxring_info_t *ring_info; 406444961713Sgirish int i; 406544961713Sgirish uint32_t size; 406644961713Sgirish #ifdef NXGE_DEBUG 406744961713Sgirish int num_chunks; 406844961713Sgirish #endif 406944961713Sgirish 407044961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 407152ccf843Smisaki "==> nxge_unmap_rxdma_channel_buf_ring")); 407244961713Sgirish if (rbr_p == NULL) { 407344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 407452ccf843Smisaki "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 407544961713Sgirish return; 407644961713Sgirish } 407744961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 407852ccf843Smisaki "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 407952ccf843Smisaki rbr_p->rdc)); 408044961713Sgirish 408144961713Sgirish rx_msg_ring = rbr_p->rx_msg_ring; 408244961713Sgirish ring_info = rbr_p->ring_info; 408344961713Sgirish 408444961713Sgirish if (rx_msg_ring == NULL || ring_info == NULL) { 408552ccf843Smisaki NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 408652ccf843Smisaki "<== nxge_unmap_rxdma_channel_buf_ring: " 408752ccf843Smisaki "rx_msg_ring $%p ring_info $%p", 408852ccf843Smisaki rx_msg_p, ring_info)); 408944961713Sgirish return; 409044961713Sgirish } 409144961713Sgirish 409244961713Sgirish #ifdef NXGE_DEBUG 409344961713Sgirish num_chunks = rbr_p->num_blocks; 409444961713Sgirish #endif 409544961713Sgirish size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 409644961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 409752ccf843Smisaki " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 409852ccf843Smisaki "tnblocks %d (max %d) size ptrs %d ", 409952ccf843Smisaki rbr_p->rdc, num_chunks, 410052ccf843Smisaki rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 410144961713Sgirish 
410244961713Sgirish for (i = 0; i < rbr_p->tnblocks; i++) { 410344961713Sgirish rx_msg_p = rx_msg_ring[i]; 410444961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 410552ccf843Smisaki " nxge_unmap_rxdma_channel_buf_ring: " 410652ccf843Smisaki "rx_msg_p $%p", 410752ccf843Smisaki rx_msg_p)); 410844961713Sgirish if (rx_msg_p != NULL) { 410914ea4bb7Ssd freeb(rx_msg_p->rx_mblk_p); 411044961713Sgirish rx_msg_ring[i] = NULL; 411144961713Sgirish } 411244961713Sgirish } 411344961713Sgirish 4114007969e0Stm /* 4115007969e0Stm * We no longer may use the mutex <post_lock>. By setting 4116007969e0Stm * <rbr_state> to anything but POSTING, we prevent 4117007969e0Stm * nxge_post_page() from accessing a dead mutex. 4118007969e0Stm */ 4119007969e0Stm rbr_p->rbr_state = RBR_UNMAPPING; 412044961713Sgirish MUTEX_DESTROY(&rbr_p->post_lock); 4121007969e0Stm 412244961713Sgirish MUTEX_DESTROY(&rbr_p->lock); 4123007969e0Stm 4124007969e0Stm if (rbr_p->rbr_ref_cnt == 0) { 4125678453a8Sspeer /* 4126678453a8Sspeer * This is the normal state of affairs. 4127678453a8Sspeer * Need to free the following buffers: 4128678453a8Sspeer * - data buffers 4129678453a8Sspeer * - rx_msg ring 4130678453a8Sspeer * - ring_info 4131678453a8Sspeer * - rbr ring 4132678453a8Sspeer */ 4133678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 4134678453a8Sspeer "unmap_rxdma_buf_ring: No outstanding - freeing ")); 4135678453a8Sspeer nxge_rxdma_databuf_free(rbr_p); 4136678453a8Sspeer KMEM_FREE(ring_info, sizeof (rxring_info_t)); 4137678453a8Sspeer KMEM_FREE(rx_msg_ring, size); 4138007969e0Stm KMEM_FREE(rbr_p, sizeof (*rbr_p)); 4139007969e0Stm } else { 4140007969e0Stm /* 4141007969e0Stm * Some of our buffers are still being used. 4142007969e0Stm * Therefore, tell nxge_freeb() this ring is 4143007969e0Stm * unmapped, so it may free <rbr_p> for us. 
4144007969e0Stm */ 4145007969e0Stm rbr_p->rbr_state = RBR_UNMAPPED; 4146007969e0Stm NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4147007969e0Stm "unmap_rxdma_buf_ring: %d %s outstanding.", 4148007969e0Stm rbr_p->rbr_ref_cnt, 4149007969e0Stm rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs")); 4150007969e0Stm } 415144961713Sgirish 415244961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 415352ccf843Smisaki "<== nxge_unmap_rxdma_channel_buf_ring")); 415444961713Sgirish } 415544961713Sgirish 4156678453a8Sspeer /* 4157678453a8Sspeer * nxge_rxdma_hw_start_common 4158678453a8Sspeer * 4159678453a8Sspeer * Arguments: 4160678453a8Sspeer * nxgep 4161678453a8Sspeer * 4162678453a8Sspeer * Notes: 4163678453a8Sspeer * 4164678453a8Sspeer * NPI/NXGE function calls: 4165678453a8Sspeer * nxge_init_fzc_rx_common(); 4166678453a8Sspeer * nxge_init_fzc_rxdma_port(); 4167678453a8Sspeer * 4168678453a8Sspeer * Registers accessed: 4169678453a8Sspeer * 4170678453a8Sspeer * Context: 4171678453a8Sspeer * Service domain 4172678453a8Sspeer */ 417344961713Sgirish static nxge_status_t 417444961713Sgirish nxge_rxdma_hw_start_common(p_nxge_t nxgep) 417544961713Sgirish { 417644961713Sgirish nxge_status_t status = NXGE_OK; 417744961713Sgirish 417844961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 417944961713Sgirish 418044961713Sgirish /* 418144961713Sgirish * Load the sharable parameters by writing to the 418244961713Sgirish * function zero control registers. These FZC registers 418344961713Sgirish * should be initialized only once for the entire chip. 418444961713Sgirish */ 418544961713Sgirish (void) nxge_init_fzc_rx_common(nxgep); 418644961713Sgirish 418744961713Sgirish /* 418844961713Sgirish * Initialize the RXDMA port specific FZC control configurations. 418944961713Sgirish * These FZC registers are pertaining to each port. 
419044961713Sgirish */ 419144961713Sgirish (void) nxge_init_fzc_rxdma_port(nxgep); 419244961713Sgirish 419344961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 419444961713Sgirish 419544961713Sgirish return (status); 419644961713Sgirish } 419744961713Sgirish 419844961713Sgirish static nxge_status_t 4199678453a8Sspeer nxge_rxdma_hw_start(p_nxge_t nxgep, int channel) 420044961713Sgirish { 420144961713Sgirish int i, ndmas; 420244961713Sgirish p_rx_rbr_rings_t rx_rbr_rings; 420344961713Sgirish p_rx_rbr_ring_t *rbr_rings; 420444961713Sgirish p_rx_rcr_rings_t rx_rcr_rings; 420544961713Sgirish p_rx_rcr_ring_t *rcr_rings; 420644961713Sgirish p_rx_mbox_areas_t rx_mbox_areas_p; 420744961713Sgirish p_rx_mbox_t *rx_mbox_p; 420844961713Sgirish nxge_status_t status = NXGE_OK; 420944961713Sgirish 421044961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 421144961713Sgirish 421244961713Sgirish rx_rbr_rings = nxgep->rx_rbr_rings; 421344961713Sgirish rx_rcr_rings = nxgep->rx_rcr_rings; 421444961713Sgirish if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 421544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 421652ccf843Smisaki "<== nxge_rxdma_hw_start: NULL ring pointers")); 421744961713Sgirish return (NXGE_ERROR); 421844961713Sgirish } 421944961713Sgirish ndmas = rx_rbr_rings->ndmas; 422044961713Sgirish if (ndmas == 0) { 422144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 422252ccf843Smisaki "<== nxge_rxdma_hw_start: no dma channel allocated")); 422344961713Sgirish return (NXGE_ERROR); 422444961713Sgirish } 422544961713Sgirish 422644961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 422752ccf843Smisaki "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 422844961713Sgirish 422944961713Sgirish rbr_rings = rx_rbr_rings->rbr_rings; 423044961713Sgirish rcr_rings = rx_rcr_rings->rcr_rings; 423144961713Sgirish rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 423244961713Sgirish if (rx_mbox_areas_p) { 423344961713Sgirish rx_mbox_p = 
rx_mbox_areas_p->rxmbox_areas; 423444961713Sgirish } 423544961713Sgirish 4236678453a8Sspeer i = channel; 4237678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 423852ccf843Smisaki "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 423952ccf843Smisaki ndmas, channel)); 4240678453a8Sspeer status = nxge_rxdma_start_channel(nxgep, channel, 4241678453a8Sspeer (p_rx_rbr_ring_t)rbr_rings[i], 4242678453a8Sspeer (p_rx_rcr_ring_t)rcr_rings[i], 4243678453a8Sspeer (p_rx_mbox_t)rx_mbox_p[i]); 4244678453a8Sspeer if (status != NXGE_OK) { 4245678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4246678453a8Sspeer "==> nxge_rxdma_hw_start: disable " 4247678453a8Sspeer "(status 0x%x channel %d)", status, channel)); 4248678453a8Sspeer return (status); 424944961713Sgirish } 425044961713Sgirish 425144961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 425252ccf843Smisaki "rx_rbr_rings 0x%016llx rings 0x%016llx", 425352ccf843Smisaki rx_rbr_rings, rx_rcr_rings)); 425444961713Sgirish 425544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 425652ccf843Smisaki "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 425744961713Sgirish 425844961713Sgirish return (status); 425944961713Sgirish } 426044961713Sgirish 426144961713Sgirish static void 4262678453a8Sspeer nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel) 426344961713Sgirish { 426444961713Sgirish p_rx_rbr_rings_t rx_rbr_rings; 426544961713Sgirish p_rx_rcr_rings_t rx_rcr_rings; 426644961713Sgirish 426744961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop")); 426844961713Sgirish 426944961713Sgirish rx_rbr_rings = nxgep->rx_rbr_rings; 427044961713Sgirish rx_rcr_rings = nxgep->rx_rcr_rings; 427144961713Sgirish if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 427244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 427352ccf843Smisaki "<== nxge_rxdma_hw_stop: NULL ring pointers")); 427444961713Sgirish return; 427544961713Sgirish } 427644961713Sgirish 427744961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 
427852ccf843Smisaki "==> nxge_rxdma_hw_stop(channel %d)", 427952ccf843Smisaki channel)); 4280678453a8Sspeer (void) nxge_rxdma_stop_channel(nxgep, channel); 428144961713Sgirish 428244961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 428352ccf843Smisaki "rx_rbr_rings 0x%016llx rings 0x%016llx", 428452ccf843Smisaki rx_rbr_rings, rx_rcr_rings)); 428544961713Sgirish 428644961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 428744961713Sgirish } 428844961713Sgirish 428944961713Sgirish 429044961713Sgirish static nxge_status_t 429144961713Sgirish nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 429244961713Sgirish p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 429344961713Sgirish 429444961713Sgirish { 429544961713Sgirish npi_handle_t handle; 429644961713Sgirish npi_status_t rs = NPI_SUCCESS; 429744961713Sgirish rx_dma_ctl_stat_t cs; 429844961713Sgirish rx_dma_ent_msk_t ent_mask; 429944961713Sgirish nxge_status_t status = NXGE_OK; 430044961713Sgirish 430144961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 430244961713Sgirish 430344961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 430444961713Sgirish 430544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 430644961713Sgirish "npi handle addr $%p acc $%p", 430744961713Sgirish nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 430844961713Sgirish 4309678453a8Sspeer /* Reset RXDMA channel, but not if you're a guest. 
*/ 4310678453a8Sspeer if (!isLDOMguest(nxgep)) { 4311678453a8Sspeer rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4312678453a8Sspeer if (rs != NPI_SUCCESS) { 4313678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4314678453a8Sspeer "==> nxge_init_fzc_rdc: " 4315678453a8Sspeer "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x", 4316678453a8Sspeer channel, rs)); 4317678453a8Sspeer return (NXGE_ERROR | rs); 4318678453a8Sspeer } 4319678453a8Sspeer 4320678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4321678453a8Sspeer "==> nxge_rxdma_start_channel: reset done: channel %d", 4322678453a8Sspeer channel)); 432344961713Sgirish } 432444961713Sgirish 4325678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4326678453a8Sspeer if (isLDOMguest(nxgep)) 4327678453a8Sspeer (void) nxge_rdc_lp_conf(nxgep, channel); 4328678453a8Sspeer #endif 432944961713Sgirish 433044961713Sgirish /* 433144961713Sgirish * Initialize the RXDMA channel specific FZC control 433244961713Sgirish * configurations. These FZC registers are pertaining 433344961713Sgirish * to each RX channel (logical pages). 433444961713Sgirish */ 4335678453a8Sspeer if (!isLDOMguest(nxgep)) { 4336678453a8Sspeer status = nxge_init_fzc_rxdma_channel(nxgep, channel); 4337678453a8Sspeer if (status != NXGE_OK) { 4338678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4339678453a8Sspeer "==> nxge_rxdma_start_channel: " 4340678453a8Sspeer "init fzc rxdma failed (0x%08x channel %d)", 4341678453a8Sspeer status, channel)); 4342678453a8Sspeer return (status); 4343678453a8Sspeer } 434444961713Sgirish 4345678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4346678453a8Sspeer "==> nxge_rxdma_start_channel: fzc done")); 4347678453a8Sspeer } 434844961713Sgirish 434944961713Sgirish /* Set up the interrupt event masks. 
*/ 435044961713Sgirish ent_mask.value = 0; 435144961713Sgirish ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK; 435244961713Sgirish rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4353678453a8Sspeer &ent_mask); 435444961713Sgirish if (rs != NPI_SUCCESS) { 435544961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 435644961713Sgirish "==> nxge_rxdma_start_channel: " 4357678453a8Sspeer "init rxdma event masks failed " 4358678453a8Sspeer "(0x%08x channel %d)", 435944961713Sgirish status, channel)); 436044961713Sgirish return (NXGE_ERROR | rs); 436144961713Sgirish } 436244961713Sgirish 4363678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4364678453a8Sspeer "==> nxge_rxdma_start_channel: " 436544961713Sgirish "event done: channel %d (mask 0x%016llx)", 436644961713Sgirish channel, ent_mask.value)); 436744961713Sgirish 436844961713Sgirish /* Initialize the receive DMA control and status register */ 436944961713Sgirish cs.value = 0; 437044961713Sgirish cs.bits.hdw.mex = 1; 437144961713Sgirish cs.bits.hdw.rcrthres = 1; 437244961713Sgirish cs.bits.hdw.rcrto = 1; 437344961713Sgirish cs.bits.hdw.rbr_empty = 1; 437444961713Sgirish status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 437544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 437644961713Sgirish "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value)); 437744961713Sgirish if (status != NXGE_OK) { 437844961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 437944961713Sgirish "==> nxge_rxdma_start_channel: " 438044961713Sgirish "init rxdma control register failed (0x%08x channel %d", 438144961713Sgirish status, channel)); 438244961713Sgirish return (status); 438344961713Sgirish } 438444961713Sgirish 438544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 438644961713Sgirish "control done - channel %d cs 0x%016llx", channel, cs.value)); 438744961713Sgirish 438844961713Sgirish /* 438944961713Sgirish * Load RXDMA descriptors, buffers, mailbox, 
439044961713Sgirish * initialise the receive DMA channels and 439144961713Sgirish * enable each DMA channel. 439244961713Sgirish */ 439344961713Sgirish status = nxge_enable_rxdma_channel(nxgep, 4394678453a8Sspeer channel, rbr_p, rcr_p, mbox_p); 439544961713Sgirish 439644961713Sgirish if (status != NXGE_OK) { 439744961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4398678453a8Sspeer " nxge_rxdma_start_channel: " 4399678453a8Sspeer " enable rxdma failed (0x%08x channel %d)", 4400678453a8Sspeer status, channel)); 440144961713Sgirish return (status); 440244961713Sgirish } 440344961713Sgirish 4404678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4405678453a8Sspeer "==> nxge_rxdma_start_channel: enabled channel %d")); 4406678453a8Sspeer 4407678453a8Sspeer if (isLDOMguest(nxgep)) { 4408678453a8Sspeer /* Add interrupt handler for this channel. */ 4409678453a8Sspeer if (nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel) 4410678453a8Sspeer != NXGE_OK) { 4411678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4412678453a8Sspeer " nxge_rxdma_start_channel: " 4413678453a8Sspeer " nxge_hio_intr_add failed (0x%08x channel %d)", 4414678453a8Sspeer status, channel)); 4415678453a8Sspeer } 4416678453a8Sspeer } 4417678453a8Sspeer 441844961713Sgirish ent_mask.value = 0; 441944961713Sgirish ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 442044961713Sgirish RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 442144961713Sgirish rs = npi_rxdma_event_mask(handle, OP_SET, channel, 442244961713Sgirish &ent_mask); 442344961713Sgirish if (rs != NPI_SUCCESS) { 442444961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 442544961713Sgirish "==> nxge_rxdma_start_channel: " 442644961713Sgirish "init rxdma event masks failed (0x%08x channel %d)", 442744961713Sgirish status, channel)); 442844961713Sgirish return (NXGE_ERROR | rs); 442944961713Sgirish } 443044961713Sgirish 443144961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 443244961713Sgirish "control done - channel %d cs 0x%016llx", channel, 
cs.value)); 443344961713Sgirish 443444961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 443544961713Sgirish 443644961713Sgirish return (NXGE_OK); 443744961713Sgirish } 443844961713Sgirish 443944961713Sgirish static nxge_status_t 444044961713Sgirish nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 444144961713Sgirish { 444244961713Sgirish npi_handle_t handle; 444344961713Sgirish npi_status_t rs = NPI_SUCCESS; 444444961713Sgirish rx_dma_ctl_stat_t cs; 444544961713Sgirish rx_dma_ent_msk_t ent_mask; 444644961713Sgirish nxge_status_t status = NXGE_OK; 444744961713Sgirish 444844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel")); 444944961713Sgirish 445044961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 445144961713Sgirish 445244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: " 445352ccf843Smisaki "npi handle addr $%p acc $%p", 445452ccf843Smisaki nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 445544961713Sgirish 4456330cd344SMichael Speer if (!isLDOMguest(nxgep)) { 4457330cd344SMichael Speer /* 4458330cd344SMichael Speer * Stop RxMAC = A.9.2.6 4459330cd344SMichael Speer */ 4460330cd344SMichael Speer if (nxge_rx_mac_disable(nxgep) != NXGE_OK) { 4461330cd344SMichael Speer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4462330cd344SMichael Speer "nxge_rxdma_stop_channel: " 4463330cd344SMichael Speer "Failed to disable RxMAC")); 4464330cd344SMichael Speer } 4465330cd344SMichael Speer 4466330cd344SMichael Speer /* 4467330cd344SMichael Speer * Drain IPP Port = A.9.3.6 4468330cd344SMichael Speer */ 4469330cd344SMichael Speer (void) nxge_ipp_drain(nxgep); 4470330cd344SMichael Speer } 4471330cd344SMichael Speer 447244961713Sgirish /* Reset RXDMA channel */ 447344961713Sgirish rs = npi_rxdma_cfg_rdc_reset(handle, channel); 447444961713Sgirish if (rs != NPI_SUCCESS) { 447544961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 447652ccf843Smisaki " nxge_rxdma_stop_channel: " 447752ccf843Smisaki " reset 
rxdma failed (0x%08x channel %d)", 447852ccf843Smisaki rs, channel)); 447944961713Sgirish return (NXGE_ERROR | rs); 448044961713Sgirish } 448144961713Sgirish 448244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 448352ccf843Smisaki "==> nxge_rxdma_stop_channel: reset done")); 448444961713Sgirish 448544961713Sgirish /* Set up the interrupt event masks. */ 448644961713Sgirish ent_mask.value = RX_DMA_ENT_MSK_ALL; 448744961713Sgirish rs = npi_rxdma_event_mask(handle, OP_SET, channel, 448852ccf843Smisaki &ent_mask); 448944961713Sgirish if (rs != NPI_SUCCESS) { 449044961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 449152ccf843Smisaki "==> nxge_rxdma_stop_channel: " 449252ccf843Smisaki "set rxdma event masks failed (0x%08x channel %d)", 449352ccf843Smisaki rs, channel)); 449444961713Sgirish return (NXGE_ERROR | rs); 449544961713Sgirish } 449644961713Sgirish 449744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 449852ccf843Smisaki "==> nxge_rxdma_stop_channel: event done")); 449944961713Sgirish 4500330cd344SMichael Speer /* 4501330cd344SMichael Speer * Initialize the receive DMA control and status register 4502330cd344SMichael Speer */ 450344961713Sgirish cs.value = 0; 4504330cd344SMichael Speer status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 450544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 450652ccf843Smisaki " to default (all 0s) 0x%08x", cs.value)); 450744961713Sgirish if (status != NXGE_OK) { 450844961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 450952ccf843Smisaki " nxge_rxdma_stop_channel: init rxdma" 451052ccf843Smisaki " control register failed (0x%08x channel %d", 451152ccf843Smisaki status, channel)); 451244961713Sgirish return (status); 451344961713Sgirish } 451444961713Sgirish 451544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 451652ccf843Smisaki "==> nxge_rxdma_stop_channel: control done")); 451744961713Sgirish 4518330cd344SMichael Speer /* 4519330cd344SMichael Speer * Make sure channel is disabled. 
4520330cd344SMichael Speer */ 452144961713Sgirish status = nxge_disable_rxdma_channel(nxgep, channel); 4522da14cebeSEric Cheng 452344961713Sgirish if (status != NXGE_OK) { 452444961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 452552ccf843Smisaki " nxge_rxdma_stop_channel: " 452652ccf843Smisaki " init enable rxdma failed (0x%08x channel %d)", 452752ccf843Smisaki status, channel)); 452844961713Sgirish return (status); 452944961713Sgirish } 453044961713Sgirish 4531330cd344SMichael Speer if (!isLDOMguest(nxgep)) { 4532330cd344SMichael Speer /* 4533330cd344SMichael Speer * Enable RxMAC = A.9.2.10 4534330cd344SMichael Speer */ 4535330cd344SMichael Speer if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 4536330cd344SMichael Speer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4537330cd344SMichael Speer "nxge_rxdma_stop_channel: Rx MAC still disabled")); 4538330cd344SMichael Speer } 4539330cd344SMichael Speer } 4540330cd344SMichael Speer 454144961713Sgirish NXGE_DEBUG_MSG((nxgep, 454252ccf843Smisaki RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 454344961713Sgirish 454444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 454544961713Sgirish 454644961713Sgirish return (NXGE_OK); 454744961713Sgirish } 454844961713Sgirish 454944961713Sgirish nxge_status_t 455044961713Sgirish nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 455144961713Sgirish { 455244961713Sgirish npi_handle_t handle; 455344961713Sgirish p_nxge_rdc_sys_stats_t statsp; 455444961713Sgirish rx_ctl_dat_fifo_stat_t stat; 455544961713Sgirish uint32_t zcp_err_status; 455644961713Sgirish uint32_t ipp_err_status; 455744961713Sgirish nxge_status_t status = NXGE_OK; 455844961713Sgirish npi_status_t rs = NPI_SUCCESS; 455944961713Sgirish boolean_t my_err = B_FALSE; 456044961713Sgirish 456144961713Sgirish handle = nxgep->npi_handle; 456244961713Sgirish statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 456344961713Sgirish 456444961713Sgirish rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, 
&stat); 456544961713Sgirish 456644961713Sgirish if (rs != NPI_SUCCESS) 456744961713Sgirish return (NXGE_ERROR | rs); 456844961713Sgirish 456944961713Sgirish if (stat.bits.ldw.id_mismatch) { 457044961713Sgirish statsp->id_mismatch++; 457144961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 457252ccf843Smisaki NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 457344961713Sgirish /* Global fatal error encountered */ 457444961713Sgirish } 457544961713Sgirish 457644961713Sgirish if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 457744961713Sgirish switch (nxgep->mac.portnum) { 457844961713Sgirish case 0: 457944961713Sgirish if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 458052ccf843Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 458144961713Sgirish my_err = B_TRUE; 458244961713Sgirish zcp_err_status = stat.bits.ldw.zcp_eop_err; 458344961713Sgirish ipp_err_status = stat.bits.ldw.ipp_eop_err; 458444961713Sgirish } 458544961713Sgirish break; 458644961713Sgirish case 1: 458744961713Sgirish if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 458852ccf843Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 458944961713Sgirish my_err = B_TRUE; 459044961713Sgirish zcp_err_status = stat.bits.ldw.zcp_eop_err; 459144961713Sgirish ipp_err_status = stat.bits.ldw.ipp_eop_err; 459244961713Sgirish } 459344961713Sgirish break; 459444961713Sgirish case 2: 459544961713Sgirish if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 459652ccf843Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 459744961713Sgirish my_err = B_TRUE; 459844961713Sgirish zcp_err_status = stat.bits.ldw.zcp_eop_err; 459944961713Sgirish ipp_err_status = stat.bits.ldw.ipp_eop_err; 460044961713Sgirish } 460144961713Sgirish break; 460244961713Sgirish case 3: 460344961713Sgirish if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 460452ccf843Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 460544961713Sgirish my_err = B_TRUE; 460644961713Sgirish zcp_err_status = 
stat.bits.ldw.zcp_eop_err; 460744961713Sgirish ipp_err_status = stat.bits.ldw.ipp_eop_err; 460844961713Sgirish } 460944961713Sgirish break; 461044961713Sgirish default: 461144961713Sgirish return (NXGE_ERROR); 461244961713Sgirish } 461344961713Sgirish } 461444961713Sgirish 461544961713Sgirish if (my_err) { 461644961713Sgirish status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 461752ccf843Smisaki zcp_err_status); 461844961713Sgirish if (status != NXGE_OK) 461944961713Sgirish return (status); 462044961713Sgirish } 462144961713Sgirish 462244961713Sgirish return (NXGE_OK); 462344961713Sgirish } 462444961713Sgirish 462544961713Sgirish static nxge_status_t 462644961713Sgirish nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 462744961713Sgirish uint32_t zcp_status) 462844961713Sgirish { 462944961713Sgirish boolean_t rxport_fatal = B_FALSE; 463044961713Sgirish p_nxge_rdc_sys_stats_t statsp; 463144961713Sgirish nxge_status_t status = NXGE_OK; 463244961713Sgirish uint8_t portn; 463344961713Sgirish 463444961713Sgirish portn = nxgep->mac.portnum; 463544961713Sgirish statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 463644961713Sgirish 463744961713Sgirish if (ipp_status & (0x1 << portn)) { 463844961713Sgirish statsp->ipp_eop_err++; 463944961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 464052ccf843Smisaki NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 464144961713Sgirish rxport_fatal = B_TRUE; 464244961713Sgirish } 464344961713Sgirish 464444961713Sgirish if (zcp_status & (0x1 << portn)) { 464544961713Sgirish statsp->zcp_eop_err++; 464644961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 464752ccf843Smisaki NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 464844961713Sgirish rxport_fatal = B_TRUE; 464944961713Sgirish } 465044961713Sgirish 465144961713Sgirish if (rxport_fatal) { 465244961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 465352ccf843Smisaki " nxge_rxdma_handle_port_error: " 465452ccf843Smisaki " fatal error on Port #%d\n", 
465552ccf843Smisaki portn)); 465644961713Sgirish status = nxge_rx_port_fatal_err_recover(nxgep); 465744961713Sgirish if (status == NXGE_OK) { 465844961713Sgirish FM_SERVICE_RESTORED(nxgep); 465944961713Sgirish } 466044961713Sgirish } 466144961713Sgirish 466244961713Sgirish return (status); 466344961713Sgirish } 466444961713Sgirish 466544961713Sgirish static nxge_status_t 466644961713Sgirish nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel) 466744961713Sgirish { 466844961713Sgirish npi_handle_t handle; 466944961713Sgirish npi_status_t rs = NPI_SUCCESS; 467044961713Sgirish nxge_status_t status = NXGE_OK; 467144961713Sgirish p_rx_rbr_ring_t rbrp; 467244961713Sgirish p_rx_rcr_ring_t rcrp; 467344961713Sgirish p_rx_mbox_t mboxp; 467444961713Sgirish rx_dma_ent_msk_t ent_mask; 467544961713Sgirish p_nxge_dma_common_t dmap; 467644961713Sgirish int ring_idx; 467744961713Sgirish uint32_t ref_cnt; 467844961713Sgirish p_rx_msg_t rx_msg_p; 467944961713Sgirish int i; 468044961713Sgirish uint32_t nxge_port_rcr_size; 468144961713Sgirish 468244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover")); 468344961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 468452ccf843Smisaki "Recovering from RxDMAChannel#%d error...", channel)); 468544961713Sgirish 468644961713Sgirish /* 468744961713Sgirish * Stop the dma channel waits for the stop done. 468844961713Sgirish * If the stop done bit is not set, then create 468944961713Sgirish * an error. 
469044961713Sgirish */ 469144961713Sgirish 469244961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 469344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 469444961713Sgirish 469544961713Sgirish ring_idx = nxge_rxdma_get_ring_index(nxgep, channel); 469644961713Sgirish rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx]; 469744961713Sgirish rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx]; 469844961713Sgirish 469944961713Sgirish MUTEX_ENTER(&rcrp->lock); 470044961713Sgirish MUTEX_ENTER(&rbrp->lock); 470144961713Sgirish MUTEX_ENTER(&rbrp->post_lock); 470244961713Sgirish 470344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 470444961713Sgirish 470544961713Sgirish rs = npi_rxdma_cfg_rdc_disable(handle, channel); 470644961713Sgirish if (rs != NPI_SUCCESS) { 470744961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 470852ccf843Smisaki "nxge_disable_rxdma_channel:failed")); 470944961713Sgirish goto fail; 471044961713Sgirish } 471144961713Sgirish 471244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 471344961713Sgirish 471444961713Sgirish /* Disable interrupt */ 471544961713Sgirish ent_mask.value = RX_DMA_ENT_MSK_ALL; 471644961713Sgirish rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 471744961713Sgirish if (rs != NPI_SUCCESS) { 471844961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 471952ccf843Smisaki "nxge_rxdma_stop_channel: " 472052ccf843Smisaki "set rxdma event masks failed (channel %d)", 472152ccf843Smisaki channel)); 472244961713Sgirish } 472344961713Sgirish 472444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 472544961713Sgirish 472644961713Sgirish /* Reset RXDMA channel */ 472744961713Sgirish rs = npi_rxdma_cfg_rdc_reset(handle, channel); 472844961713Sgirish if (rs != NPI_SUCCESS) { 472944961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 473052ccf843Smisaki "nxge_rxdma_fatal_err_recover: " 473152ccf843Smisaki " reset rxdma 
failed (channel %d)", channel)); 473244961713Sgirish goto fail; 473344961713Sgirish } 473444961713Sgirish 473544961713Sgirish nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 473644961713Sgirish 473744961713Sgirish mboxp = 473852ccf843Smisaki (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 473944961713Sgirish 474044961713Sgirish rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 474144961713Sgirish rbrp->rbr_rd_index = 0; 474244961713Sgirish 474344961713Sgirish rcrp->comp_rd_index = 0; 474444961713Sgirish rcrp->comp_wt_index = 0; 474544961713Sgirish rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 474652ccf843Smisaki (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 4747adfcba55Sjoycey #if defined(__i386) 474852ccf843Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 474952ccf843Smisaki (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4750adfcba55Sjoycey #else 475152ccf843Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 475252ccf843Smisaki (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4753adfcba55Sjoycey #endif 475444961713Sgirish 475544961713Sgirish rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 475652ccf843Smisaki (nxge_port_rcr_size - 1); 475744961713Sgirish rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 475852ccf843Smisaki (nxge_port_rcr_size - 1); 475944961713Sgirish 476044961713Sgirish dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 476144961713Sgirish bzero((caddr_t)dmap->kaddrp, dmap->alength); 476244961713Sgirish 476344961713Sgirish cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 476444961713Sgirish 476544961713Sgirish for (i = 0; i < rbrp->rbr_max_size; i++) { 476644961713Sgirish rx_msg_p = rbrp->rx_msg_ring[i]; 476744961713Sgirish ref_cnt = rx_msg_p->ref_cnt; 476844961713Sgirish if (ref_cnt != 1) { 4769a3c5bd6dSspeer if (rx_msg_p->cur_usage_cnt != 477052ccf843Smisaki rx_msg_p->max_usage_cnt) { 477144961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 477252ccf843Smisaki "buf[%d]: cur_usage_cnt 
= %d " 477352ccf843Smisaki "max_usage_cnt = %d\n", i, 477452ccf843Smisaki rx_msg_p->cur_usage_cnt, 477552ccf843Smisaki rx_msg_p->max_usage_cnt)); 4776a3c5bd6dSspeer } else { 4777a3c5bd6dSspeer /* Buffer can be re-posted */ 4778a3c5bd6dSspeer rx_msg_p->free = B_TRUE; 4779a3c5bd6dSspeer rx_msg_p->cur_usage_cnt = 0; 4780a3c5bd6dSspeer rx_msg_p->max_usage_cnt = 0xbaddcafe; 4781a3c5bd6dSspeer rx_msg_p->pkt_buf_size = 0; 4782a3c5bd6dSspeer } 478344961713Sgirish } 478444961713Sgirish } 478544961713Sgirish 478644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 478744961713Sgirish 478844961713Sgirish status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 478944961713Sgirish if (status != NXGE_OK) { 479044961713Sgirish goto fail; 479144961713Sgirish } 479244961713Sgirish 479344961713Sgirish MUTEX_EXIT(&rbrp->post_lock); 479444961713Sgirish MUTEX_EXIT(&rbrp->lock); 479544961713Sgirish MUTEX_EXIT(&rcrp->lock); 479644961713Sgirish 479744961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 479852ccf843Smisaki "Recovery Successful, RxDMAChannel#%d Restored", 479952ccf843Smisaki channel)); 480044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover")); 480144961713Sgirish 480244961713Sgirish return (NXGE_OK); 480344961713Sgirish fail: 480444961713Sgirish MUTEX_EXIT(&rbrp->post_lock); 480544961713Sgirish MUTEX_EXIT(&rbrp->lock); 480644961713Sgirish MUTEX_EXIT(&rcrp->lock); 480744961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 480844961713Sgirish 480944961713Sgirish return (NXGE_ERROR | rs); 481044961713Sgirish } 481144961713Sgirish 481244961713Sgirish nxge_status_t 481344961713Sgirish nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 481444961713Sgirish { 4815678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 4816678453a8Sspeer nxge_status_t status = NXGE_OK; 4817678453a8Sspeer int rdc; 481844961713Sgirish 481944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover")); 
482044961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 482152ccf843Smisaki "Recovering from RxPort error...")); 4822678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n")); 482344961713Sgirish 482444961713Sgirish if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 482544961713Sgirish goto fail; 482644961713Sgirish 482744961713Sgirish NXGE_DELAY(1000); 482844961713Sgirish 4829678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels...")); 483044961713Sgirish 4831678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 4832678453a8Sspeer if ((1 << rdc) & set->owned.map) { 4833678453a8Sspeer if (nxge_rxdma_fatal_err_recover(nxgep, rdc) 4834678453a8Sspeer != NXGE_OK) { 4835678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4836678453a8Sspeer "Could not recover channel %d", rdc)); 4837678453a8Sspeer } 483844961713Sgirish } 483944961713Sgirish } 484044961713Sgirish 4841678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP...")); 484244961713Sgirish 484344961713Sgirish /* Reset IPP */ 484444961713Sgirish if (nxge_ipp_reset(nxgep) != NXGE_OK) { 484544961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 484652ccf843Smisaki "nxge_rx_port_fatal_err_recover: " 484752ccf843Smisaki "Failed to reset IPP")); 484844961713Sgirish goto fail; 484944961713Sgirish } 485044961713Sgirish 485144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 485244961713Sgirish 485344961713Sgirish /* Reset RxMAC */ 485444961713Sgirish if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 485544961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 485652ccf843Smisaki "nxge_rx_port_fatal_err_recover: " 485752ccf843Smisaki "Failed to reset RxMAC")); 485844961713Sgirish goto fail; 485944961713Sgirish } 486044961713Sgirish 486144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 486244961713Sgirish 486344961713Sgirish /* Re-Initialize IPP */ 486444961713Sgirish if (nxge_ipp_init(nxgep) != NXGE_OK) { 486544961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 
486652ccf843Smisaki "nxge_rx_port_fatal_err_recover: " 486752ccf843Smisaki "Failed to init IPP")); 486844961713Sgirish goto fail; 486944961713Sgirish } 487044961713Sgirish 487144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 487244961713Sgirish 487344961713Sgirish /* Re-Initialize RxMAC */ 487444961713Sgirish if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 487544961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 487652ccf843Smisaki "nxge_rx_port_fatal_err_recover: " 487752ccf843Smisaki "Failed to reset RxMAC")); 487844961713Sgirish goto fail; 487944961713Sgirish } 488044961713Sgirish 488144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 488244961713Sgirish 488344961713Sgirish /* Re-enable RxMAC */ 488444961713Sgirish if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 488544961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 488652ccf843Smisaki "nxge_rx_port_fatal_err_recover: " 488752ccf843Smisaki "Failed to enable RxMAC")); 488844961713Sgirish goto fail; 488944961713Sgirish } 489044961713Sgirish 489144961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 489252ccf843Smisaki "Recovery Successful, RxPort Restored")); 489344961713Sgirish 489444961713Sgirish return (NXGE_OK); 489544961713Sgirish fail: 489644961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 489744961713Sgirish return (status); 489844961713Sgirish } 489944961713Sgirish 490044961713Sgirish void 490144961713Sgirish nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 490244961713Sgirish { 490344961713Sgirish rx_dma_ctl_stat_t cs; 490444961713Sgirish rx_ctl_dat_fifo_stat_t cdfs; 490544961713Sgirish 490644961713Sgirish switch (err_id) { 490744961713Sgirish case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 490844961713Sgirish case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 490944961713Sgirish case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 491044961713Sgirish case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 491144961713Sgirish case 
NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 491244961713Sgirish case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 491344961713Sgirish case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 491444961713Sgirish case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 491544961713Sgirish case NXGE_FM_EREPORT_RDMC_RCRINCON: 491644961713Sgirish case NXGE_FM_EREPORT_RDMC_RCRFULL: 491744961713Sgirish case NXGE_FM_EREPORT_RDMC_RBRFULL: 491844961713Sgirish case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 491944961713Sgirish case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 492044961713Sgirish case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 492144961713Sgirish RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 492252ccf843Smisaki chan, &cs.value); 492344961713Sgirish if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 492444961713Sgirish cs.bits.hdw.rcr_ack_err = 1; 492544961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 492644961713Sgirish cs.bits.hdw.dc_fifo_err = 1; 492744961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 492844961713Sgirish cs.bits.hdw.rcr_sha_par = 1; 492944961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 493044961713Sgirish cs.bits.hdw.rbr_pre_par = 1; 493144961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 493244961713Sgirish cs.bits.hdw.rbr_tmout = 1; 493344961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 493444961713Sgirish cs.bits.hdw.rsp_cnt_err = 1; 493544961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 493644961713Sgirish cs.bits.hdw.byte_en_bus = 1; 493744961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 493844961713Sgirish cs.bits.hdw.rsp_dat_err = 1; 493944961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 494044961713Sgirish cs.bits.hdw.config_err = 1; 494144961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 494244961713Sgirish cs.bits.hdw.rcrincon = 1; 494344961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 494444961713Sgirish cs.bits.hdw.rcrfull = 1; 
494544961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 494644961713Sgirish cs.bits.hdw.rbrfull = 1; 494744961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 494844961713Sgirish cs.bits.hdw.rbrlogpage = 1; 494944961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 495044961713Sgirish cs.bits.hdw.cfiglogpage = 1; 4951adfcba55Sjoycey #if defined(__i386) 4952adfcba55Sjoycey cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 495352ccf843Smisaki cs.value); 4954adfcba55Sjoycey #else 495544961713Sgirish cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 495652ccf843Smisaki cs.value); 4957adfcba55Sjoycey #endif 495844961713Sgirish RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 495952ccf843Smisaki chan, cs.value); 496044961713Sgirish break; 496144961713Sgirish case NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 496244961713Sgirish case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 496344961713Sgirish case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 496444961713Sgirish cdfs.value = 0; 496544961713Sgirish if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 496644961713Sgirish cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 496744961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 496844961713Sgirish cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 496944961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 497044961713Sgirish cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 4971adfcba55Sjoycey #if defined(__i386) 4972adfcba55Sjoycey cmn_err(CE_NOTE, 497352ccf843Smisaki "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 497452ccf843Smisaki cdfs.value); 4975adfcba55Sjoycey #else 497644961713Sgirish cmn_err(CE_NOTE, 497752ccf843Smisaki "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 497852ccf843Smisaki cdfs.value); 4979adfcba55Sjoycey #endif 4980678453a8Sspeer NXGE_REG_WR64(nxgep->npi_handle, 4981678453a8Sspeer RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value); 498244961713Sgirish break; 
498344961713Sgirish case NXGE_FM_EREPORT_RDMC_DCF_ERR: 498444961713Sgirish break; 498553f3d8ecSyc case NXGE_FM_EREPORT_RDMC_RCR_ERR: 498644961713Sgirish break; 498744961713Sgirish } 498844961713Sgirish } 4989678453a8Sspeer 4990678453a8Sspeer static void 4991678453a8Sspeer nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p) 4992678453a8Sspeer { 4993678453a8Sspeer rxring_info_t *ring_info; 4994678453a8Sspeer int index; 4995678453a8Sspeer uint32_t chunk_size; 4996678453a8Sspeer uint64_t kaddr; 4997678453a8Sspeer uint_t num_blocks; 4998678453a8Sspeer 4999678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free")); 5000678453a8Sspeer 5001678453a8Sspeer if (rbr_p == NULL) { 5002678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5003678453a8Sspeer "==> nxge_rxdma_databuf_free: NULL rbr pointer")); 5004678453a8Sspeer return; 5005678453a8Sspeer } 5006678453a8Sspeer 5007678453a8Sspeer if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) { 5008*e759c33aSMichael Speer NXGE_DEBUG_MSG((NULL, DMA_CTL, 5009*e759c33aSMichael Speer "<== nxge_rxdma_databuf_free: DDI")); 5010678453a8Sspeer return; 5011678453a8Sspeer } 5012678453a8Sspeer 5013678453a8Sspeer ring_info = rbr_p->ring_info; 5014678453a8Sspeer if (ring_info == NULL) { 5015678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5016678453a8Sspeer "==> nxge_rxdma_databuf_free: NULL ring info")); 5017678453a8Sspeer return; 5018678453a8Sspeer } 5019678453a8Sspeer num_blocks = rbr_p->num_blocks; 5020678453a8Sspeer for (index = 0; index < num_blocks; index++) { 5021678453a8Sspeer kaddr = ring_info->buffer[index].kaddr; 5022678453a8Sspeer chunk_size = ring_info->buffer[index].buf_size; 5023678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 5024678453a8Sspeer "==> nxge_rxdma_databuf_free: free chunk %d " 5025678453a8Sspeer "kaddrp $%p chunk size %d", 5026678453a8Sspeer index, kaddr, chunk_size)); 5027678453a8Sspeer if (kaddr == NULL) continue; 5028678453a8Sspeer nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size); 5029678453a8Sspeer 
ring_info->buffer[index].kaddr = NULL; 5030678453a8Sspeer } 5031678453a8Sspeer 5032678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free")); 5033678453a8Sspeer } 5034678453a8Sspeer 5035678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 5036678453a8Sspeer extern void contig_mem_free(void *, size_t); 5037678453a8Sspeer #endif 5038678453a8Sspeer 5039678453a8Sspeer void 5040678453a8Sspeer nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size) 5041678453a8Sspeer { 5042678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf")); 5043678453a8Sspeer 5044678453a8Sspeer if (kaddr == NULL || !buf_size) { 5045678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5046678453a8Sspeer "==> nxge_free_buf: invalid kaddr $%p size to free %d", 5047678453a8Sspeer kaddr, buf_size)); 5048678453a8Sspeer return; 5049678453a8Sspeer } 5050678453a8Sspeer 5051678453a8Sspeer switch (alloc_type) { 5052678453a8Sspeer case KMEM_ALLOC: 5053678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 5054678453a8Sspeer "==> nxge_free_buf: freeing kmem $%p size %d", 5055678453a8Sspeer kaddr, buf_size)); 5056678453a8Sspeer #if defined(__i386) 5057678453a8Sspeer KMEM_FREE((void *)(uint32_t)kaddr, buf_size); 5058678453a8Sspeer #else 5059678453a8Sspeer KMEM_FREE((void *)kaddr, buf_size); 5060678453a8Sspeer #endif 5061678453a8Sspeer break; 5062678453a8Sspeer 5063678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 5064678453a8Sspeer case CONTIG_MEM_ALLOC: 5065678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 5066678453a8Sspeer "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d", 5067678453a8Sspeer kaddr, buf_size)); 5068678453a8Sspeer contig_mem_free((void *)kaddr, buf_size); 5069678453a8Sspeer break; 5070678453a8Sspeer #endif 5071678453a8Sspeer 5072678453a8Sspeer default: 5073678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5074678453a8Sspeer "<== nxge_free_buf: unsupported alloc type %d", 5075678453a8Sspeer alloc_type)); 5076678453a8Sspeer 
return; 5077678453a8Sspeer } 5078678453a8Sspeer 5079678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf")); 5080678453a8Sspeer } 5081