144961713Sgirish /* 244961713Sgirish * CDDL HEADER START 344961713Sgirish * 444961713Sgirish * The contents of this file are subject to the terms of the 544961713Sgirish * Common Development and Distribution License (the "License"). 644961713Sgirish * You may not use this file except in compliance with the License. 744961713Sgirish * 844961713Sgirish * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 944961713Sgirish * or http://www.opensolaris.org/os/licensing. 1044961713Sgirish * See the License for the specific language governing permissions 1144961713Sgirish * and limitations under the License. 1244961713Sgirish * 1344961713Sgirish * When distributing Covered Code, include this CDDL HEADER in each 1444961713Sgirish * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 1544961713Sgirish * If applicable, add the following below this CDDL HEADER, with the 1644961713Sgirish * fields enclosed by brackets "[]" replaced with your own identifying 1744961713Sgirish * information: Portions Copyright [yyyy] [name of copyright owner] 1844961713Sgirish * 1944961713Sgirish * CDDL HEADER END 2044961713Sgirish */ 2144961713Sgirish /* 223e82a89eSmisaki * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 2344961713Sgirish * Use is subject to license terms. 
2444961713Sgirish */ 2544961713Sgirish 2644961713Sgirish #pragma ident "%Z%%M% %I% %E% SMI" 2744961713Sgirish 2844961713Sgirish #include <sys/nxge/nxge_impl.h> 2944961713Sgirish #include <sys/nxge/nxge_rxdma.h> 30678453a8Sspeer #include <sys/nxge/nxge_hio.h> 31678453a8Sspeer 32678453a8Sspeer #if !defined(_BIG_ENDIAN) 33678453a8Sspeer #include <npi_rx_rd32.h> 34678453a8Sspeer #endif 35678453a8Sspeer #include <npi_rx_rd64.h> 36678453a8Sspeer #include <npi_rx_wr64.h> 3744961713Sgirish 3844961713Sgirish #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \ 39678453a8Sspeer (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid) 4044961713Sgirish #define NXGE_ACTUAL_RDC(nxgep, rdc) \ 4144961713Sgirish (rdc + nxgep->pt_config.hw_config.start_rdc) 4244961713Sgirish 4344961713Sgirish /* 4444961713Sgirish * Globals: tunable parameters (/etc/system or adb) 4544961713Sgirish * 4644961713Sgirish */ 4744961713Sgirish extern uint32_t nxge_rbr_size; 4844961713Sgirish extern uint32_t nxge_rcr_size; 4944961713Sgirish extern uint32_t nxge_rbr_spare_size; 5044961713Sgirish 5144961713Sgirish extern uint32_t nxge_mblks_pending; 5244961713Sgirish 5344961713Sgirish /* 5444961713Sgirish * Tunable to reduce the amount of time spent in the 5544961713Sgirish * ISR doing Rx Processing. 5644961713Sgirish */ 5744961713Sgirish extern uint32_t nxge_max_rx_pkts; 5844961713Sgirish boolean_t nxge_jumbo_enable; 5944961713Sgirish 6044961713Sgirish /* 6144961713Sgirish * Tunables to manage the receive buffer blocks. 6244961713Sgirish * 6344961713Sgirish * nxge_rx_threshold_hi: copy all buffers. 6444961713Sgirish * nxge_rx_bcopy_size_type: receive buffer block size type. 6544961713Sgirish * nxge_rx_threshold_lo: copy only up to tunable block size type. 
6644961713Sgirish */ 6744961713Sgirish extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 6844961713Sgirish extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 6944961713Sgirish extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 7044961713Sgirish 71b4d05839Sml extern uint32_t nxge_cksum_offload; 72678453a8Sspeer 73678453a8Sspeer static nxge_status_t nxge_map_rxdma(p_nxge_t, int); 74678453a8Sspeer static void nxge_unmap_rxdma(p_nxge_t, int); 7544961713Sgirish 7644961713Sgirish static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 7744961713Sgirish 78678453a8Sspeer static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int); 79678453a8Sspeer static void nxge_rxdma_hw_stop(p_nxge_t, int); 8044961713Sgirish 8144961713Sgirish static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 8244961713Sgirish p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 8344961713Sgirish uint32_t, 8444961713Sgirish p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 8544961713Sgirish p_rx_mbox_t *); 8644961713Sgirish static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 8744961713Sgirish p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 8844961713Sgirish 8944961713Sgirish static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 9044961713Sgirish uint16_t, 9144961713Sgirish p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 9244961713Sgirish p_rx_rcr_ring_t *, p_rx_mbox_t *); 9344961713Sgirish static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 9444961713Sgirish p_rx_rcr_ring_t, p_rx_mbox_t); 9544961713Sgirish 9644961713Sgirish static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 9744961713Sgirish uint16_t, 9844961713Sgirish p_nxge_dma_common_t *, 9944961713Sgirish p_rx_rbr_ring_t *, uint32_t); 10044961713Sgirish static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 10144961713Sgirish p_rx_rbr_ring_t); 10244961713Sgirish 10344961713Sgirish static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 10444961713Sgirish p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 10544961713Sgirish 
static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 10644961713Sgirish 107678453a8Sspeer static mblk_t * 108678453a8Sspeer nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int); 10944961713Sgirish 11044961713Sgirish static void nxge_receive_packet(p_nxge_t, 11144961713Sgirish p_rx_rcr_ring_t, 11244961713Sgirish p_rcr_entry_t, 11344961713Sgirish boolean_t *, 11444961713Sgirish mblk_t **, mblk_t **); 11544961713Sgirish 11644961713Sgirish nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 11744961713Sgirish 11844961713Sgirish static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 11944961713Sgirish static void nxge_freeb(p_rx_msg_t); 120678453a8Sspeer static void nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t); 121678453a8Sspeer static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t); 12244961713Sgirish 12344961713Sgirish static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 12444961713Sgirish uint32_t, uint32_t); 12544961713Sgirish 12644961713Sgirish static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 12744961713Sgirish p_rx_rbr_ring_t); 12844961713Sgirish 12944961713Sgirish 13044961713Sgirish static nxge_status_t 13144961713Sgirish nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 13244961713Sgirish 13344961713Sgirish nxge_status_t 13444961713Sgirish nxge_rx_port_fatal_err_recover(p_nxge_t); 13544961713Sgirish 136678453a8Sspeer static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t); 137678453a8Sspeer 13844961713Sgirish nxge_status_t 13944961713Sgirish nxge_init_rxdma_channels(p_nxge_t nxgep) 14044961713Sgirish { 141678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 142678453a8Sspeer int i, count; 14344961713Sgirish 14444961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 14544961713Sgirish 146678453a8Sspeer if (!isLDOMguest(nxgep)) { 147678453a8Sspeer if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) { 148678453a8Sspeer cmn_err(CE_NOTE, 
"hw_start_common"); 149678453a8Sspeer return (NXGE_ERROR); 150678453a8Sspeer } 151678453a8Sspeer } 152678453a8Sspeer 153678453a8Sspeer /* 154678453a8Sspeer * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8) 155678453a8Sspeer * We only have 8 hardware RDC tables, but we may have 156678453a8Sspeer * up to 16 logical (software-defined) groups of RDCS, 157678453a8Sspeer * if we make use of layer 3 & 4 hardware classification. 158678453a8Sspeer */ 159678453a8Sspeer for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 160678453a8Sspeer if ((1 << i) & set->lg.map) { 161678453a8Sspeer int channel; 162678453a8Sspeer nxge_grp_t *group = set->group[i]; 163678453a8Sspeer for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 164678453a8Sspeer if ((1 << channel) & group->map) { 165678453a8Sspeer if ((nxge_grp_dc_add(nxgep, 166*52ccf843Smisaki (vr_handle_t)group, 167*52ccf843Smisaki VP_BOUND_RX, channel))) 168678453a8Sspeer return (NXGE_ERROR); 169678453a8Sspeer } 170678453a8Sspeer } 171678453a8Sspeer } 172678453a8Sspeer if (++count == set->lg.count) 173678453a8Sspeer break; 17444961713Sgirish } 17544961713Sgirish 176678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 177678453a8Sspeer 178678453a8Sspeer return (NXGE_OK); 179678453a8Sspeer } 180678453a8Sspeer 181678453a8Sspeer nxge_status_t 182678453a8Sspeer nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 183678453a8Sspeer { 184678453a8Sspeer nxge_status_t status; 185678453a8Sspeer 186678453a8Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 187678453a8Sspeer 188678453a8Sspeer status = nxge_map_rxdma(nxge, channel); 18944961713Sgirish if (status != NXGE_OK) { 190678453a8Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 191678453a8Sspeer "<== nxge_init_rxdma: status 0x%x", status)); 192678453a8Sspeer return (status); 19344961713Sgirish } 19444961713Sgirish 195678453a8Sspeer status = nxge_rxdma_hw_start(nxge, channel); 19644961713Sgirish if (status != NXGE_OK) { 
197678453a8Sspeer nxge_unmap_rxdma(nxge, channel); 19844961713Sgirish } 19944961713Sgirish 200678453a8Sspeer if (!nxge->statsp->rdc_ksp[channel]) 201678453a8Sspeer nxge_setup_rdc_kstats(nxge, channel); 202678453a8Sspeer 203678453a8Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, 204678453a8Sspeer "<== nxge_init_rxdma_channel: status 0x%x", status)); 20544961713Sgirish 20644961713Sgirish return (status); 20744961713Sgirish } 20844961713Sgirish 20944961713Sgirish void 21044961713Sgirish nxge_uninit_rxdma_channels(p_nxge_t nxgep) 21144961713Sgirish { 212678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 213678453a8Sspeer int rdc; 214678453a8Sspeer 21544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 21644961713Sgirish 217678453a8Sspeer if (set->owned.map == 0) { 218678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 219678453a8Sspeer "nxge_uninit_rxdma_channels: no channels")); 220678453a8Sspeer return; 221678453a8Sspeer } 22244961713Sgirish 223678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 224678453a8Sspeer if ((1 << rdc) & set->owned.map) { 225678453a8Sspeer nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 226678453a8Sspeer } 227678453a8Sspeer } 228678453a8Sspeer 229678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 230678453a8Sspeer } 231678453a8Sspeer 232678453a8Sspeer void 233678453a8Sspeer nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 234678453a8Sspeer { 235678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 236678453a8Sspeer 237678453a8Sspeer if (nxgep->statsp->rdc_ksp[channel]) { 238678453a8Sspeer kstat_delete(nxgep->statsp->rdc_ksp[channel]); 239678453a8Sspeer nxgep->statsp->rdc_ksp[channel] = 0; 240678453a8Sspeer } 241678453a8Sspeer 242678453a8Sspeer nxge_rxdma_hw_stop(nxgep, channel); 243678453a8Sspeer nxge_unmap_rxdma(nxgep, channel); 244678453a8Sspeer 245678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 24644961713Sgirish 
} 24744961713Sgirish 24844961713Sgirish nxge_status_t 24944961713Sgirish nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 25044961713Sgirish { 25144961713Sgirish npi_handle_t handle; 25244961713Sgirish npi_status_t rs = NPI_SUCCESS; 25344961713Sgirish nxge_status_t status = NXGE_OK; 25444961713Sgirish 25544961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 25644961713Sgirish 25744961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 25844961713Sgirish rs = npi_rxdma_cfg_rdc_reset(handle, channel); 25944961713Sgirish 26044961713Sgirish if (rs != NPI_SUCCESS) { 26144961713Sgirish status = NXGE_ERROR | rs; 26244961713Sgirish } 26344961713Sgirish 26444961713Sgirish return (status); 26544961713Sgirish } 26644961713Sgirish 26744961713Sgirish void 26844961713Sgirish nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 26944961713Sgirish { 270678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 271678453a8Sspeer int rdc; 27244961713Sgirish 27344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 27444961713Sgirish 275678453a8Sspeer if (!isLDOMguest(nxgep)) { 276678453a8Sspeer npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 277678453a8Sspeer (void) npi_rxdma_dump_fzc_regs(handle); 27844961713Sgirish } 279678453a8Sspeer 280678453a8Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 281678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 282678453a8Sspeer "nxge_rxdma_regs_dump_channels: " 283678453a8Sspeer "NULL ring pointer(s)")); 28444961713Sgirish return; 28544961713Sgirish } 28644961713Sgirish 287678453a8Sspeer if (set->owned.map == 0) { 28844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 289678453a8Sspeer "nxge_rxdma_regs_dump_channels: no channels")); 29044961713Sgirish return; 29144961713Sgirish } 29244961713Sgirish 293678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 294678453a8Sspeer if ((1 << rdc) & set->owned.map) { 295678453a8Sspeer rx_rbr_ring_t *ring = 296678453a8Sspeer 
nxgep->rx_rbr_rings->rbr_rings[rdc]; 297678453a8Sspeer if (ring) { 298678453a8Sspeer (void) nxge_dump_rxdma_channel(nxgep, rdc); 299678453a8Sspeer } 30044961713Sgirish } 30144961713Sgirish } 30244961713Sgirish 30344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 30444961713Sgirish } 30544961713Sgirish 30644961713Sgirish nxge_status_t 30744961713Sgirish nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 30844961713Sgirish { 30944961713Sgirish npi_handle_t handle; 31044961713Sgirish npi_status_t rs = NPI_SUCCESS; 31144961713Sgirish nxge_status_t status = NXGE_OK; 31244961713Sgirish 31344961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 31444961713Sgirish 31544961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 31644961713Sgirish rs = npi_rxdma_dump_rdc_regs(handle, channel); 31744961713Sgirish 31844961713Sgirish if (rs != NPI_SUCCESS) { 31944961713Sgirish status = NXGE_ERROR | rs; 32044961713Sgirish } 32144961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 32244961713Sgirish return (status); 32344961713Sgirish } 32444961713Sgirish 32544961713Sgirish nxge_status_t 32644961713Sgirish nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 32744961713Sgirish p_rx_dma_ent_msk_t mask_p) 32844961713Sgirish { 32944961713Sgirish npi_handle_t handle; 33044961713Sgirish npi_status_t rs = NPI_SUCCESS; 33144961713Sgirish nxge_status_t status = NXGE_OK; 33244961713Sgirish 33344961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, 334*52ccf843Smisaki "<== nxge_init_rxdma_channel_event_mask")); 33544961713Sgirish 33644961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 33744961713Sgirish rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 33844961713Sgirish if (rs != NPI_SUCCESS) { 33944961713Sgirish status = NXGE_ERROR | rs; 34044961713Sgirish } 34144961713Sgirish 34244961713Sgirish return (status); 34344961713Sgirish } 34444961713Sgirish 34544961713Sgirish nxge_status_t 
34644961713Sgirish nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 34744961713Sgirish p_rx_dma_ctl_stat_t cs_p) 34844961713Sgirish { 34944961713Sgirish npi_handle_t handle; 35044961713Sgirish npi_status_t rs = NPI_SUCCESS; 35144961713Sgirish nxge_status_t status = NXGE_OK; 35244961713Sgirish 35344961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, 354*52ccf843Smisaki "<== nxge_init_rxdma_channel_cntl_stat")); 35544961713Sgirish 35644961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 35744961713Sgirish rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 35844961713Sgirish 35944961713Sgirish if (rs != NPI_SUCCESS) { 36044961713Sgirish status = NXGE_ERROR | rs; 36144961713Sgirish } 36244961713Sgirish 36344961713Sgirish return (status); 36444961713Sgirish } 36544961713Sgirish 366678453a8Sspeer /* 367678453a8Sspeer * nxge_rxdma_cfg_rdcgrp_default_rdc 368678453a8Sspeer * 369678453a8Sspeer * Set the default RDC for an RDC Group (Table) 370678453a8Sspeer * 371678453a8Sspeer * Arguments: 372678453a8Sspeer * nxgep 373678453a8Sspeer * rdcgrp The group to modify 374678453a8Sspeer * rdc The new default RDC. 
375678453a8Sspeer * 376678453a8Sspeer * Notes: 377678453a8Sspeer * 378678453a8Sspeer * NPI/NXGE function calls: 379678453a8Sspeer * npi_rxdma_cfg_rdc_table_default_rdc() 380678453a8Sspeer * 381678453a8Sspeer * Registers accessed: 382678453a8Sspeer * RDC_TBL_REG: FZC_ZCP + 0x10000 383678453a8Sspeer * 384678453a8Sspeer * Context: 385678453a8Sspeer * Service domain 386678453a8Sspeer */ 38744961713Sgirish nxge_status_t 388678453a8Sspeer nxge_rxdma_cfg_rdcgrp_default_rdc( 389678453a8Sspeer p_nxge_t nxgep, 390678453a8Sspeer uint8_t rdcgrp, 391678453a8Sspeer uint8_t rdc) 39244961713Sgirish { 39344961713Sgirish npi_handle_t handle; 39444961713Sgirish npi_status_t rs = NPI_SUCCESS; 39544961713Sgirish p_nxge_dma_pt_cfg_t p_dma_cfgp; 39644961713Sgirish p_nxge_rdc_grp_t rdc_grp_p; 39744961713Sgirish uint8_t actual_rdcgrp, actual_rdc; 39844961713Sgirish 39944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 400*52ccf843Smisaki " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 40144961713Sgirish p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 40244961713Sgirish 40344961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 40444961713Sgirish 405678453a8Sspeer /* 406678453a8Sspeer * This has to be rewritten. Do we even allow this anymore? 
407678453a8Sspeer */ 40844961713Sgirish rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 409678453a8Sspeer RDC_MAP_IN(rdc_grp_p->map, rdc); 410678453a8Sspeer rdc_grp_p->def_rdc = rdc; 41144961713Sgirish 41244961713Sgirish actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 41344961713Sgirish actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 41444961713Sgirish 415678453a8Sspeer rs = npi_rxdma_cfg_rdc_table_default_rdc( 416*52ccf843Smisaki handle, actual_rdcgrp, actual_rdc); 41744961713Sgirish 41844961713Sgirish if (rs != NPI_SUCCESS) { 41944961713Sgirish return (NXGE_ERROR | rs); 42044961713Sgirish } 42144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 422*52ccf843Smisaki " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 42344961713Sgirish return (NXGE_OK); 42444961713Sgirish } 42544961713Sgirish 42644961713Sgirish nxge_status_t 42744961713Sgirish nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 42844961713Sgirish { 42944961713Sgirish npi_handle_t handle; 43044961713Sgirish 43144961713Sgirish uint8_t actual_rdc; 43244961713Sgirish npi_status_t rs = NPI_SUCCESS; 43344961713Sgirish 43444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 435*52ccf843Smisaki " ==> nxge_rxdma_cfg_port_default_rdc")); 43644961713Sgirish 43744961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 438678453a8Sspeer actual_rdc = rdc; /* XXX Hack! 
*/ 43944961713Sgirish rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 44044961713Sgirish 44144961713Sgirish 44244961713Sgirish if (rs != NPI_SUCCESS) { 44344961713Sgirish return (NXGE_ERROR | rs); 44444961713Sgirish } 44544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 446*52ccf843Smisaki " <== nxge_rxdma_cfg_port_default_rdc")); 44744961713Sgirish 44844961713Sgirish return (NXGE_OK); 44944961713Sgirish } 45044961713Sgirish 45144961713Sgirish nxge_status_t 45244961713Sgirish nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 45344961713Sgirish uint16_t pkts) 45444961713Sgirish { 45544961713Sgirish npi_status_t rs = NPI_SUCCESS; 45644961713Sgirish npi_handle_t handle; 45744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 458*52ccf843Smisaki " ==> nxge_rxdma_cfg_rcr_threshold")); 45944961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 46044961713Sgirish 46144961713Sgirish rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 46244961713Sgirish 46344961713Sgirish if (rs != NPI_SUCCESS) { 46444961713Sgirish return (NXGE_ERROR | rs); 46544961713Sgirish } 46644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 46744961713Sgirish return (NXGE_OK); 46844961713Sgirish } 46944961713Sgirish 47044961713Sgirish nxge_status_t 47144961713Sgirish nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 47244961713Sgirish uint16_t tout, uint8_t enable) 47344961713Sgirish { 47444961713Sgirish npi_status_t rs = NPI_SUCCESS; 47544961713Sgirish npi_handle_t handle; 47644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 47744961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 47844961713Sgirish if (enable == 0) { 47944961713Sgirish rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 48044961713Sgirish } else { 48144961713Sgirish rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 482*52ccf843Smisaki tout); 48344961713Sgirish } 48444961713Sgirish 48544961713Sgirish if (rs != 
NPI_SUCCESS) { 48644961713Sgirish return (NXGE_ERROR | rs); 48744961713Sgirish } 48844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 48944961713Sgirish return (NXGE_OK); 49044961713Sgirish } 49144961713Sgirish 49244961713Sgirish nxge_status_t 49344961713Sgirish nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 49444961713Sgirish p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 49544961713Sgirish { 49644961713Sgirish npi_handle_t handle; 49744961713Sgirish rdc_desc_cfg_t rdc_desc; 49844961713Sgirish p_rcrcfig_b_t cfgb_p; 49944961713Sgirish npi_status_t rs = NPI_SUCCESS; 50044961713Sgirish 50144961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 50244961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 50344961713Sgirish /* 50444961713Sgirish * Use configuration data composed at init time. 50544961713Sgirish * Write to hardware the receive ring configurations. 50644961713Sgirish */ 50744961713Sgirish rdc_desc.mbox_enable = 1; 50844961713Sgirish rdc_desc.mbox_addr = mbox_p->mbox_addr; 50944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 510*52ccf843Smisaki "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 511*52ccf843Smisaki mbox_p->mbox_addr, rdc_desc.mbox_addr)); 51244961713Sgirish 51344961713Sgirish rdc_desc.rbr_len = rbr_p->rbb_max; 51444961713Sgirish rdc_desc.rbr_addr = rbr_p->rbr_addr; 51544961713Sgirish 51644961713Sgirish switch (nxgep->rx_bksize_code) { 51744961713Sgirish case RBR_BKSIZE_4K: 51844961713Sgirish rdc_desc.page_size = SIZE_4KB; 51944961713Sgirish break; 52044961713Sgirish case RBR_BKSIZE_8K: 52144961713Sgirish rdc_desc.page_size = SIZE_8KB; 52244961713Sgirish break; 52344961713Sgirish case RBR_BKSIZE_16K: 52444961713Sgirish rdc_desc.page_size = SIZE_16KB; 52544961713Sgirish break; 52644961713Sgirish case RBR_BKSIZE_32K: 52744961713Sgirish rdc_desc.page_size = SIZE_32KB; 52844961713Sgirish break; 52944961713Sgirish } 53044961713Sgirish 53144961713Sgirish 
rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 53244961713Sgirish rdc_desc.valid0 = 1; 53344961713Sgirish 53444961713Sgirish rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 53544961713Sgirish rdc_desc.valid1 = 1; 53644961713Sgirish 53744961713Sgirish rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 53844961713Sgirish rdc_desc.valid2 = 1; 53944961713Sgirish 54044961713Sgirish rdc_desc.full_hdr = rcr_p->full_hdr_flag; 54144961713Sgirish rdc_desc.offset = rcr_p->sw_priv_hdr_len; 54244961713Sgirish 54344961713Sgirish rdc_desc.rcr_len = rcr_p->comp_size; 54444961713Sgirish rdc_desc.rcr_addr = rcr_p->rcr_addr; 54544961713Sgirish 54644961713Sgirish cfgb_p = &(rcr_p->rcr_cfgb); 54744961713Sgirish rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 548678453a8Sspeer /* For now, disable this timeout in a guest domain. */ 549678453a8Sspeer if (isLDOMguest(nxgep)) { 550678453a8Sspeer rdc_desc.rcr_timeout = 0; 551678453a8Sspeer rdc_desc.rcr_timeout_enable = 0; 552678453a8Sspeer } else { 553678453a8Sspeer rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 554678453a8Sspeer rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 555678453a8Sspeer } 55644961713Sgirish 55744961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 558*52ccf843Smisaki "rbr_len qlen %d pagesize code %d rcr_len %d", 559*52ccf843Smisaki rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 56044961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 561*52ccf843Smisaki "size 0 %d size 1 %d size 2 %d", 562*52ccf843Smisaki rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 563*52ccf843Smisaki rbr_p->npi_pkt_buf_size2)); 56444961713Sgirish 56544961713Sgirish rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 56644961713Sgirish if (rs != NPI_SUCCESS) { 56744961713Sgirish return (NXGE_ERROR | rs); 56844961713Sgirish } 56944961713Sgirish 57044961713Sgirish /* 57144961713Sgirish * Enable the timeout and threshold. 
57244961713Sgirish */ 57344961713Sgirish rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 574*52ccf843Smisaki rdc_desc.rcr_threshold); 57544961713Sgirish if (rs != NPI_SUCCESS) { 57644961713Sgirish return (NXGE_ERROR | rs); 57744961713Sgirish } 57844961713Sgirish 57944961713Sgirish rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 580*52ccf843Smisaki rdc_desc.rcr_timeout); 58144961713Sgirish if (rs != NPI_SUCCESS) { 58244961713Sgirish return (NXGE_ERROR | rs); 58344961713Sgirish } 58444961713Sgirish 58544961713Sgirish /* Enable the DMA */ 58644961713Sgirish rs = npi_rxdma_cfg_rdc_enable(handle, channel); 58744961713Sgirish if (rs != NPI_SUCCESS) { 58844961713Sgirish return (NXGE_ERROR | rs); 58944961713Sgirish } 59044961713Sgirish 59144961713Sgirish /* Kick the DMA engine. */ 59244961713Sgirish npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 59344961713Sgirish /* Clear the rbr empty bit */ 59444961713Sgirish (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 59544961713Sgirish 59644961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 59744961713Sgirish 59844961713Sgirish return (NXGE_OK); 59944961713Sgirish } 60044961713Sgirish 60144961713Sgirish nxge_status_t 60244961713Sgirish nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 60344961713Sgirish { 60444961713Sgirish npi_handle_t handle; 60544961713Sgirish npi_status_t rs = NPI_SUCCESS; 60644961713Sgirish 60744961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 60844961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 60944961713Sgirish 61044961713Sgirish /* disable the DMA */ 61144961713Sgirish rs = npi_rxdma_cfg_rdc_disable(handle, channel); 61244961713Sgirish if (rs != NPI_SUCCESS) { 61344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 614*52ccf843Smisaki "<== nxge_disable_rxdma_channel:failed (0x%x)", 615*52ccf843Smisaki rs)); 61644961713Sgirish return (NXGE_ERROR | rs); 61744961713Sgirish } 61844961713Sgirish 
61944961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 62044961713Sgirish return (NXGE_OK); 62144961713Sgirish } 62244961713Sgirish 62344961713Sgirish nxge_status_t 62444961713Sgirish nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 62544961713Sgirish { 62644961713Sgirish npi_handle_t handle; 62744961713Sgirish nxge_status_t status = NXGE_OK; 62844961713Sgirish 62944961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, 630*52ccf843Smisaki "<== nxge_init_rxdma_channel_rcrflush")); 63144961713Sgirish 63244961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 63344961713Sgirish npi_rxdma_rdc_rcr_flush(handle, channel); 63444961713Sgirish 63544961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, 636*52ccf843Smisaki "<== nxge_init_rxdma_channel_rcrflsh")); 63744961713Sgirish return (status); 63844961713Sgirish 63944961713Sgirish } 64044961713Sgirish 64144961713Sgirish #define MID_INDEX(l, r) ((r + l + 1) >> 1) 64244961713Sgirish 64344961713Sgirish #define TO_LEFT -1 64444961713Sgirish #define TO_RIGHT 1 64544961713Sgirish #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 64644961713Sgirish #define BOTH_LEFT (TO_LEFT + TO_LEFT) 64744961713Sgirish #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 64844961713Sgirish #define NO_HINT 0xffffffff 64944961713Sgirish 65044961713Sgirish /*ARGSUSED*/ 65144961713Sgirish nxge_status_t 65244961713Sgirish nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 653a3c5bd6dSspeer uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 654a3c5bd6dSspeer uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 65544961713Sgirish { 65644961713Sgirish int bufsize; 65744961713Sgirish uint64_t pktbuf_pp; 65844961713Sgirish uint64_t dvma_addr; 65944961713Sgirish rxring_info_t *ring_info; 66044961713Sgirish int base_side, end_side; 66144961713Sgirish int r_index, l_index, anchor_index; 66244961713Sgirish int found, search_done; 66344961713Sgirish uint32_t offset, chunk_size, block_size, page_size_mask; 66444961713Sgirish uint32_t 
chunk_index, block_index, total_index; 66544961713Sgirish int max_iterations, iteration; 66644961713Sgirish rxbuf_index_info_t *bufinfo; 66744961713Sgirish 66844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 66944961713Sgirish 67044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 671*52ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 672*52ccf843Smisaki pkt_buf_addr_pp, 673*52ccf843Smisaki pktbufsz_type)); 674adfcba55Sjoycey #if defined(__i386) 675adfcba55Sjoycey pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 676adfcba55Sjoycey #else 67744961713Sgirish pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 678adfcba55Sjoycey #endif 67944961713Sgirish 68044961713Sgirish switch (pktbufsz_type) { 68144961713Sgirish case 0: 68244961713Sgirish bufsize = rbr_p->pkt_buf_size0; 68344961713Sgirish break; 68444961713Sgirish case 1: 68544961713Sgirish bufsize = rbr_p->pkt_buf_size1; 68644961713Sgirish break; 68744961713Sgirish case 2: 68844961713Sgirish bufsize = rbr_p->pkt_buf_size2; 68944961713Sgirish break; 69044961713Sgirish case RCR_SINGLE_BLOCK: 69144961713Sgirish bufsize = 0; 69244961713Sgirish anchor_index = 0; 69344961713Sgirish break; 69444961713Sgirish default: 69544961713Sgirish return (NXGE_ERROR); 69644961713Sgirish } 69744961713Sgirish 69844961713Sgirish if (rbr_p->num_blocks == 1) { 69944961713Sgirish anchor_index = 0; 70044961713Sgirish ring_info = rbr_p->ring_info; 70144961713Sgirish bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 70244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 703*52ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 704*52ccf843Smisaki "buf_pp $%p btype %d anchor_index %d " 705*52ccf843Smisaki "bufinfo $%p", 706*52ccf843Smisaki pkt_buf_addr_pp, 707*52ccf843Smisaki pktbufsz_type, 708*52ccf843Smisaki anchor_index, 709*52ccf843Smisaki bufinfo)); 71044961713Sgirish 71144961713Sgirish goto found_index; 71244961713Sgirish } 71344961713Sgirish 71444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 
715*52ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: " 716*52ccf843Smisaki "buf_pp $%p btype %d anchor_index %d", 717*52ccf843Smisaki pkt_buf_addr_pp, 718*52ccf843Smisaki pktbufsz_type, 719*52ccf843Smisaki anchor_index)); 72044961713Sgirish 72144961713Sgirish ring_info = rbr_p->ring_info; 72244961713Sgirish found = B_FALSE; 72344961713Sgirish bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 72444961713Sgirish iteration = 0; 72544961713Sgirish max_iterations = ring_info->max_iterations; 72644961713Sgirish /* 727a3c5bd6dSspeer * First check if this block has been seen 72844961713Sgirish * recently. This is indicated by a hint which 72944961713Sgirish * is initialized when the first buffer of the block 73044961713Sgirish * is seen. The hint is reset when the last buffer of 73144961713Sgirish * the block has been processed. 73244961713Sgirish * As three block sizes are supported, three hints 73344961713Sgirish * are kept. The idea behind the hints is that once 73444961713Sgirish * the hardware uses a block for a buffer of that 73544961713Sgirish * size, it will use it exclusively for that size 73644961713Sgirish * and will use it until it is exhausted. It is assumed 73744961713Sgirish * that there would a single block being used for the same 73844961713Sgirish * buffer sizes at any given time. 
73944961713Sgirish */ 74044961713Sgirish if (ring_info->hint[pktbufsz_type] != NO_HINT) { 74144961713Sgirish anchor_index = ring_info->hint[pktbufsz_type]; 74244961713Sgirish dvma_addr = bufinfo[anchor_index].dvma_addr; 74344961713Sgirish chunk_size = bufinfo[anchor_index].buf_size; 74444961713Sgirish if ((pktbuf_pp >= dvma_addr) && 745*52ccf843Smisaki (pktbuf_pp < (dvma_addr + chunk_size))) { 74644961713Sgirish found = B_TRUE; 74744961713Sgirish /* 74844961713Sgirish * check if this is the last buffer in the block 74944961713Sgirish * If so, then reset the hint for the size; 75044961713Sgirish */ 75144961713Sgirish 75244961713Sgirish if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size)) 75344961713Sgirish ring_info->hint[pktbufsz_type] = NO_HINT; 75444961713Sgirish } 75544961713Sgirish } 75644961713Sgirish 75744961713Sgirish if (found == B_FALSE) { 75844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 759*52ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (!found)" 760*52ccf843Smisaki "buf_pp $%p btype %d anchor_index %d", 761*52ccf843Smisaki pkt_buf_addr_pp, 762*52ccf843Smisaki pktbufsz_type, 763*52ccf843Smisaki anchor_index)); 76444961713Sgirish 76544961713Sgirish /* 76644961713Sgirish * This is the first buffer of the block of this 76744961713Sgirish * size. Need to search the whole information 76844961713Sgirish * array. 76944961713Sgirish * the search algorithm uses a binary tree search 77044961713Sgirish * algorithm. It assumes that the information is 77144961713Sgirish * already sorted with increasing order 77244961713Sgirish * info[0] < info[1] < info[2] .... 
< info[n-1] 77344961713Sgirish * where n is the size of the information array 77444961713Sgirish */ 77544961713Sgirish r_index = rbr_p->num_blocks - 1; 77644961713Sgirish l_index = 0; 77744961713Sgirish search_done = B_FALSE; 77844961713Sgirish anchor_index = MID_INDEX(r_index, l_index); 77944961713Sgirish while (search_done == B_FALSE) { 78044961713Sgirish if ((r_index == l_index) || 781*52ccf843Smisaki (iteration >= max_iterations)) 78244961713Sgirish search_done = B_TRUE; 78344961713Sgirish end_side = TO_RIGHT; /* to the right */ 78444961713Sgirish base_side = TO_LEFT; /* to the left */ 78544961713Sgirish /* read the DVMA address information and sort it */ 78644961713Sgirish dvma_addr = bufinfo[anchor_index].dvma_addr; 78744961713Sgirish chunk_size = bufinfo[anchor_index].buf_size; 78844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 789*52ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (searching)" 790*52ccf843Smisaki "buf_pp $%p btype %d " 791*52ccf843Smisaki "anchor_index %d chunk_size %d dvmaaddr $%p", 792*52ccf843Smisaki pkt_buf_addr_pp, 793*52ccf843Smisaki pktbufsz_type, 794*52ccf843Smisaki anchor_index, 795*52ccf843Smisaki chunk_size, 796*52ccf843Smisaki dvma_addr)); 79744961713Sgirish 79844961713Sgirish if (pktbuf_pp >= dvma_addr) 79944961713Sgirish base_side = TO_RIGHT; /* to the right */ 80044961713Sgirish if (pktbuf_pp < (dvma_addr + chunk_size)) 80144961713Sgirish end_side = TO_LEFT; /* to the left */ 80244961713Sgirish 80344961713Sgirish switch (base_side + end_side) { 804*52ccf843Smisaki case IN_MIDDLE: 805*52ccf843Smisaki /* found */ 806*52ccf843Smisaki found = B_TRUE; 807*52ccf843Smisaki search_done = B_TRUE; 808*52ccf843Smisaki if ((pktbuf_pp + bufsize) < 809*52ccf843Smisaki (dvma_addr + chunk_size)) 810*52ccf843Smisaki ring_info->hint[pktbufsz_type] = 811*52ccf843Smisaki bufinfo[anchor_index].buf_index; 812*52ccf843Smisaki break; 813*52ccf843Smisaki case BOTH_RIGHT: 814*52ccf843Smisaki /* not found: go to the right */ 815*52ccf843Smisaki l_index = 
anchor_index + 1; 816*52ccf843Smisaki anchor_index = MID_INDEX(r_index, l_index); 817*52ccf843Smisaki break; 818*52ccf843Smisaki 819*52ccf843Smisaki case BOTH_LEFT: 820*52ccf843Smisaki /* not found: go to the left */ 821*52ccf843Smisaki r_index = anchor_index - 1; 822*52ccf843Smisaki anchor_index = MID_INDEX(r_index, l_index); 823*52ccf843Smisaki break; 824*52ccf843Smisaki default: /* should not come here */ 825*52ccf843Smisaki return (NXGE_ERROR); 82644961713Sgirish } 82744961713Sgirish iteration++; 82844961713Sgirish } 82944961713Sgirish 83044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 831*52ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (search done)" 832*52ccf843Smisaki "buf_pp $%p btype %d anchor_index %d", 833*52ccf843Smisaki pkt_buf_addr_pp, 834*52ccf843Smisaki pktbufsz_type, 835*52ccf843Smisaki anchor_index)); 83644961713Sgirish } 83744961713Sgirish 83844961713Sgirish if (found == B_FALSE) { 83944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 840*52ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (search failed)" 841*52ccf843Smisaki "buf_pp $%p btype %d anchor_index %d", 842*52ccf843Smisaki pkt_buf_addr_pp, 843*52ccf843Smisaki pktbufsz_type, 844*52ccf843Smisaki anchor_index)); 84544961713Sgirish return (NXGE_ERROR); 84644961713Sgirish } 84744961713Sgirish 84844961713Sgirish found_index: 84944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 850*52ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 851*52ccf843Smisaki "buf_pp $%p btype %d bufsize %d anchor_index %d", 852*52ccf843Smisaki pkt_buf_addr_pp, 853*52ccf843Smisaki pktbufsz_type, 854*52ccf843Smisaki bufsize, 855*52ccf843Smisaki anchor_index)); 85644961713Sgirish 85744961713Sgirish /* index of the first block in this chunk */ 85844961713Sgirish chunk_index = bufinfo[anchor_index].start_index; 85944961713Sgirish dvma_addr = bufinfo[anchor_index].dvma_addr; 86044961713Sgirish page_size_mask = ring_info->block_size_mask; 86144961713Sgirish 86244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 863*52ccf843Smisaki "==> 
nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 864*52ccf843Smisaki "buf_pp $%p btype %d bufsize %d " 865*52ccf843Smisaki "anchor_index %d chunk_index %d dvma $%p", 866*52ccf843Smisaki pkt_buf_addr_pp, 867*52ccf843Smisaki pktbufsz_type, 868*52ccf843Smisaki bufsize, 869*52ccf843Smisaki anchor_index, 870*52ccf843Smisaki chunk_index, 871*52ccf843Smisaki dvma_addr)); 87244961713Sgirish 87344961713Sgirish offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 87444961713Sgirish block_size = rbr_p->block_size; /* System block(page) size */ 87544961713Sgirish 87644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 877*52ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 878*52ccf843Smisaki "buf_pp $%p btype %d bufsize %d " 879*52ccf843Smisaki "anchor_index %d chunk_index %d dvma $%p " 880*52ccf843Smisaki "offset %d block_size %d", 881*52ccf843Smisaki pkt_buf_addr_pp, 882*52ccf843Smisaki pktbufsz_type, 883*52ccf843Smisaki bufsize, 884*52ccf843Smisaki anchor_index, 885*52ccf843Smisaki chunk_index, 886*52ccf843Smisaki dvma_addr, 887*52ccf843Smisaki offset, 888*52ccf843Smisaki block_size)); 88944961713Sgirish 89044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 89144961713Sgirish 89244961713Sgirish block_index = (offset / block_size); /* index within chunk */ 89344961713Sgirish total_index = chunk_index + block_index; 89444961713Sgirish 89544961713Sgirish 89644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 897*52ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: " 898*52ccf843Smisaki "total_index %d dvma_addr $%p " 899*52ccf843Smisaki "offset %d block_size %d " 900*52ccf843Smisaki "block_index %d ", 901*52ccf843Smisaki total_index, dvma_addr, 902*52ccf843Smisaki offset, block_size, 903*52ccf843Smisaki block_index)); 904adfcba55Sjoycey #if defined(__i386) 905adfcba55Sjoycey *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 906*52ccf843Smisaki (uint32_t)offset); 907adfcba55Sjoycey #else 908adfcba55Sjoycey *pkt_buf_addr_p = (uint64_t 
*)((uint64_t)bufinfo[anchor_index].kaddr + 909*52ccf843Smisaki (uint64_t)offset); 910adfcba55Sjoycey #endif 91144961713Sgirish 91244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 913*52ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: " 914*52ccf843Smisaki "total_index %d dvma_addr $%p " 915*52ccf843Smisaki "offset %d block_size %d " 916*52ccf843Smisaki "block_index %d " 917*52ccf843Smisaki "*pkt_buf_addr_p $%p", 918*52ccf843Smisaki total_index, dvma_addr, 919*52ccf843Smisaki offset, block_size, 920*52ccf843Smisaki block_index, 921*52ccf843Smisaki *pkt_buf_addr_p)); 92244961713Sgirish 92344961713Sgirish 92444961713Sgirish *msg_index = total_index; 92544961713Sgirish *bufoffset = (offset & page_size_mask); 92644961713Sgirish 92744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 928*52ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: get msg index: " 929*52ccf843Smisaki "msg_index %d bufoffset_index %d", 930*52ccf843Smisaki *msg_index, 931*52ccf843Smisaki *bufoffset)); 93244961713Sgirish 93344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 93444961713Sgirish 93544961713Sgirish return (NXGE_OK); 93644961713Sgirish } 93744961713Sgirish 93844961713Sgirish /* 93944961713Sgirish * used by quick sort (qsort) function 94044961713Sgirish * to perform comparison 94144961713Sgirish */ 94244961713Sgirish static int 94344961713Sgirish nxge_sort_compare(const void *p1, const void *p2) 94444961713Sgirish { 94544961713Sgirish 94644961713Sgirish rxbuf_index_info_t *a, *b; 94744961713Sgirish 94844961713Sgirish a = (rxbuf_index_info_t *)p1; 94944961713Sgirish b = (rxbuf_index_info_t *)p2; 95044961713Sgirish 95144961713Sgirish if (a->dvma_addr > b->dvma_addr) 95244961713Sgirish return (1); 95344961713Sgirish if (a->dvma_addr < b->dvma_addr) 95444961713Sgirish return (-1); 95544961713Sgirish return (0); 95644961713Sgirish } 95744961713Sgirish 95844961713Sgirish 95944961713Sgirish 96044961713Sgirish /* 96144961713Sgirish * grabbed this sort implementation from common/syscall/avl.c 
96244961713Sgirish * 96344961713Sgirish */ 96444961713Sgirish /* 96544961713Sgirish * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 96644961713Sgirish * v = Ptr to array/vector of objs 96744961713Sgirish * n = # objs in the array 96844961713Sgirish * s = size of each obj (must be multiples of a word size) 96944961713Sgirish * f = ptr to function to compare two objs 97044961713Sgirish * returns (-1 = less than, 0 = equal, 1 = greater than 97144961713Sgirish */ 97244961713Sgirish void 97344961713Sgirish nxge_ksort(caddr_t v, int n, int s, int (*f)()) 97444961713Sgirish { 97544961713Sgirish int g, i, j, ii; 97644961713Sgirish unsigned int *p1, *p2; 97744961713Sgirish unsigned int tmp; 97844961713Sgirish 97944961713Sgirish /* No work to do */ 98044961713Sgirish if (v == NULL || n <= 1) 98144961713Sgirish return; 98244961713Sgirish /* Sanity check on arguments */ 98344961713Sgirish ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 98444961713Sgirish ASSERT(s > 0); 98544961713Sgirish 98644961713Sgirish for (g = n / 2; g > 0; g /= 2) { 98744961713Sgirish for (i = g; i < n; i++) { 98844961713Sgirish for (j = i - g; j >= 0 && 989*52ccf843Smisaki (*f)(v + j * s, v + (j + g) * s) == 1; 990*52ccf843Smisaki j -= g) { 99144961713Sgirish p1 = (unsigned *)(v + j * s); 99244961713Sgirish p2 = (unsigned *)(v + (j + g) * s); 99344961713Sgirish for (ii = 0; ii < s / 4; ii++) { 99444961713Sgirish tmp = *p1; 99544961713Sgirish *p1++ = *p2; 99644961713Sgirish *p2++ = tmp; 99744961713Sgirish } 99844961713Sgirish } 99944961713Sgirish } 100044961713Sgirish } 100144961713Sgirish } 100244961713Sgirish 100344961713Sgirish /* 100444961713Sgirish * Initialize data structures required for rxdma 100544961713Sgirish * buffer dvma->vmem address lookup 100644961713Sgirish */ 100744961713Sgirish /*ARGSUSED*/ 100844961713Sgirish static nxge_status_t 100944961713Sgirish nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 101044961713Sgirish { 101144961713Sgirish 
101244961713Sgirish int index; 101344961713Sgirish rxring_info_t *ring_info; 101444961713Sgirish int max_iteration = 0, max_index = 0; 101544961713Sgirish 101644961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 101744961713Sgirish 101844961713Sgirish ring_info = rbrp->ring_info; 101944961713Sgirish ring_info->hint[0] = NO_HINT; 102044961713Sgirish ring_info->hint[1] = NO_HINT; 102144961713Sgirish ring_info->hint[2] = NO_HINT; 102244961713Sgirish max_index = rbrp->num_blocks; 102344961713Sgirish 102444961713Sgirish /* read the DVMA address information and sort it */ 102544961713Sgirish /* do init of the information array */ 102644961713Sgirish 102744961713Sgirish 102844961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1029*52ccf843Smisaki " nxge_rxbuf_index_info_init Sort ptrs")); 103044961713Sgirish 103144961713Sgirish /* sort the array */ 103244961713Sgirish nxge_ksort((void *)ring_info->buffer, max_index, 1033*52ccf843Smisaki sizeof (rxbuf_index_info_t), nxge_sort_compare); 103444961713Sgirish 103544961713Sgirish 103644961713Sgirish 103744961713Sgirish for (index = 0; index < max_index; index++) { 103844961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1039*52ccf843Smisaki " nxge_rxbuf_index_info_init: sorted chunk %d " 1040*52ccf843Smisaki " ioaddr $%p kaddr $%p size %x", 1041*52ccf843Smisaki index, ring_info->buffer[index].dvma_addr, 1042*52ccf843Smisaki ring_info->buffer[index].kaddr, 1043*52ccf843Smisaki ring_info->buffer[index].buf_size)); 104444961713Sgirish } 104544961713Sgirish 104644961713Sgirish max_iteration = 0; 104744961713Sgirish while (max_index >= (1ULL << max_iteration)) 104844961713Sgirish max_iteration++; 104944961713Sgirish ring_info->max_iterations = max_iteration + 1; 105044961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1051*52ccf843Smisaki " nxge_rxbuf_index_info_init Find max iter %d", 1052*52ccf843Smisaki ring_info->max_iterations)); 105344961713Sgirish 105444961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== 
nxge_rxbuf_index_info_init")); 105544961713Sgirish return (NXGE_OK); 105644961713Sgirish } 105744961713Sgirish 10580a8e077aSspeer /* ARGSUSED */ 105944961713Sgirish void 106044961713Sgirish nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 106144961713Sgirish { 106244961713Sgirish #ifdef NXGE_DEBUG 106344961713Sgirish 106444961713Sgirish uint32_t bptr; 106544961713Sgirish uint64_t pp; 106644961713Sgirish 106744961713Sgirish bptr = entry_p->bits.hdw.pkt_buf_addr; 106844961713Sgirish 106944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1070*52ccf843Smisaki "\trcr entry $%p " 1071*52ccf843Smisaki "\trcr entry 0x%0llx " 1072*52ccf843Smisaki "\trcr entry 0x%08x " 1073*52ccf843Smisaki "\trcr entry 0x%08x " 1074*52ccf843Smisaki "\tvalue 0x%0llx\n" 1075*52ccf843Smisaki "\tmulti = %d\n" 1076*52ccf843Smisaki "\tpkt_type = 0x%x\n" 1077*52ccf843Smisaki "\tzero_copy = %d\n" 1078*52ccf843Smisaki "\tnoport = %d\n" 1079*52ccf843Smisaki "\tpromis = %d\n" 1080*52ccf843Smisaki "\terror = 0x%04x\n" 1081*52ccf843Smisaki "\tdcf_err = 0x%01x\n" 1082*52ccf843Smisaki "\tl2_len = %d\n" 1083*52ccf843Smisaki "\tpktbufsize = %d\n" 1084*52ccf843Smisaki "\tpkt_buf_addr = $%p\n" 1085*52ccf843Smisaki "\tpkt_buf_addr (<< 6) = $%p\n", 1086*52ccf843Smisaki entry_p, 1087*52ccf843Smisaki *(int64_t *)entry_p, 1088*52ccf843Smisaki *(int32_t *)entry_p, 1089*52ccf843Smisaki *(int32_t *)((char *)entry_p + 32), 1090*52ccf843Smisaki entry_p->value, 1091*52ccf843Smisaki entry_p->bits.hdw.multi, 1092*52ccf843Smisaki entry_p->bits.hdw.pkt_type, 1093*52ccf843Smisaki entry_p->bits.hdw.zero_copy, 1094*52ccf843Smisaki entry_p->bits.hdw.noport, 1095*52ccf843Smisaki entry_p->bits.hdw.promis, 1096*52ccf843Smisaki entry_p->bits.hdw.error, 1097*52ccf843Smisaki entry_p->bits.hdw.dcf_err, 1098*52ccf843Smisaki entry_p->bits.hdw.l2_len, 1099*52ccf843Smisaki entry_p->bits.hdw.pktbufsz, 1100*52ccf843Smisaki bptr, 1101*52ccf843Smisaki entry_p->bits.ldw.pkt_buf_addr)); 110244961713Sgirish 110344961713Sgirish pp = 
(entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1104*52ccf843Smisaki RCR_PKT_BUF_ADDR_SHIFT; 110544961713Sgirish 110644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1107*52ccf843Smisaki pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 110844961713Sgirish #endif 110944961713Sgirish } 111044961713Sgirish 111144961713Sgirish void 111244961713Sgirish nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 111344961713Sgirish { 111444961713Sgirish npi_handle_t handle; 111544961713Sgirish rbr_stat_t rbr_stat; 111644961713Sgirish addr44_t hd_addr; 111744961713Sgirish addr44_t tail_addr; 111844961713Sgirish uint16_t qlen; 111944961713Sgirish 112044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1121*52ccf843Smisaki "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 112244961713Sgirish 112344961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 112444961713Sgirish 112544961713Sgirish /* RBR head */ 112644961713Sgirish hd_addr.addr = 0; 112744961713Sgirish (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1128adfcba55Sjoycey #if defined(__i386) 112953f3d8ecSyc printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1130*52ccf843Smisaki (void *)(uint32_t)hd_addr.addr); 1131adfcba55Sjoycey #else 113253f3d8ecSyc printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1133*52ccf843Smisaki (void *)hd_addr.addr); 1134adfcba55Sjoycey #endif 113544961713Sgirish 113644961713Sgirish /* RBR stats */ 113744961713Sgirish (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 113844961713Sgirish printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 113944961713Sgirish 114044961713Sgirish /* RCR tail */ 114144961713Sgirish tail_addr.addr = 0; 114244961713Sgirish (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1143adfcba55Sjoycey #if defined(__i386) 114453f3d8ecSyc printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1145*52ccf843Smisaki (void *)(uint32_t)tail_addr.addr); 1146adfcba55Sjoycey #else 114753f3d8ecSyc printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 
1148*52ccf843Smisaki (void *)tail_addr.addr); 1149adfcba55Sjoycey #endif 115044961713Sgirish 115144961713Sgirish /* RCR qlen */ 115244961713Sgirish (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 115344961713Sgirish printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 115444961713Sgirish 115544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1156*52ccf843Smisaki "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 115744961713Sgirish } 115844961713Sgirish 115944961713Sgirish void 116044961713Sgirish nxge_rxdma_stop(p_nxge_t nxgep) 116144961713Sgirish { 116244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop")); 116344961713Sgirish 116444961713Sgirish (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 116544961713Sgirish (void) nxge_rx_mac_disable(nxgep); 116644961713Sgirish (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 116744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop")); 116844961713Sgirish } 116944961713Sgirish 117044961713Sgirish void 117144961713Sgirish nxge_rxdma_stop_reinit(p_nxge_t nxgep) 117244961713Sgirish { 117344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_reinit")); 117444961713Sgirish 117544961713Sgirish (void) nxge_rxdma_stop(nxgep); 117644961713Sgirish (void) nxge_uninit_rxdma_channels(nxgep); 117744961713Sgirish (void) nxge_init_rxdma_channels(nxgep); 117844961713Sgirish 117944961713Sgirish #ifndef AXIS_DEBUG_LB 118044961713Sgirish (void) nxge_xcvr_init(nxgep); 118144961713Sgirish (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 118244961713Sgirish #endif 118344961713Sgirish (void) nxge_rx_mac_enable(nxgep); 118444961713Sgirish 118544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_reinit")); 118644961713Sgirish } 118744961713Sgirish 118844961713Sgirish nxge_status_t 118944961713Sgirish nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 119044961713Sgirish { 1191678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 1192678453a8Sspeer nxge_status_t status; 
1193678453a8Sspeer npi_status_t rs; 1194678453a8Sspeer int rdc; 119544961713Sgirish 119644961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1197*52ccf843Smisaki "==> nxge_rxdma_hw_mode: mode %d", enable)); 119844961713Sgirish 119944961713Sgirish if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 120044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1201678453a8Sspeer "<== nxge_rxdma_mode: not initialized")); 120244961713Sgirish return (NXGE_ERROR); 120344961713Sgirish } 120444961713Sgirish 1205678453a8Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1206678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1207678453a8Sspeer "<== nxge_tx_port_fatal_err_recover: " 1208678453a8Sspeer "NULL ring pointer(s)")); 120944961713Sgirish return (NXGE_ERROR); 121044961713Sgirish } 121144961713Sgirish 1212678453a8Sspeer if (set->owned.map == 0) { 121344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1214678453a8Sspeer "nxge_rxdma_regs_dump_channels: no channels")); 1215678453a8Sspeer return (NULL); 121644961713Sgirish } 121744961713Sgirish 1218678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1219678453a8Sspeer if ((1 << rdc) & set->owned.map) { 1220678453a8Sspeer rx_rbr_ring_t *ring = 1221678453a8Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 1222678453a8Sspeer npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 1223678453a8Sspeer if (ring) { 1224678453a8Sspeer if (enable) { 1225678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1226678453a8Sspeer "==> nxge_rxdma_hw_mode: " 1227678453a8Sspeer "channel %d (enable)", rdc)); 1228678453a8Sspeer rs = npi_rxdma_cfg_rdc_enable 1229678453a8Sspeer (handle, rdc); 1230678453a8Sspeer } else { 1231678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1232678453a8Sspeer "==> nxge_rxdma_hw_mode: " 1233678453a8Sspeer "channel %d disable)", rdc)); 1234678453a8Sspeer rs = npi_rxdma_cfg_rdc_disable 1235678453a8Sspeer (handle, rdc); 1236678453a8Sspeer } 1237678453a8Sspeer } 123844961713Sgirish } 123944961713Sgirish } 124044961713Sgirish 
124144961713Sgirish status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 124244961713Sgirish 124344961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1244*52ccf843Smisaki "<== nxge_rxdma_hw_mode: status 0x%x", status)); 124544961713Sgirish 124644961713Sgirish return (status); 124744961713Sgirish } 124844961713Sgirish 124944961713Sgirish void 125044961713Sgirish nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 125144961713Sgirish { 125244961713Sgirish npi_handle_t handle; 125344961713Sgirish 125444961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1255*52ccf843Smisaki "==> nxge_rxdma_enable_channel: channel %d", channel)); 125644961713Sgirish 125744961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 125844961713Sgirish (void) npi_rxdma_cfg_rdc_enable(handle, channel); 125944961713Sgirish 126044961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 126144961713Sgirish } 126244961713Sgirish 126344961713Sgirish void 126444961713Sgirish nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 126544961713Sgirish { 126644961713Sgirish npi_handle_t handle; 126744961713Sgirish 126844961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1269*52ccf843Smisaki "==> nxge_rxdma_disable_channel: channel %d", channel)); 127044961713Sgirish 127144961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 127244961713Sgirish (void) npi_rxdma_cfg_rdc_disable(handle, channel); 127344961713Sgirish 127444961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 127544961713Sgirish } 127644961713Sgirish 127744961713Sgirish void 127844961713Sgirish nxge_hw_start_rx(p_nxge_t nxgep) 127944961713Sgirish { 128044961713Sgirish NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 128144961713Sgirish 128244961713Sgirish (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 128344961713Sgirish (void) nxge_rx_mac_enable(nxgep); 128444961713Sgirish 128544961713Sgirish NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 128644961713Sgirish } 
128744961713Sgirish 128844961713Sgirish /*ARGSUSED*/ 128944961713Sgirish void 129044961713Sgirish nxge_fixup_rxdma_rings(p_nxge_t nxgep) 129144961713Sgirish { 1292678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 1293678453a8Sspeer int rdc; 129444961713Sgirish 129544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 129644961713Sgirish 1297678453a8Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1298678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1299678453a8Sspeer "<== nxge_tx_port_fatal_err_recover: " 1300678453a8Sspeer "NULL ring pointer(s)")); 130144961713Sgirish return; 130244961713Sgirish } 130344961713Sgirish 1304678453a8Sspeer if (set->owned.map == 0) { 130544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1306678453a8Sspeer "nxge_rxdma_regs_dump_channels: no channels")); 130744961713Sgirish return; 130844961713Sgirish } 130944961713Sgirish 1310678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1311678453a8Sspeer if ((1 << rdc) & set->owned.map) { 1312678453a8Sspeer rx_rbr_ring_t *ring = 1313678453a8Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 1314678453a8Sspeer if (ring) { 1315678453a8Sspeer nxge_rxdma_hw_stop(nxgep, rdc); 1316678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 1317*52ccf843Smisaki "==> nxge_fixup_rxdma_rings: " 1318*52ccf843Smisaki "channel %d ring $%px", 1319*52ccf843Smisaki rdc, ring)); 1320678453a8Sspeer (void) nxge_rxdma_fixup_channel 1321678453a8Sspeer (nxgep, rdc, rdc); 1322678453a8Sspeer } 1323678453a8Sspeer } 132444961713Sgirish } 132544961713Sgirish 132644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 132744961713Sgirish } 132844961713Sgirish 132944961713Sgirish void 133044961713Sgirish nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 133144961713Sgirish { 133244961713Sgirish int i; 133344961713Sgirish 133444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 133544961713Sgirish i = nxge_rxdma_get_ring_index(nxgep, 
channel); 133644961713Sgirish if (i < 0) { 133744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1338*52ccf843Smisaki "<== nxge_rxdma_fix_channel: no entry found")); 133944961713Sgirish return; 134044961713Sgirish } 134144961713Sgirish 134244961713Sgirish nxge_rxdma_fixup_channel(nxgep, channel, i); 134344961713Sgirish 1344678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel")); 134544961713Sgirish } 134644961713Sgirish 134744961713Sgirish void 134844961713Sgirish nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry) 134944961713Sgirish { 135044961713Sgirish int ndmas; 135144961713Sgirish p_rx_rbr_rings_t rx_rbr_rings; 135244961713Sgirish p_rx_rbr_ring_t *rbr_rings; 135344961713Sgirish p_rx_rcr_rings_t rx_rcr_rings; 135444961713Sgirish p_rx_rcr_ring_t *rcr_rings; 135544961713Sgirish p_rx_mbox_areas_t rx_mbox_areas_p; 135644961713Sgirish p_rx_mbox_t *rx_mbox_p; 135744961713Sgirish p_nxge_dma_pool_t dma_buf_poolp; 135844961713Sgirish p_nxge_dma_pool_t dma_cntl_poolp; 135944961713Sgirish p_rx_rbr_ring_t rbrp; 136044961713Sgirish p_rx_rcr_ring_t rcrp; 136144961713Sgirish p_rx_mbox_t mboxp; 136244961713Sgirish p_nxge_dma_common_t dmap; 136344961713Sgirish nxge_status_t status = NXGE_OK; 136444961713Sgirish 136544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel")); 136644961713Sgirish 136744961713Sgirish (void) nxge_rxdma_stop_channel(nxgep, channel); 136844961713Sgirish 136944961713Sgirish dma_buf_poolp = nxgep->rx_buf_pool_p; 137044961713Sgirish dma_cntl_poolp = nxgep->rx_cntl_pool_p; 137144961713Sgirish 137244961713Sgirish if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 137344961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1374*52ccf843Smisaki "<== nxge_rxdma_fixup_channel: buf not allocated")); 137544961713Sgirish return; 137644961713Sgirish } 137744961713Sgirish 137844961713Sgirish ndmas = dma_buf_poolp->ndmas; 137944961713Sgirish if (!ndmas) { 138044961713Sgirish 
NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1381*52ccf843Smisaki "<== nxge_rxdma_fixup_channel: no dma allocated")); 138244961713Sgirish return; 138344961713Sgirish } 138444961713Sgirish 1385a3c5bd6dSspeer rx_rbr_rings = nxgep->rx_rbr_rings; 138644961713Sgirish rx_rcr_rings = nxgep->rx_rcr_rings; 138744961713Sgirish rbr_rings = rx_rbr_rings->rbr_rings; 138844961713Sgirish rcr_rings = rx_rcr_rings->rcr_rings; 138944961713Sgirish rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 139044961713Sgirish rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 139144961713Sgirish 139244961713Sgirish /* Reinitialize the receive block and completion rings */ 139344961713Sgirish rbrp = (p_rx_rbr_ring_t)rbr_rings[entry], 1394*52ccf843Smisaki rcrp = (p_rx_rcr_ring_t)rcr_rings[entry], 1395*52ccf843Smisaki mboxp = (p_rx_mbox_t)rx_mbox_p[entry]; 139644961713Sgirish 139744961713Sgirish 139844961713Sgirish rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 139944961713Sgirish rbrp->rbr_rd_index = 0; 140044961713Sgirish rcrp->comp_rd_index = 0; 140144961713Sgirish rcrp->comp_wt_index = 0; 140244961713Sgirish 140344961713Sgirish dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 140444961713Sgirish bzero((caddr_t)dmap->kaddrp, dmap->alength); 140544961713Sgirish 140644961713Sgirish status = nxge_rxdma_start_channel(nxgep, channel, 1407*52ccf843Smisaki rbrp, rcrp, mboxp); 140844961713Sgirish if (status != NXGE_OK) { 140944961713Sgirish goto nxge_rxdma_fixup_channel_fail; 141044961713Sgirish } 141144961713Sgirish if (status != NXGE_OK) { 141244961713Sgirish goto nxge_rxdma_fixup_channel_fail; 141344961713Sgirish } 141444961713Sgirish 141544961713Sgirish nxge_rxdma_fixup_channel_fail: 141644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1417*52ccf843Smisaki "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status)); 141844961713Sgirish 141944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel")); 142044961713Sgirish } 142144961713Sgirish 1422678453a8Sspeer /* ARGSUSED */ 142344961713Sgirish int 142444961713Sgirish 
nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel) 142544961713Sgirish { 1426678453a8Sspeer return (channel); 142744961713Sgirish } 142844961713Sgirish 142944961713Sgirish p_rx_rbr_ring_t 143044961713Sgirish nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 143144961713Sgirish { 1432678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 1433678453a8Sspeer nxge_channel_t rdc; 143444961713Sgirish 143544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1436*52ccf843Smisaki "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 143744961713Sgirish 1438678453a8Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1439678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1440678453a8Sspeer "<== nxge_rxdma_get_rbr_ring: " 1441678453a8Sspeer "NULL ring pointer(s)")); 144244961713Sgirish return (NULL); 144344961713Sgirish } 1444678453a8Sspeer 1445678453a8Sspeer if (set->owned.map == 0) { 144644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1447678453a8Sspeer "<== nxge_rxdma_get_rbr_ring: no channels")); 144844961713Sgirish return (NULL); 144944961713Sgirish } 145044961713Sgirish 1451678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1452678453a8Sspeer if ((1 << rdc) & set->owned.map) { 1453678453a8Sspeer rx_rbr_ring_t *ring = 1454678453a8Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 1455678453a8Sspeer if (ring) { 1456678453a8Sspeer if (channel == ring->rdc) { 1457678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 1458678453a8Sspeer "==> nxge_rxdma_get_rbr_ring: " 1459678453a8Sspeer "channel %d ring $%p", rdc, ring)); 1460678453a8Sspeer return (ring); 1461678453a8Sspeer } 1462678453a8Sspeer } 146344961713Sgirish } 146444961713Sgirish } 146544961713Sgirish 146644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1467*52ccf843Smisaki "<== nxge_rxdma_get_rbr_ring: not found")); 146844961713Sgirish 146944961713Sgirish return (NULL); 147044961713Sgirish } 147144961713Sgirish 147244961713Sgirish p_rx_rcr_ring_t 147344961713Sgirish nxge_rxdma_get_rcr_ring(p_nxge_t 
nxgep, uint16_t channel) 147444961713Sgirish { 1475678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 1476678453a8Sspeer nxge_channel_t rdc; 147744961713Sgirish 147844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1479*52ccf843Smisaki "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 148044961713Sgirish 1481678453a8Sspeer if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) { 1482678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1483678453a8Sspeer "<== nxge_rxdma_get_rcr_ring: " 1484678453a8Sspeer "NULL ring pointer(s)")); 148544961713Sgirish return (NULL); 148644961713Sgirish } 1487678453a8Sspeer 1488678453a8Sspeer if (set->owned.map == 0) { 148944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1490678453a8Sspeer "<== nxge_rxdma_get_rbr_ring: no channels")); 149144961713Sgirish return (NULL); 149244961713Sgirish } 149344961713Sgirish 1494678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1495678453a8Sspeer if ((1 << rdc) & set->owned.map) { 1496678453a8Sspeer rx_rcr_ring_t *ring = 1497678453a8Sspeer nxgep->rx_rcr_rings->rcr_rings[rdc]; 1498678453a8Sspeer if (ring) { 1499678453a8Sspeer if (channel == ring->rdc) { 1500678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 1501678453a8Sspeer "==> nxge_rxdma_get_rcr_ring: " 1502678453a8Sspeer "channel %d ring $%p", rdc, ring)); 1503678453a8Sspeer return (ring); 1504678453a8Sspeer } 1505678453a8Sspeer } 150644961713Sgirish } 150744961713Sgirish } 150844961713Sgirish 150944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1510*52ccf843Smisaki "<== nxge_rxdma_get_rcr_ring: not found")); 151144961713Sgirish 151244961713Sgirish return (NULL); 151344961713Sgirish } 151444961713Sgirish 151544961713Sgirish /* 151644961713Sgirish * Static functions start here. 
 */

/*
 * nxge_allocb
 *
 *	Allocate a receive message descriptor (rx_msg_t) and attach a data
 *	buffer to it.  If a DMA buffer area (dmabuf_p) is supplied, the
 *	buffer is carved out of that pre-allocated region and the region's
 *	bookkeeping (kernel/IO addresses, length, offset, DMA cookie) is
 *	advanced past the carved part; otherwise a standalone buffer is
 *	kmem-allocated.  The buffer is wrapped in an mblk via desballoc()
 *	with nxge_freeb() registered as its free routine.
 *
 *	size:		number of bytes of buffer to attach.
 *	pri:		priority passed through to desballoc().
 *	dmabuf_p:	optional DMA buffer area to carve from (may be NULL).
 *
 *	Returns the new rx_msg_t, or NULL on any allocation failure.
 */
static p_rx_msg_t
nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p)
{
	p_rx_msg_t nxge_mp = NULL;
	p_nxge_dma_common_t dmamsg_p;
	uchar_t *buffer;

	nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
	if (nxge_mp == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "Allocation of a rx msg failed."));
		goto nxge_allocb_exit;
	}

	nxge_mp->use_buf_pool = B_FALSE;
	if (dmabuf_p) {
		nxge_mp->use_buf_pool = B_TRUE;
		dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma;
		/*
		 * Clone the DMA descriptor, then restrict the clone to
		 * just this one block.
		 */
		*dmamsg_p = *dmabuf_p;
		dmamsg_p->nblocks = 1;
		dmamsg_p->block_size = size;
		dmamsg_p->alength = size;
		buffer = (uchar_t *)dmabuf_p->kaddrp;

		/* Advance the source area past the region just carved out. */
		dmabuf_p->kaddrp = (void *)
		    ((char *)dmabuf_p->kaddrp + size);
		dmabuf_p->ioaddr_pp = (void *)
		    ((char *)dmabuf_p->ioaddr_pp + size);
		dmabuf_p->alength -= size;
		dmabuf_p->offset += size;
		dmabuf_p->dma_cookie.dmac_laddress += size;
		dmabuf_p->dma_cookie.dmac_size -= size;

	} else {
		buffer = KMEM_ALLOC(size, KM_NOSLEEP);
		if (buffer == NULL) {
			NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
			    "Allocation of a receive page failed."));
			goto nxge_allocb_fail1;
		}
	}

	nxge_mp->rx_mblk_p = desballoc(buffer, size, pri,
	    &nxge_mp->freeb);
	if (nxge_mp->rx_mblk_p == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed."));
		goto nxge_allocb_fail2;
	}

	nxge_mp->buffer = buffer;
	nxge_mp->block_size = size;
	/* nxge_freeb() runs when the last reference to the mblk is freed. */
	nxge_mp->freeb.free_func = (void (*)())nxge_freeb;
	nxge_mp->freeb.free_arg = (caddr_t)nxge_mp;
	nxge_mp->ref_cnt = 1;
	nxge_mp->free = B_TRUE;
	nxge_mp->rx_use_bcopy = B_FALSE;

	atomic_inc_32(&nxge_mblks_pending);

	goto nxge_allocb_exit;

nxge_allocb_fail2:
	/* Only a standalone buffer is freed; a carved one stays in its area. */
	if (!nxge_mp->use_buf_pool) {
		KMEM_FREE(buffer, size);
	}

nxge_allocb_fail1:
	KMEM_FREE(nxge_mp, sizeof (rx_msg_t));
	nxge_mp = NULL;

nxge_allocb_exit:
	return (nxge_mp);
}

/*
 * nxge_dupb
 *
 *	Create a zero-copy mblk that points directly into the given receive
 *	buffer at `offset' for `size' bytes, sharing the buffer's free
 *	routine.  On success the buffer's reference count is bumped so the
 *	buffer is not recycled until the duplicate is freed.
 *
 *	Returns the new mblk, or NULL if desballoc() fails.
 */
p_mblk_t
nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
{
	p_mblk_t mp;

	NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb"));
	NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p "
	    "offset = 0x%08X "
	    "size = 0x%08X",
	    nxge_mp, offset, size));

	mp = desballoc(&nxge_mp->buffer[offset], size,
	    0, &nxge_mp->freeb);
	if (mp == NULL) {
		NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
		goto nxge_dupb_exit;
	}

	/* The hardware buffer now has one more outstanding user. */
	atomic_inc_32(&nxge_mp->ref_cnt);

nxge_dupb_exit:
	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
	    nxge_mp));
	return (mp);
}

/*
 * nxge_dupb_bcopy
 *
 *	Copy-mode variant of nxge_dupb(): allocate a fresh mblk and bcopy
 *	`size' bytes out of the receive buffer into it, leaving
 *	NXGE_RXBUF_EXTRA bytes of headroom.  The source buffer's reference
 *	count is not touched, so the buffer can be recycled independently.
 *
 *	Returns the new mblk, or NULL if allocb() fails.
 */
p_mblk_t
nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
{
	p_mblk_t mp;
	uchar_t *dp;

	mp = allocb(size + NXGE_RXBUF_EXTRA, 0);
	if (mp == NULL) {
		/*
		 * NOTE(review): the message text says "desballoc" but the
		 * failing call above is allocb().
		 */
		NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
		goto nxge_dupb_bcopy_exit;
	}
	dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA;
	bcopy((void *)&nxge_mp->buffer[offset], dp, size);
	mp->b_wptr = dp + size;

nxge_dupb_bcopy_exit:
	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
	    nxge_mp));
	return (mp);
}

void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p,
	p_rx_msg_t rx_msg_p);

/*
 * nxge_post_page
 *
 *	Return a receive buffer to the hardware: reset the rx_msg_t usage
 *	state, write its (shifted) buffer address into the next RBR
 *	descriptor slot under the post lock, and kick the RDC so the
 *	hardware sees one new buffer.
 */
void
nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
{
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page"));

	/* Reuse this buffer */
	rx_msg_p->free = B_FALSE;
	rx_msg_p->cur_usage_cnt = 0;
	rx_msg_p->max_usage_cnt = 0;
	rx_msg_p->pkt_buf_size = 0;

	if (rx_rbr_p->rbr_use_bcopy) {
		rx_msg_p->rx_use_bcopy = B_FALSE;
		atomic_dec_32(&rx_rbr_p->rbr_consumed);
	}

	/*
	 * Get the rbr header pointer and its offset index.
	 */
	MUTEX_ENTER(&rx_rbr_p->post_lock);
	rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
	    rx_rbr_p->rbr_wrap_mask);
	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
	MUTEX_EXIT(&rx_rbr_p->post_lock);
	/* Tell the RDC one more buffer is available. */
	npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep),
	    rx_rbr_p->rdc, 1);

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "<== nxge_post_page (channel %d post_next_index %d)",
	    rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
}

/*
 * nxge_freeb
 *
 *	Free routine registered with desballoc() for receive buffers.
 *	Drops one reference on the rx_msg_t.  When the last reference goes
 *	away, the buffer and its descriptor are freed; if that was also the
 *	last buffer of a ring in the RBR_UNMAPPED state, the ring's data
 *	buffers, ring_info, message ring and the ring itself are torn down.
 *	Otherwise, if the buffer was marked free and this drop left exactly
 *	one reference, the buffer is reposted to the hardware (provided the
 *	ring is still in RBR_POSTING state).
 */
void
nxge_freeb(p_rx_msg_t rx_msg_p)
{
	size_t size;
	uchar_t *buffer = NULL;
	int ref_cnt;
	boolean_t free_state = B_FALSE;

	rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p;

	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
	NXGE_DEBUG_MSG((NULL, MEM2_CTL,
	    "nxge_freeb:rx_msg_p = $%p (block pending %d)",
	    rx_msg_p, nxge_mblks_pending));

	/*
	 * First we need to get the free state, then
	 * atomic decrement the reference count to prevent
	 * the race condition with the interrupt thread that
	 * is processing a loaned up buffer block.
	 */
	free_state = rx_msg_p->free;
	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
	if (!ref_cnt) {
		atomic_dec_32(&nxge_mblks_pending);
		buffer = rx_msg_p->buffer;
		size = rx_msg_p->block_size;
		NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
		    "will free: rx_msg_p = $%p (block pending %d)",
		    rx_msg_p, nxge_mblks_pending));

		/* Pool-carved buffers are owned by the ring, not by us. */
		if (!rx_msg_p->use_buf_pool) {
			KMEM_FREE(buffer, size);
		}

		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));

		if (ring) {
			/*
			 * Decrement the receive buffer ring's reference
			 * count, too.
			 */
			atomic_dec_32(&ring->rbr_ref_cnt);

			/*
			 * Free the receive buffer ring, if
			 * 1. all the receive buffers have been freed
			 * 2. and we are in the proper state (that is,
			 *    we are not UNMAPPING).
			 */
			if (ring->rbr_ref_cnt == 0 &&
			    ring->rbr_state == RBR_UNMAPPED) {
				/*
				 * Free receive data buffers,
				 * buffer index information
				 * (rxring_info) and
				 * the message block ring.
				 */
				NXGE_DEBUG_MSG((NULL, RX_CTL,
				    "nxge_freeb:rx_msg_p = $%p "
				    "(block pending %d) free buffers",
				    rx_msg_p, nxge_mblks_pending));
				nxge_rxdma_databuf_free(ring);
				if (ring->ring_info) {
					KMEM_FREE(ring->ring_info,
					    sizeof (rxring_info_t));
				}

				if (ring->rx_msg_ring) {
					KMEM_FREE(ring->rx_msg_ring,
					    ring->tnblocks *
					    sizeof (p_rx_msg_t));
				}
				KMEM_FREE(ring, sizeof (*ring));
			}
		}
		return;
	}

	/*
	 * Repost buffer.
	 */
	if (free_state && (ref_cnt == 1) && ring) {
		NXGE_DEBUG_MSG((NULL, RX_CTL,
		    "nxge_freeb: post page $%p:", rx_msg_p));
		if (ring->rbr_state == RBR_POSTING)
			nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p);
	}

	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
}

/*
 * nxge_rx_intr
 *
 *	Interrupt handler for one receive DMA channel.  Reads the channel's
 *	control/status register, processes the pending completion entries
 *	via nxge_rx_pkts_vring(), dispatches error events, acknowledges the
 *	write-1-to-clear status bits (setting mex to enable the mailbox
 *	update interrupt), and re-arms the logical group when this channel
 *	is its only logical device.
 *
 *	arg1:	the logical device (nxge_ldv_t) for the channel.
 *	arg2:	the driver instance (nxge_t); refreshed from ldvp if stale.
 *
 *	Returns DDI_INTR_CLAIMED on every visible path.
 */
uint_t
nxge_rx_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t nxgep = (p_nxge_t)arg2;
	p_nxge_ldg_t ldgp;
	uint8_t channel;
	npi_handle_t handle;
	rx_dma_ctl_stat_t cs;

#ifdef NXGE_DEBUG
	rxdma_cfig1_t cfg;
#endif
	uint_t serviced = DDI_INTR_UNCLAIMED;

	if (ldvp == NULL) {
		NXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== nxge_rx_intr: arg2 $%p arg1 $%p",
		    nxgep, ldvp));

		return (DDI_INTR_CLAIMED);
	}

	/* Trust the logical device's instance pointer over arg2. */
	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
		nxgep = ldvp->nxgep;
	}

	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_rx_intr: interface not started or intialized"));
		return (DDI_INTR_CLAIMED);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_intr: arg2 $%p arg1 $%p",
	    nxgep, ldvp));

	/*
	 * This interrupt handler is for a specific
	 * receive dma channel.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Get the control and status for this channel.
	 */
	channel = ldvp->channel;
	ldgp = ldvp->ldgp;
	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
	    "cs 0x%016llx rcrto 0x%x rcrthres %x",
	    channel,
	    cs.value,
	    cs.bits.hdw.rcrto,
	    cs.bits.hdw.rcrthres));

	nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs);
	serviced = DDI_INTR_CLAIMED;

	/* error events. */
	if (cs.value & RX_DMA_CTL_STAT_ERROR) {
		(void) nxge_rx_err_evnts(nxgep, channel, cs);
	}

	/* NOTE(review): no goto targets this label in this file chunk. */
nxge_intr_exit:
	/*
	 * Enable the mailbox update interrupt if we want
	 * to use mailbox. We probably don't need to use
	 * mailbox as it only saves us one pio read.
	 * Also write 1 to rcrthres and rcrto to clear
	 * these two edge triggered bits.
	 */

	cs.value &= RX_DMA_CTL_STAT_WR1C;
	cs.bits.hdw.mex = 1;
	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
	    cs.value);

	/*
	 * Rearm this logical group if this is a single device
	 * group.
	 */
	if (ldgp->nldvs == 1) {
		ldgimgm_t mgm;
		mgm.value = 0;
		mgm.bits.ldw.arm = 1;
		mgm.bits.ldw.timer = ldgp->ldg_timer;
		if (isLDOMguest(nxgep)) {
			/* A guest domain re-arms through the hypervisor. */
			nxge_hio_ldgimgn(nxgep, ldgp);
		} else {
			NXGE_REG_WR64(handle,
			    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
			    mgm.value);
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: serviced %d",
	    serviced));
	return (serviced);
}

/*
 * Process the packets received in the specified logical device
 * and pass up a chain of message blocks to the upper layer.
 * In interrupt mode only: a ring whose poll_flag is set is drained by the
 * polling path instead, so this routine returns immediately for it.
 * The resulting chain is handed to mac_rx(), or to the vio receive
 * callback when running as an LDOMs guest.
 */
static void
nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs)
{
	p_mblk_t mp;
	p_rx_rcr_ring_t rcrp;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring"));
	rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex];
	if (rcrp->poll_flag) {
		/* It is in the poll mode */
		return;
	}

	if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rx_pkts_vring: no mp"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p",
	    mp));

#ifdef NXGE_DEBUG
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts_vring:calling mac_rx "
	    "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p "
	    "mac_handle $%p",
	    mp->b_wptr - mp->b_rptr,
	    mp, mp->b_cont, mp->b_next,
	    rcrp, rcrp->rcr_mac_handle));

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts_vring: dump packets "
	    "(mp $%p b_rptr $%p b_wptr $%p):\n %s",
	    mp,
	    mp->b_rptr,
	    mp->b_wptr,
	    nxge_dump_packet((char *)mp->b_rptr,
	    mp->b_wptr - mp->b_rptr)));
	if (mp->b_cont) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts_vring: dump b_cont packets "
		    "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s",
		    mp->b_cont,
		    mp->b_cont->b_rptr,
		    mp->b_cont->b_wptr,
		    nxge_dump_packet((char *)mp->b_cont->b_rptr,
		    mp->b_cont->b_wptr - mp->b_cont->b_rptr)));
	}
	if (mp->b_next) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts_vring: dump next packets "
		    "(b_rptr $%p): %s",
		    mp->b_next->b_rptr,
		    nxge_dump_packet((char *)mp->b_next->b_rptr,
		    mp->b_next->b_wptr - mp->b_next->b_rptr)));
	}
#endif

	if (!isLDOMguest(nxgep))
		mac_rx(nxgep->mach, rcrp->rcr_mac_handle, mp);
#if defined(sun4v)
	else {			/* isLDOMguest(nxgep) */
		nxge_hio_data_t *nhd = (nxge_hio_data_t *)
		    nxgep->nxge_hw_p->hio;
		nx_vio_fp_t *vio = &nhd->hio.vio;

		if (vio->cb.vio_net_rx_cb) {
			(*vio->cb.vio_net_rx_cb)
			    (nxgep->hio_vr->vhp, mp);
		}
	}
#endif
}


/*
 * This routine is the main packet receive processing function.
 * It gets the packet type, error code, and buffer related
 * information from the receive completion entry.
 * How many completion entries to process is based on the number of packets
 * queued by the hardware, a hardware maintained tail pointer
 * and a configurable receive packet count.
 *
 * A chain of message blocks will be created as result of processing
 * the completion entries.
 * This chain of message blocks will be returned and
 * a hardware control status register will be updated with the number of
 * packets were removed from the hardware queue.
 *
 * bytes_to_pickup limits how many bytes are chained in one call:
 * -1 means no byte limit (only the nxge_max_rx_pkts packet limit applies).
 */
static mblk_t *
nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs,
    int bytes_to_pickup)
{
	npi_handle_t handle;
	uint8_t channel;
	uint32_t comp_rd_index;
	p_rcr_entry_t rcr_desc_rd_head_p;
	p_rcr_entry_t rcr_desc_rd_head_pp;
	p_mblk_t nmp, mp_cont, head_mp, *tail_mp;
	uint16_t qlen, nrcr_read, npkt_read;
	uint32_t qlen_hw;
	boolean_t multi;
	rcrcfig_b_t rcr_cfg_b;
	int totallen = 0;
#if defined(_BIG_ENDIAN)
	npi_status_t rs = NPI_SUCCESS;
#endif

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: "
	    "channel %d", rcr_p->rdc));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		return (NULL);
	}
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	channel = rcr_p->rdc;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts: START: rcr channel %d "
	    "head_p $%p head_pp $%p index %d ",
	    channel, rcr_p->rcr_desc_rd_head_p,
	    rcr_p->rcr_desc_rd_head_pp,
	    rcr_p->comp_rd_index));


	/* Read the number of queued completion entries from the hardware. */
#if !defined(_BIG_ENDIAN)
	qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
#else
	rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
	if (rs != NPI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: "
		    "channel %d, get qlen failed 0x%08x",
		    channel, rs));
		return (NULL);
	}
#endif
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
	    "qlen %d", channel, qlen));



	if (!qlen) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts:rcr channel %d "
		    "qlen %d (no pkts)", channel, qlen));

		return (NULL);
	}

	comp_rd_index = rcr_p->comp_rd_index;

	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
	nrcr_read = npkt_read = 0;

	/*
	 * Number of packets queued
	 * (The jumbo or multi packet will be counted as only one
	 *  packets and it may take up more than one completion entry).
	 */
	qlen_hw = (qlen < nxge_max_rx_pkts) ?
	    qlen : nxge_max_rx_pkts;
	head_mp = NULL;
	tail_mp = &head_mp;
	nmp = mp_cont = NULL;
	multi = B_FALSE;

	while (qlen_hw) {

#ifdef NXGE_DEBUG
		nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
#endif
		/*
		 * Process one completion ring entry.
		 */
		nxge_receive_packet(nxgep,
		    rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);

		/*
		 * message chaining modes
		 */
		if (nmp) {
			nmp->b_next = NULL;
			if (!multi && !mp_cont) { /* frame fits a partition */
				*tail_mp = nmp;
				tail_mp = &nmp->b_next;
				totallen += MBLKL(nmp);
				nmp = NULL;
			} else if (multi && !mp_cont) {	/* first segment */
				*tail_mp = nmp;
				tail_mp = &nmp->b_cont;
				totallen += MBLKL(nmp);
			} else if (multi && mp_cont) {	/* mid of multi segs */
				*tail_mp = mp_cont;
				tail_mp = &mp_cont->b_cont;
				totallen += MBLKL(mp_cont);
			} else if (!multi && mp_cont) { /* last segment */
				*tail_mp = mp_cont;
				tail_mp = &nmp->b_next;
				totallen += MBLKL(mp_cont);
				nmp = NULL;
			}
		}
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts: loop: rcr channel %d "
		    "before updating: multi %d "
		    "nrcr_read %d "
		    "npk read %d "
		    "head_pp $%p index %d ",
		    channel,
		    multi,
		    nrcr_read, npkt_read, rcr_desc_rd_head_pp,
		    comp_rd_index));

		/* A multi-entry packet counts once, on its last entry. */
		if (!multi) {
			qlen_hw--;
			npkt_read++;
		}

		/*
		 * Update the next read entry.
		 */
		comp_rd_index = NEXT_ENTRY(comp_rd_index,
		    rcr_p->comp_wrap_mask);

		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
		    rcr_p->rcr_desc_first_p,
		    rcr_p->rcr_desc_last_p);

		nrcr_read++;

		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rx_pkts: (SAM, process one packet) "
		    "nrcr_read %d",
		    nrcr_read));
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts: loop: rcr channel %d "
		    "multi %d "
		    "nrcr_read %d "
		    "npk read %d "
		    "head_pp $%p index %d ",
		    channel,
		    multi,
		    nrcr_read, npkt_read, rcr_desc_rd_head_pp,
		    comp_rd_index));

		if ((bytes_to_pickup != -1) &&
		    (totallen >= bytes_to_pickup)) {
			break;
		}
	}

	rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
	rcr_p->comp_rd_index = comp_rd_index;
	rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;

	/* Push any changed interrupt coalescing tunables to the hardware. */
	if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
	    (nxgep->intr_threshold != rcr_p->intr_threshold)) {
		rcr_p->intr_timeout = nxgep->intr_timeout;
		rcr_p->intr_threshold = nxgep->intr_threshold;
		rcr_cfg_b.value = 0x0ULL;
		if (rcr_p->intr_timeout)
			rcr_cfg_b.bits.ldw.entout = 1;
		rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout;
		rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold;

		RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG,
		    channel, rcr_cfg_b.value);
	}

	cs.bits.ldw.pktread = npkt_read;
	cs.bits.ldw.ptrread = nrcr_read;
	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
	    channel, cs.value);
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts: EXIT: rcr channel %d "
	    "head_pp $%p index %016llx ",
	    channel,
	    rcr_p->rcr_desc_rd_head_pp,
	    rcr_p->comp_rd_index));
	/*
	 * Update RCR buffer pointer read and number of packets
	 * read.
	 */

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_pkts"));
	return (head_mp);
}

/*
 * nxge_receive_packet
 *
 *	Decode a single receive completion ring entry and build or extend
 *	the message block chain through *mp / *mp_cont; *multi_p flags a
 *	multi-fragment packet (see the caller, nxge_rx_pkts()).  The body
 *	of this routine continues beyond this chunk of the file.
 */
void
nxge_receive_packet(p_nxge_t nxgep,
    p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
    boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
{
	p_mblk_t nmp = NULL;
	uint64_t multi;
	uint64_t dcf_err;
	uint8_t channel;

	boolean_t first_entry = B_TRUE;
	boolean_t is_tcp_udp = B_FALSE;
	boolean_t buffer_free = B_FALSE;
	boolean_t error_send_up = B_FALSE;
	uint8_t error_type;
	uint16_t l2_len;
	uint16_t skip_len;
	uint8_t pktbufsz_type;
	uint64_t rcr_entry;
	uint64_t *pkt_buf_addr_pp;
	uint64_t *pkt_buf_addr_p;
	uint32_t buf_offset;
	uint32_t bsize;
	uint32_t 
error_disp_cnt; 218644961713Sgirish uint32_t msg_index; 218744961713Sgirish p_rx_rbr_ring_t rx_rbr_p; 218844961713Sgirish p_rx_msg_t *rx_msg_ring_p; 218944961713Sgirish p_rx_msg_t rx_msg_p; 219044961713Sgirish uint16_t sw_offset_bytes = 0, hdr_size = 0; 219144961713Sgirish nxge_status_t status = NXGE_OK; 219244961713Sgirish boolean_t is_valid = B_FALSE; 219344961713Sgirish p_nxge_rx_ring_stats_t rdc_stats; 2194a3c5bd6dSspeer uint32_t bytes_read; 2195a3c5bd6dSspeer uint64_t pkt_type; 2196a3c5bd6dSspeer uint64_t frag; 21974202ea4bSsbehera boolean_t pkt_too_long_err = B_FALSE; 219844961713Sgirish #ifdef NXGE_DEBUG 219944961713Sgirish int dump_len; 220044961713Sgirish #endif 220144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 220244961713Sgirish first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 220344961713Sgirish 220444961713Sgirish rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 220544961713Sgirish 220644961713Sgirish multi = (rcr_entry & RCR_MULTI_MASK); 220744961713Sgirish dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 220844961713Sgirish pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 220944961713Sgirish 221044961713Sgirish error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 221144961713Sgirish frag = (rcr_entry & RCR_FRAG_MASK); 221244961713Sgirish 221344961713Sgirish l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 221444961713Sgirish 221544961713Sgirish pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2216*52ccf843Smisaki RCR_PKTBUFSZ_SHIFT); 2217adfcba55Sjoycey #if defined(__i386) 2218adfcba55Sjoycey pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2219*52ccf843Smisaki RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2220adfcba55Sjoycey #else 222144961713Sgirish pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2222*52ccf843Smisaki RCR_PKT_BUF_ADDR_SHIFT); 2223adfcba55Sjoycey #endif 222444961713Sgirish 222544961713Sgirish channel = rcr_p->rdc; 222644961713Sgirish 222744961713Sgirish 
NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2228*52ccf843Smisaki "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2229*52ccf843Smisaki "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2230*52ccf843Smisaki "error_type 0x%x pkt_type 0x%x " 2231*52ccf843Smisaki "pktbufsz_type %d ", 2232*52ccf843Smisaki rcr_desc_rd_head_p, 2233*52ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 2234*52ccf843Smisaki multi, 2235*52ccf843Smisaki error_type, 2236*52ccf843Smisaki pkt_type, 2237*52ccf843Smisaki pktbufsz_type)); 223844961713Sgirish 223944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2240*52ccf843Smisaki "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2241*52ccf843Smisaki "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2242*52ccf843Smisaki "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2243*52ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 2244*52ccf843Smisaki multi, 2245*52ccf843Smisaki error_type, 2246*52ccf843Smisaki pkt_type)); 224744961713Sgirish 224844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2249*52ccf843Smisaki "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2250*52ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 2251*52ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 225244961713Sgirish 225344961713Sgirish /* get the stats ptr */ 225444961713Sgirish rdc_stats = rcr_p->rdc_stats; 225544961713Sgirish 225644961713Sgirish if (!l2_len) { 225744961713Sgirish 225844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 2259*52ccf843Smisaki "<== nxge_receive_packet: failed: l2 length is 0.")); 226044961713Sgirish return; 226144961713Sgirish } 226244961713Sgirish 22634202ea4bSsbehera /* 22644202ea4bSsbehera * Sofware workaround for BMAC hardware limitation that allows 22654202ea4bSsbehera * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 22664202ea4bSsbehera * instead of 0x2400 for jumbo. 
22674202ea4bSsbehera */ 22684202ea4bSsbehera if (l2_len > nxgep->mac.maxframesize) { 22694202ea4bSsbehera pkt_too_long_err = B_TRUE; 22704202ea4bSsbehera } 22714202ea4bSsbehera 227256d930aeSspeer /* Hardware sends us 4 bytes of CRC as no stripping is done. */ 227356d930aeSspeer l2_len -= ETHERFCSL; 227456d930aeSspeer 227544961713Sgirish /* shift 6 bits to get the full io address */ 2276adfcba55Sjoycey #if defined(__i386) 2277adfcba55Sjoycey pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2278*52ccf843Smisaki RCR_PKT_BUF_ADDR_SHIFT_FULL); 2279adfcba55Sjoycey #else 228044961713Sgirish pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2281*52ccf843Smisaki RCR_PKT_BUF_ADDR_SHIFT_FULL); 2282adfcba55Sjoycey #endif 228344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2284*52ccf843Smisaki "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2285*52ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 2286*52ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 228744961713Sgirish 228844961713Sgirish rx_rbr_p = rcr_p->rx_rbr_p; 228944961713Sgirish rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 229044961713Sgirish 229144961713Sgirish if (first_entry) { 229244961713Sgirish hdr_size = (rcr_p->full_hdr_flag ? 
RXDMA_HDR_SIZE_FULL : 2293*52ccf843Smisaki RXDMA_HDR_SIZE_DEFAULT); 229444961713Sgirish 229544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 2296*52ccf843Smisaki "==> nxge_receive_packet: first entry 0x%016llx " 2297*52ccf843Smisaki "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2298*52ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 2299*52ccf843Smisaki hdr_size)); 230044961713Sgirish } 230144961713Sgirish 230244961713Sgirish MUTEX_ENTER(&rcr_p->lock); 230344961713Sgirish MUTEX_ENTER(&rx_rbr_p->lock); 230444961713Sgirish 230544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 2306*52ccf843Smisaki "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2307*52ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 2308*52ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 230944961713Sgirish 231044961713Sgirish /* 231144961713Sgirish * Packet buffer address in the completion entry points 231244961713Sgirish * to the starting buffer address (offset 0). 231344961713Sgirish * Use the starting buffer address to locate the corresponding 231444961713Sgirish * kernel address. 
231544961713Sgirish */ 231644961713Sgirish status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2317*52ccf843Smisaki pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2318*52ccf843Smisaki &buf_offset, 2319*52ccf843Smisaki &msg_index); 232044961713Sgirish 232144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 2322*52ccf843Smisaki "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2323*52ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 2324*52ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 232544961713Sgirish 232644961713Sgirish if (status != NXGE_OK) { 232744961713Sgirish MUTEX_EXIT(&rx_rbr_p->lock); 232844961713Sgirish MUTEX_EXIT(&rcr_p->lock); 232944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 2330*52ccf843Smisaki "<== nxge_receive_packet: found vaddr failed %d", 2331*52ccf843Smisaki status)); 233244961713Sgirish return; 233344961713Sgirish } 233444961713Sgirish 233544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2336*52ccf843Smisaki "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2337*52ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 2338*52ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 233944961713Sgirish 234044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2341*52ccf843Smisaki "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2342*52ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 2343*52ccf843Smisaki msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 234444961713Sgirish 234544961713Sgirish rx_msg_p = rx_msg_ring_p[msg_index]; 234644961713Sgirish 234744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2348*52ccf843Smisaki "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2349*52ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 2350*52ccf843Smisaki msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 235144961713Sgirish 235244961713Sgirish switch (pktbufsz_type) { 235344961713Sgirish case RCR_PKTBUFSZ_0: 235444961713Sgirish bsize = rx_rbr_p->pkt_buf_size0_bytes; 235544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2356*52ccf843Smisaki "==> 
nxge_receive_packet: 0 buf %d", bsize)); 235744961713Sgirish break; 235844961713Sgirish case RCR_PKTBUFSZ_1: 235944961713Sgirish bsize = rx_rbr_p->pkt_buf_size1_bytes; 236044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2361*52ccf843Smisaki "==> nxge_receive_packet: 1 buf %d", bsize)); 236244961713Sgirish break; 236344961713Sgirish case RCR_PKTBUFSZ_2: 236444961713Sgirish bsize = rx_rbr_p->pkt_buf_size2_bytes; 236544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 2366*52ccf843Smisaki "==> nxge_receive_packet: 2 buf %d", bsize)); 236744961713Sgirish break; 236844961713Sgirish case RCR_SINGLE_BLOCK: 236944961713Sgirish bsize = rx_msg_p->block_size; 237044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2371*52ccf843Smisaki "==> nxge_receive_packet: single %d", bsize)); 237244961713Sgirish 237344961713Sgirish break; 237444961713Sgirish default: 237544961713Sgirish MUTEX_EXIT(&rx_rbr_p->lock); 237644961713Sgirish MUTEX_EXIT(&rcr_p->lock); 237744961713Sgirish return; 237844961713Sgirish } 237944961713Sgirish 238044961713Sgirish DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2381*52ccf843Smisaki (buf_offset + sw_offset_bytes), 2382*52ccf843Smisaki (hdr_size + l2_len), 2383*52ccf843Smisaki DDI_DMA_SYNC_FORCPU); 238444961713Sgirish 238544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2386*52ccf843Smisaki "==> nxge_receive_packet: after first dump:usage count")); 238744961713Sgirish 238844961713Sgirish if (rx_msg_p->cur_usage_cnt == 0) { 238944961713Sgirish if (rx_rbr_p->rbr_use_bcopy) { 239044961713Sgirish atomic_inc_32(&rx_rbr_p->rbr_consumed); 239144961713Sgirish if (rx_rbr_p->rbr_consumed < 2392*52ccf843Smisaki rx_rbr_p->rbr_threshold_hi) { 239344961713Sgirish if (rx_rbr_p->rbr_threshold_lo == 0 || 2394*52ccf843Smisaki ((rx_rbr_p->rbr_consumed >= 2395*52ccf843Smisaki rx_rbr_p->rbr_threshold_lo) && 2396*52ccf843Smisaki (rx_rbr_p->rbr_bufsize_type >= 2397*52ccf843Smisaki pktbufsz_type))) { 239844961713Sgirish rx_msg_p->rx_use_bcopy = B_TRUE; 239944961713Sgirish } 240044961713Sgirish 
} else { 240144961713Sgirish rx_msg_p->rx_use_bcopy = B_TRUE; 240244961713Sgirish } 240344961713Sgirish } 240444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2405*52ccf843Smisaki "==> nxge_receive_packet: buf %d (new block) ", 2406*52ccf843Smisaki bsize)); 240744961713Sgirish 240844961713Sgirish rx_msg_p->pkt_buf_size_code = pktbufsz_type; 240944961713Sgirish rx_msg_p->pkt_buf_size = bsize; 241044961713Sgirish rx_msg_p->cur_usage_cnt = 1; 241144961713Sgirish if (pktbufsz_type == RCR_SINGLE_BLOCK) { 241244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2413*52ccf843Smisaki "==> nxge_receive_packet: buf %d " 2414*52ccf843Smisaki "(single block) ", 2415*52ccf843Smisaki bsize)); 241644961713Sgirish /* 241744961713Sgirish * Buffer can be reused once the free function 241844961713Sgirish * is called. 241944961713Sgirish */ 242044961713Sgirish rx_msg_p->max_usage_cnt = 1; 242144961713Sgirish buffer_free = B_TRUE; 242244961713Sgirish } else { 242344961713Sgirish rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 242444961713Sgirish if (rx_msg_p->max_usage_cnt == 1) { 242544961713Sgirish buffer_free = B_TRUE; 242644961713Sgirish } 242744961713Sgirish } 242844961713Sgirish } else { 242944961713Sgirish rx_msg_p->cur_usage_cnt++; 243044961713Sgirish if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 243144961713Sgirish buffer_free = B_TRUE; 243244961713Sgirish } 243344961713Sgirish } 243444961713Sgirish 243544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 243644961713Sgirish "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 2437*52ccf843Smisaki msg_index, l2_len, 2438*52ccf843Smisaki rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 243944961713Sgirish 24404202ea4bSsbehera if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 244144961713Sgirish rdc_stats->ierrors++; 244244961713Sgirish if (dcf_err) { 244344961713Sgirish rdc_stats->dcf_err++; 244444961713Sgirish #ifdef NXGE_DEBUG 244544961713Sgirish if (!rdc_stats->dcf_err) { 244644961713Sgirish 
NXGE_DEBUG_MSG((nxgep, RX_CTL, 244744961713Sgirish "nxge_receive_packet: channel %d dcf_err rcr" 244844961713Sgirish " 0x%llx", channel, rcr_entry)); 244944961713Sgirish } 245044961713Sgirish #endif 245144961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 2452*52ccf843Smisaki NXGE_FM_EREPORT_RDMC_DCF_ERR); 24534202ea4bSsbehera } else if (pkt_too_long_err) { 24544202ea4bSsbehera rdc_stats->pkt_too_long_err++; 24554202ea4bSsbehera NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 24564202ea4bSsbehera " channel %d packet length [%d] > " 24574202ea4bSsbehera "maxframesize [%d]", channel, l2_len + ETHERFCSL, 24584202ea4bSsbehera nxgep->mac.maxframesize)); 245944961713Sgirish } else { 246044961713Sgirish /* Update error stats */ 246144961713Sgirish error_disp_cnt = NXGE_ERROR_SHOW_MAX; 246244961713Sgirish rdc_stats->errlog.compl_err_type = error_type; 246344961713Sgirish 246444961713Sgirish switch (error_type) { 2465f6485eecSyc /* 2466f6485eecSyc * Do not send FMA ereport for RCR_L2_ERROR and 2467f6485eecSyc * RCR_L4_CSUM_ERROR because most likely they indicate 2468f6485eecSyc * back pressure rather than HW failures. 
2469f6485eecSyc */ 247053f3d8ecSyc case RCR_L2_ERROR: 247153f3d8ecSyc rdc_stats->l2_err++; 247253f3d8ecSyc if (rdc_stats->l2_err < 247353f3d8ecSyc error_disp_cnt) { 247444961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 247553f3d8ecSyc " nxge_receive_packet:" 247653f3d8ecSyc " channel %d RCR L2_ERROR", 247753f3d8ecSyc channel)); 247853f3d8ecSyc } 247953f3d8ecSyc break; 248053f3d8ecSyc case RCR_L4_CSUM_ERROR: 248153f3d8ecSyc error_send_up = B_TRUE; 248253f3d8ecSyc rdc_stats->l4_cksum_err++; 248353f3d8ecSyc if (rdc_stats->l4_cksum_err < 248453f3d8ecSyc error_disp_cnt) { 248553f3d8ecSyc NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 248653f3d8ecSyc " nxge_receive_packet:" 248753f3d8ecSyc " channel %d" 248853f3d8ecSyc " RCR L4_CSUM_ERROR", channel)); 248953f3d8ecSyc } 249053f3d8ecSyc break; 2491f6485eecSyc /* 2492f6485eecSyc * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 2493f6485eecSyc * RCR_ZCP_SOFT_ERROR because they reflect the same 2494f6485eecSyc * FFLP and ZCP errors that have been reported by 2495f6485eecSyc * nxge_fflp.c and nxge_zcp.c. 
2496f6485eecSyc */ 249753f3d8ecSyc case RCR_FFLP_SOFT_ERROR: 249853f3d8ecSyc error_send_up = B_TRUE; 249953f3d8ecSyc rdc_stats->fflp_soft_err++; 250053f3d8ecSyc if (rdc_stats->fflp_soft_err < 250153f3d8ecSyc error_disp_cnt) { 250253f3d8ecSyc NXGE_ERROR_MSG((nxgep, 250353f3d8ecSyc NXGE_ERR_CTL, 250453f3d8ecSyc " nxge_receive_packet:" 250553f3d8ecSyc " channel %d" 250653f3d8ecSyc " RCR FFLP_SOFT_ERROR", channel)); 250753f3d8ecSyc } 250853f3d8ecSyc break; 250953f3d8ecSyc case RCR_ZCP_SOFT_ERROR: 251053f3d8ecSyc error_send_up = B_TRUE; 251153f3d8ecSyc rdc_stats->fflp_soft_err++; 251253f3d8ecSyc if (rdc_stats->zcp_soft_err < 251353f3d8ecSyc error_disp_cnt) 251453f3d8ecSyc NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 251553f3d8ecSyc " nxge_receive_packet: Channel %d" 251653f3d8ecSyc " RCR ZCP_SOFT_ERROR", channel)); 251753f3d8ecSyc break; 251853f3d8ecSyc default: 251953f3d8ecSyc rdc_stats->rcr_unknown_err++; 252053f3d8ecSyc if (rdc_stats->rcr_unknown_err 252153f3d8ecSyc < error_disp_cnt) { 252253f3d8ecSyc NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 252353f3d8ecSyc " nxge_receive_packet: Channel %d" 252453f3d8ecSyc " RCR entry 0x%llx error 0x%x", 252553f3d8ecSyc rcr_entry, channel, error_type)); 252653f3d8ecSyc } 252753f3d8ecSyc break; 252844961713Sgirish } 252944961713Sgirish } 253044961713Sgirish 253144961713Sgirish /* 253244961713Sgirish * Update and repost buffer block if max usage 253344961713Sgirish * count is reached. 
253444961713Sgirish */ 253544961713Sgirish if (error_send_up == B_FALSE) { 2536958cea9eSml atomic_inc_32(&rx_msg_p->ref_cnt); 253744961713Sgirish if (buffer_free == B_TRUE) { 253844961713Sgirish rx_msg_p->free = B_TRUE; 253944961713Sgirish } 254044961713Sgirish 254144961713Sgirish MUTEX_EXIT(&rx_rbr_p->lock); 254244961713Sgirish MUTEX_EXIT(&rcr_p->lock); 254344961713Sgirish nxge_freeb(rx_msg_p); 254444961713Sgirish return; 254544961713Sgirish } 254644961713Sgirish } 254744961713Sgirish 254844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2549*52ccf843Smisaki "==> nxge_receive_packet: DMA sync second ")); 255044961713Sgirish 255153f3d8ecSyc bytes_read = rcr_p->rcvd_pkt_bytes; 255244961713Sgirish skip_len = sw_offset_bytes + hdr_size; 255344961713Sgirish if (!rx_msg_p->rx_use_bcopy) { 2554958cea9eSml /* 2555958cea9eSml * For loaned up buffers, the driver reference count 2556958cea9eSml * will be incremented first and then the free state. 2557958cea9eSml */ 255853f3d8ecSyc if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 255914ea4bb7Ssd if (first_entry) { 256014ea4bb7Ssd nmp->b_rptr = &nmp->b_rptr[skip_len]; 256153f3d8ecSyc if (l2_len < bsize - skip_len) { 256214ea4bb7Ssd nmp->b_wptr = &nmp->b_rptr[l2_len]; 256353f3d8ecSyc } else { 256453f3d8ecSyc nmp->b_wptr = &nmp->b_rptr[bsize 256553f3d8ecSyc - skip_len]; 256653f3d8ecSyc } 256714ea4bb7Ssd } else { 256853f3d8ecSyc if (l2_len - bytes_read < bsize) { 256914ea4bb7Ssd nmp->b_wptr = 257014ea4bb7Ssd &nmp->b_rptr[l2_len - bytes_read]; 257153f3d8ecSyc } else { 257253f3d8ecSyc nmp->b_wptr = &nmp->b_rptr[bsize]; 257353f3d8ecSyc } 257414ea4bb7Ssd } 257544961713Sgirish } 257653f3d8ecSyc } else { 257753f3d8ecSyc if (first_entry) { 257853f3d8ecSyc nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 257953f3d8ecSyc l2_len < bsize - skip_len ? 258053f3d8ecSyc l2_len : bsize - skip_len); 258153f3d8ecSyc } else { 258253f3d8ecSyc nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 258353f3d8ecSyc l2_len - bytes_read < bsize ? 
258453f3d8ecSyc l2_len - bytes_read : bsize); 258553f3d8ecSyc } 258653f3d8ecSyc } 258753f3d8ecSyc if (nmp != NULL) { 258853f3d8ecSyc if (first_entry) 258953f3d8ecSyc bytes_read = nmp->b_wptr - nmp->b_rptr; 259053f3d8ecSyc else 259153f3d8ecSyc bytes_read += nmp->b_wptr - nmp->b_rptr; 259253f3d8ecSyc 259353f3d8ecSyc NXGE_DEBUG_MSG((nxgep, RX_CTL, 259453f3d8ecSyc "==> nxge_receive_packet after dupb: " 259553f3d8ecSyc "rbr consumed %d " 259653f3d8ecSyc "pktbufsz_type %d " 259753f3d8ecSyc "nmp $%p rptr $%p wptr $%p " 259853f3d8ecSyc "buf_offset %d bzise %d l2_len %d skip_len %d", 259953f3d8ecSyc rx_rbr_p->rbr_consumed, 260053f3d8ecSyc pktbufsz_type, 260153f3d8ecSyc nmp, nmp->b_rptr, nmp->b_wptr, 260253f3d8ecSyc buf_offset, bsize, l2_len, skip_len)); 260344961713Sgirish } else { 260444961713Sgirish cmn_err(CE_WARN, "!nxge_receive_packet: " 2605*52ccf843Smisaki "update stats (error)"); 26062e59129aSraghus atomic_inc_32(&rx_msg_p->ref_cnt); 26072e59129aSraghus if (buffer_free == B_TRUE) { 26082e59129aSraghus rx_msg_p->free = B_TRUE; 26092e59129aSraghus } 26102e59129aSraghus MUTEX_EXIT(&rx_rbr_p->lock); 26112e59129aSraghus MUTEX_EXIT(&rcr_p->lock); 26122e59129aSraghus nxge_freeb(rx_msg_p); 26132e59129aSraghus return; 261444961713Sgirish } 2615ee5416c9Syc 261644961713Sgirish if (buffer_free == B_TRUE) { 261744961713Sgirish rx_msg_p->free = B_TRUE; 261844961713Sgirish } 261944961713Sgirish /* 262044961713Sgirish * ERROR, FRAG and PKT_TYPE are only reported 262144961713Sgirish * in the first entry. 262244961713Sgirish * If a packet is not fragmented and no error bit is set, then 262344961713Sgirish * L4 checksum is OK. 262444961713Sgirish */ 262544961713Sgirish is_valid = (nmp != NULL); 262653f3d8ecSyc if (first_entry) { 262753f3d8ecSyc rdc_stats->ipackets++; /* count only 1st seg for jumbo */ 262853f3d8ecSyc rdc_stats->ibytes += skip_len + l2_len < bsize ? 
26297a2b8adfSyc l2_len : bsize; 263053f3d8ecSyc } else { 263153f3d8ecSyc rdc_stats->ibytes += l2_len - bytes_read < bsize ? 263253f3d8ecSyc l2_len - bytes_read : bsize; 263353f3d8ecSyc } 263453f3d8ecSyc 263553f3d8ecSyc rcr_p->rcvd_pkt_bytes = bytes_read; 263653f3d8ecSyc 263744961713Sgirish MUTEX_EXIT(&rx_rbr_p->lock); 263844961713Sgirish MUTEX_EXIT(&rcr_p->lock); 263944961713Sgirish 264044961713Sgirish if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 264144961713Sgirish atomic_inc_32(&rx_msg_p->ref_cnt); 264244961713Sgirish nxge_freeb(rx_msg_p); 264344961713Sgirish } 264444961713Sgirish 264544961713Sgirish if (is_valid) { 2646a3c5bd6dSspeer nmp->b_cont = NULL; 264744961713Sgirish if (first_entry) { 264844961713Sgirish *mp = nmp; 264944961713Sgirish *mp_cont = NULL; 265053f3d8ecSyc } else { 265144961713Sgirish *mp_cont = nmp; 265253f3d8ecSyc } 265344961713Sgirish } 265444961713Sgirish 265544961713Sgirish /* 265644961713Sgirish * Update stats and hardware checksuming. 265744961713Sgirish */ 265844961713Sgirish if (is_valid && !multi) { 2659678453a8Sspeer /* 2660b4d05839Sml * If the checksum flag nxge_chksum_offload 2661b4d05839Sml * is 1, TCP and UDP packets can be sent 2662678453a8Sspeer * up with good checksum. If the checksum flag 2663b4d05839Sml * is set to 0, checksum reporting will apply to 2664678453a8Sspeer * TCP packets only (workaround for a hardware bug). 2665b4d05839Sml * If the checksum flag nxge_cksum_offload is 2666b4d05839Sml * greater than 1, both TCP and UDP packets 2667b4d05839Sml * will not be reported its hardware checksum results. 2668678453a8Sspeer */ 2669b4d05839Sml if (nxge_cksum_offload == 1) { 2670678453a8Sspeer is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2671*52ccf843Smisaki pkt_type == RCR_PKT_IS_UDP) ? 2672*52ccf843Smisaki B_TRUE: B_FALSE); 2673b4d05839Sml } else if (!nxge_cksum_offload) { 2674678453a8Sspeer /* TCP checksum only. */ 2675678453a8Sspeer is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ? 
/*
 * nxge_rx_err_evnts
 *
 * Decode the error bits of one RDC control/status word: bump the
 * matching per-channel statistics, post FMA ereports for hardware
 * faults, and attempt recovery when a fatal channel or port error
 * is seen.
 *
 * Arguments:
 *	nxgep
 *	channel	The RDC whose control/status word is being decoded.
 *	cs	Snapshot of the channel's RX DMA control/status register.
 *
 * Returns:
 *	NXGE_OK when no fatal error was seen or recovery succeeded;
 *	NXGE_ERROR (possibly OR'ed with an NPI status code) otherwise.
 */
/*ARGSUSED*/
static nxge_status_t
nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs)
{
	p_nxge_rx_ring_stats_t	rdc_stats;
	npi_handle_t		handle;
	npi_status_t		rs;
	boolean_t		rxchan_fatal = B_FALSE;	/* channel needs reset */
	boolean_t		rxport_fatal = B_FALSE;	/* whole port affected */
	uint8_t			portn;
	nxge_status_t		status = NXGE_OK;
	uint32_t		error_disp_cnt = NXGE_ERROR_SHOW_MAX;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	portn = nxgep->mac.portnum;
	rdc_stats = &nxgep->statsp->rdc_stats[channel];

	/* RBR timeout: fatal to this channel. */
	if (cs.bits.hdw.rbr_tmout) {
		rdc_stats->rx_rbr_tmout++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RBR_TMOUT);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts: rx_rbr_timeout"));
	}
	/* DMA response count error: fatal to this channel. */
	if (cs.bits.hdw.rsp_cnt_err) {
		rdc_stats->rsp_cnt_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "rsp_cnt_err", channel));
	}
	/* Byte-enable bus error: fatal to this channel. */
	if (cs.bits.hdw.byte_en_bus) {
		rdc_stats->byte_en_bus++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: byte_en_bus", channel));
		rxchan_fatal = B_TRUE;
	}
	/* DMA response data error: fatal to this channel. */
	if (cs.bits.hdw.rsp_dat_err) {
		rdc_stats->rsp_dat_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rsp_dat_err", channel));
	}
	/* RCR acknowledge error: fatal to this channel. */
	if (cs.bits.hdw.rcr_ack_err) {
		rdc_stats->rcr_ack_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rcr_ack_err", channel));
	}
	/*
	 * Data cache FIFO error: not fatal to the channel, but it
	 * is treated as fatal to the port (rxport_fatal), triggering
	 * IPP recovery below.
	 */
	if (cs.bits.hdw.dc_fifo_err) {
		rdc_stats->dc_fifo_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR);
		/* This is not a fatal error! */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "dc_fifo_err", channel));
		rxport_fatal = B_TRUE;
	}
	/*
	 * Shadow/prefetch parity errors share one NPI call that
	 * retrieves the logged parity addresses for both.
	 */
	if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) {
		if ((rs = npi_rxdma_ring_perr_stat_get(handle,
		    &rdc_stats->errlog.pre_par,
		    &rdc_stats->errlog.sha_par))
		    != NPI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "rcr_sha_par: get perr", channel));
			return (NXGE_ERROR | rs);
		}
		/* RCR shadow parity error: fatal to this channel. */
		if (cs.bits.hdw.rcr_sha_par) {
			rdc_stats->rcr_sha_par++;
			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
			    NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
			rxchan_fatal = B_TRUE;
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "fatal error: rcr_sha_par", channel));
		}
		/* RBR prefetch parity error: fatal to this channel. */
		if (cs.bits.hdw.rbr_pre_par) {
			rdc_stats->rbr_pre_par++;
			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
			    NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
			rxchan_fatal = B_TRUE;
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "fatal error: rbr_pre_par", channel));
		}
	}
	/*
	 * The Following 4 status bits are for information, the system
	 * is running fine. There is no need to send FMA ereports or
	 * log messages.
	 */
	if (cs.bits.hdw.port_drop_pkt) {
		rdc_stats->port_drop_pkt++;
	}
	if (cs.bits.hdw.wred_drop) {
		rdc_stats->wred_drop++;
	}
	if (cs.bits.hdw.rbr_pre_empty) {
		rdc_stats->rbr_pre_empty++;
	}
	if (cs.bits.hdw.rcr_shadow_full) {
		rdc_stats->rcr_shadow_full++;
	}
	/* Configuration error: fatal to this channel. */
	if (cs.bits.hdw.config_err) {
		rdc_stats->config_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_CONFIG_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "config error", channel));
	}
	/* RCR inconsistency: fatal to this channel. */
	if (cs.bits.hdw.rcrincon) {
		rdc_stats->rcrincon++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RCRINCON);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rcrincon error", channel));
	}
	/*
	 * RCR full: fatal, but the log message is rate-limited to
	 * error_disp_cnt occurrences (the ereport is always posted).
	 */
	if (cs.bits.hdw.rcrfull) {
		rdc_stats->rcrfull++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RCRFULL);
		rxchan_fatal = B_TRUE;
		if (rdc_stats->rcrfull < error_disp_cnt)
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "fatal error: rcrfull error", channel));
	}
	if (cs.bits.hdw.rbr_empty) {
		/*
		 * This bit is for information, there is no need
		 * send FMA ereport or log a message.
		 */
		rdc_stats->rbr_empty++;
	}
	/* RBR full: fatal to this channel. */
	if (cs.bits.hdw.rbrfull) {
		rdc_stats->rbrfull++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RBRFULL);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rbr_full error", channel));
	}
	/* RBR logical page error: fatal to this channel. */
	if (cs.bits.hdw.rbrlogpage) {
		rdc_stats->rbrlogpage++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RBRLOGPAGE);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rbr logical page error", channel));
	}
	/* Configuration logical page error: fatal to this channel. */
	if (cs.bits.hdw.cfiglogpage) {
		rdc_stats->cfiglogpage++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: cfig logical page error", channel));
	}

	/*
	 * Port-fatal recovery goes through the IPP; in an LDOMs
	 * guest domain we cannot touch the hardware, so just
	 * report the error.
	 */
	if (rxport_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rx_err_evnts: fatal error on Port #%d\n",
		    portn));
		if (isLDOMguest(nxgep)) {
			status = NXGE_ERROR;
		} else {
			status = nxge_ipp_fatal_err_recover(nxgep);
			if (status == NXGE_OK) {
				FM_SERVICE_RESTORED(nxgep);
			}
		}
	}

	/* Channel-fatal recovery resets just this RDC. */
	if (rxchan_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rx_err_evnts: fatal error on Channel #%d\n",
		    channel));
		if (isLDOMguest(nxgep)) {
			status = NXGE_ERROR;
		} else {
			status = nxge_rxdma_fatal_err_recover(nxgep, channel);
			if (status == NXGE_OK) {
				FM_SERVICE_RESTORED(nxgep);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts"));

	return (status);
}
/*
 * nxge_rdc_hvio_setup
 *
 * Record the original (pre-mapping) I/O addresses and sizes of this
 * RDC's data-buffer and control areas in the RBR ring structure, and
 * mark the ring as not yet bound to the hypervisor (hv_set = B_FALSE).
 *
 * Arguments:
 *	nxgep
 *	channel	The RDC whose hypervisor bookkeeping is initialized.
 *
 * Context:
 *	sun4v with NIU_LP_WORKAROUND only.
 */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
static void
nxge_rdc_hvio_setup(
	nxge_t *nxgep, int channel)
{
	rx_rbr_ring_t *rbrp = nxgep->rx_rbr_rings->rbr_rings[channel];
	nxge_dma_common_t *bufp =
	    nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
	nxge_dma_common_t *cntlp =
	    nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];

	/* No hypervisor binding has been established yet. */
	rbrp->hv_set = B_FALSE;

	/* Data-buffer area: base I/O address and total length. */
	rbrp->hv_rx_buf_base_ioaddr_pp = (uint64_t)bufp->orig_ioaddr_pp;
	rbrp->hv_rx_buf_ioaddr_size = (uint64_t)bufp->orig_alength;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
	    "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)",
	    channel, rbrp->hv_rx_buf_base_ioaddr_pp,
	    bufp->ioaddr_pp, rbrp->hv_rx_buf_ioaddr_size,
	    bufp->orig_alength, bufp->orig_alength));

	/* Control (descriptor/mailbox) area: base I/O address and length. */
	rbrp->hv_rx_cntl_base_ioaddr_pp = (uint64_t)cntlp->orig_ioaddr_pp;
	rbrp->hv_rx_cntl_ioaddr_size = (uint64_t)cntlp->orig_alength;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
	    "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)",
	    channel, rbrp->hv_rx_cntl_base_ioaddr_pp,
	    cntlp->ioaddr_pp, rbrp->hv_rx_cntl_ioaddr_size,
	    cntlp->orig_alength, cntlp->orig_alength));
}
#endif
/*
 * nxge_map_rxdma
 *
 * Map an RDC into our kernel space.
 *
 * Arguments:
 * 	nxgep
 * 	channel	The channel to map.
 *
 * Notes:
 *	1. Allocate & initialise a memory pool, if necessary.
 *	2. Allocate however many receive buffers are required.
 *	3. Setup buffers, descriptors, and mailbox.
 *
 * NPI/NXGE function calls:
 *	nxge_alloc_rx_mem_pool()
 *	nxge_alloc_rxb()
 *	nxge_map_rxdma_channel()
 *
 * Registers accessed:
 *	None directly (delegated to the called helpers).
 *
 * Context:
 *	Any domain
 */
static nxge_status_t
nxge_map_rxdma(p_nxge_t nxgep, int channel)
{
	nxge_dma_common_t	**data;
	nxge_dma_common_t	**control;
	rx_rbr_ring_t		**rbr_ring;
	rx_rcr_ring_t		**rcr_ring;
	rx_mbox_t		**mailbox;
	uint32_t		chunks;

	nxge_status_t		status;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma"));

	/* Lazily create the shared receive memory pool on first use. */
	if (!nxgep->rx_buf_pool_p) {
		if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "<== nxge_map_rxdma: buf not allocated"));
			return (NXGE_ERROR);
		}
	}

	/* Allocate the receive buffers for this channel. */
	if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK)
		return (NXGE_ERROR);

	/*
	 * Timeout should be set based on the system clock divider.
	 * The following timeout value of 1 assumes that the
	 * granularity (1000) is 3 microseconds running at 300MHz.
	 */

	nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT;
	nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT;

	/*
	 * Map descriptors from the buffer pools for each dma channel.
	 */

	/*
	 * Set up and prepare buffer blocks, descriptors
	 * and mailbox.
	 */
	data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
	rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel];
	chunks = nxgep->rx_buf_pool_p->num_chunks[channel];

	control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
	rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel];

	mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel];

	status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring,
	    chunks, control, rcr_ring, mailbox);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) "
		    "returned 0x%x",
		    channel, status));
		return (status);
	}

	/* Cross-link the rings back to their channel and statistics. */
	nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel;
	nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel;
	nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats =
	    &nxgep->statsp->rdc_stats[channel];

	/* Hypervisor bookkeeping is only done in the service domain. */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (!isLDOMguest(nxgep))
		nxge_rdc_hvio_setup(nxgep, channel);
#endif

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel));

	return (status);
}
nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 3062678453a8Sspeer &nxgep->statsp->rdc_stats[channel]; 306344961713Sgirish 3064678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3065678453a8Sspeer if (!isLDOMguest(nxgep)) 3066678453a8Sspeer nxge_rdc_hvio_setup(nxgep, channel); 3067678453a8Sspeer #endif 306844961713Sgirish 306944961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3070678453a8Sspeer "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 307144961713Sgirish 307244961713Sgirish return (status); 307344961713Sgirish } 307444961713Sgirish 307544961713Sgirish static void 3076678453a8Sspeer nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 307744961713Sgirish { 3078678453a8Sspeer rx_rbr_ring_t *rbr_ring; 3079678453a8Sspeer rx_rcr_ring_t *rcr_ring; 3080678453a8Sspeer rx_mbox_t *mailbox; 308144961713Sgirish 3082678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 308344961713Sgirish 3084678453a8Sspeer if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings || 3085678453a8Sspeer !nxgep->rx_mbox_areas_p) 308644961713Sgirish return; 308744961713Sgirish 3088678453a8Sspeer rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3089678453a8Sspeer rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 3090678453a8Sspeer mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 309144961713Sgirish 3092678453a8Sspeer if (!rbr_ring || !rcr_ring || !mailbox) 3093678453a8Sspeer return; 309444961713Sgirish 3095678453a8Sspeer (void) nxge_unmap_rxdma_channel( 3096*52ccf843Smisaki nxgep, channel, rbr_ring, rcr_ring, mailbox); 309744961713Sgirish 3098678453a8Sspeer nxge_free_rxb(nxgep, channel); 309944961713Sgirish 3100678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 310144961713Sgirish } 310244961713Sgirish 310344961713Sgirish nxge_status_t 310444961713Sgirish nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 310544961713Sgirish p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 310644961713Sgirish 
uint32_t num_chunks, 310744961713Sgirish p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 310844961713Sgirish p_rx_mbox_t *rx_mbox_p) 310944961713Sgirish { 311044961713Sgirish int status = NXGE_OK; 311144961713Sgirish 311244961713Sgirish /* 311344961713Sgirish * Set up and prepare buffer blocks, descriptors 311444961713Sgirish * and mailbox. 311544961713Sgirish */ 311644961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3117*52ccf843Smisaki "==> nxge_map_rxdma_channel (channel %d)", channel)); 311844961713Sgirish /* 311944961713Sgirish * Receive buffer blocks 312044961713Sgirish */ 312144961713Sgirish status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3122*52ccf843Smisaki dma_buf_p, rbr_p, num_chunks); 312344961713Sgirish if (status != NXGE_OK) { 312444961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3125*52ccf843Smisaki "==> nxge_map_rxdma_channel (channel %d): " 3126*52ccf843Smisaki "map buffer failed 0x%x", channel, status)); 312744961713Sgirish goto nxge_map_rxdma_channel_exit; 312844961713Sgirish } 312944961713Sgirish 313044961713Sgirish /* 313144961713Sgirish * Receive block ring, completion ring and mailbox. 
313244961713Sgirish */ 313344961713Sgirish status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3134*52ccf843Smisaki dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 313544961713Sgirish if (status != NXGE_OK) { 313644961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3137*52ccf843Smisaki "==> nxge_map_rxdma_channel (channel %d): " 3138*52ccf843Smisaki "map config failed 0x%x", channel, status)); 313944961713Sgirish goto nxge_map_rxdma_channel_fail2; 314044961713Sgirish } 314144961713Sgirish 314244961713Sgirish goto nxge_map_rxdma_channel_exit; 314344961713Sgirish 314444961713Sgirish nxge_map_rxdma_channel_fail3: 314544961713Sgirish /* Free rbr, rcr */ 314644961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3147*52ccf843Smisaki "==> nxge_map_rxdma_channel: free rbr/rcr " 3148*52ccf843Smisaki "(status 0x%x channel %d)", 3149*52ccf843Smisaki status, channel)); 315044961713Sgirish nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3151*52ccf843Smisaki *rcr_p, *rx_mbox_p); 315244961713Sgirish 315344961713Sgirish nxge_map_rxdma_channel_fail2: 315444961713Sgirish /* Free buffer blocks */ 315544961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3156*52ccf843Smisaki "==> nxge_map_rxdma_channel: free rx buffers" 3157*52ccf843Smisaki "(nxgep 0x%x status 0x%x channel %d)", 3158*52ccf843Smisaki nxgep, status, channel)); 315944961713Sgirish nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 316044961713Sgirish 316156d930aeSspeer status = NXGE_ERROR; 316256d930aeSspeer 316344961713Sgirish nxge_map_rxdma_channel_exit: 316444961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3165*52ccf843Smisaki "<== nxge_map_rxdma_channel: " 3166*52ccf843Smisaki "(nxgep 0x%x status 0x%x channel %d)", 3167*52ccf843Smisaki nxgep, status, channel)); 316844961713Sgirish 316944961713Sgirish return (status); 317044961713Sgirish } 317144961713Sgirish 317244961713Sgirish /*ARGSUSED*/ 317344961713Sgirish static void 317444961713Sgirish nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 317544961713Sgirish 
p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 317644961713Sgirish { 317744961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3178*52ccf843Smisaki "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 317944961713Sgirish 318044961713Sgirish /* 318144961713Sgirish * unmap receive block ring, completion ring and mailbox. 318244961713Sgirish */ 318344961713Sgirish (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3184*52ccf843Smisaki rcr_p, rx_mbox_p); 318544961713Sgirish 318644961713Sgirish /* unmap buffer blocks */ 318744961713Sgirish (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 318844961713Sgirish 318944961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 319044961713Sgirish } 319144961713Sgirish 319244961713Sgirish /*ARGSUSED*/ 319344961713Sgirish static nxge_status_t 319444961713Sgirish nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 319544961713Sgirish p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 319644961713Sgirish p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 319744961713Sgirish { 319844961713Sgirish p_rx_rbr_ring_t rbrp; 319944961713Sgirish p_rx_rcr_ring_t rcrp; 320044961713Sgirish p_rx_mbox_t mboxp; 320144961713Sgirish p_nxge_dma_common_t cntl_dmap; 320244961713Sgirish p_nxge_dma_common_t dmap; 320344961713Sgirish p_rx_msg_t *rx_msg_ring; 320444961713Sgirish p_rx_msg_t rx_msg_p; 320544961713Sgirish p_rbr_cfig_a_t rcfga_p; 320644961713Sgirish p_rbr_cfig_b_t rcfgb_p; 320744961713Sgirish p_rcrcfig_a_t cfga_p; 320844961713Sgirish p_rcrcfig_b_t cfgb_p; 320944961713Sgirish p_rxdma_cfig1_t cfig1_p; 321044961713Sgirish p_rxdma_cfig2_t cfig2_p; 321144961713Sgirish p_rbr_kick_t kick_p; 321244961713Sgirish uint32_t dmaaddrp; 321344961713Sgirish uint32_t *rbr_vaddrp; 321444961713Sgirish uint32_t bkaddr; 321544961713Sgirish nxge_status_t status = NXGE_OK; 321644961713Sgirish int i; 321744961713Sgirish uint32_t nxge_port_rcr_size; 321844961713Sgirish 321944961713Sgirish 
NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3220*52ccf843Smisaki "==> nxge_map_rxdma_channel_cfg_ring")); 322144961713Sgirish 322244961713Sgirish cntl_dmap = *dma_cntl_p; 322344961713Sgirish 322444961713Sgirish /* Map in the receive block ring */ 322544961713Sgirish rbrp = *rbr_p; 322644961713Sgirish dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 322744961713Sgirish nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 322844961713Sgirish /* 322944961713Sgirish * Zero out buffer block ring descriptors. 323044961713Sgirish */ 323144961713Sgirish bzero((caddr_t)dmap->kaddrp, dmap->alength); 323244961713Sgirish 323344961713Sgirish rcfga_p = &(rbrp->rbr_cfga); 323444961713Sgirish rcfgb_p = &(rbrp->rbr_cfgb); 323544961713Sgirish kick_p = &(rbrp->rbr_kick); 323644961713Sgirish rcfga_p->value = 0; 323744961713Sgirish rcfgb_p->value = 0; 323844961713Sgirish kick_p->value = 0; 323944961713Sgirish rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 324044961713Sgirish rcfga_p->value = (rbrp->rbr_addr & 3241*52ccf843Smisaki (RBR_CFIG_A_STDADDR_MASK | 3242*52ccf843Smisaki RBR_CFIG_A_STDADDR_BASE_MASK)); 324344961713Sgirish rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 324444961713Sgirish 324544961713Sgirish rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 324644961713Sgirish rcfgb_p->bits.ldw.vld0 = 1; 324744961713Sgirish rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 324844961713Sgirish rcfgb_p->bits.ldw.vld1 = 1; 324944961713Sgirish rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 325044961713Sgirish rcfgb_p->bits.ldw.vld2 = 1; 325144961713Sgirish rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 325244961713Sgirish 325344961713Sgirish /* 325444961713Sgirish * For each buffer block, enter receive block address to the ring. 
325544961713Sgirish */ 325644961713Sgirish rbr_vaddrp = (uint32_t *)dmap->kaddrp; 325744961713Sgirish rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 325844961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3259*52ccf843Smisaki "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3260*52ccf843Smisaki "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 326144961713Sgirish 326244961713Sgirish rx_msg_ring = rbrp->rx_msg_ring; 326344961713Sgirish for (i = 0; i < rbrp->tnblocks; i++) { 326444961713Sgirish rx_msg_p = rx_msg_ring[i]; 326544961713Sgirish rx_msg_p->nxgep = nxgep; 326644961713Sgirish rx_msg_p->rx_rbr_p = rbrp; 326744961713Sgirish bkaddr = (uint32_t) 3268*52ccf843Smisaki ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3269*52ccf843Smisaki >> RBR_BKADDR_SHIFT)); 327044961713Sgirish rx_msg_p->free = B_FALSE; 327144961713Sgirish rx_msg_p->max_usage_cnt = 0xbaddcafe; 327244961713Sgirish 327344961713Sgirish *rbr_vaddrp++ = bkaddr; 327444961713Sgirish } 327544961713Sgirish 327644961713Sgirish kick_p->bits.ldw.bkadd = rbrp->rbb_max; 327744961713Sgirish rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 327844961713Sgirish 327944961713Sgirish rbrp->rbr_rd_index = 0; 328044961713Sgirish 328144961713Sgirish rbrp->rbr_consumed = 0; 328244961713Sgirish rbrp->rbr_use_bcopy = B_TRUE; 328344961713Sgirish rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 328444961713Sgirish /* 328544961713Sgirish * Do bcopy on packets greater than bcopy size once 328644961713Sgirish * the lo threshold is reached. 328744961713Sgirish * This lo threshold should be less than the hi threshold. 328844961713Sgirish * 328944961713Sgirish * Do bcopy on every packet once the hi threshold is reached. 
329044961713Sgirish */ 329144961713Sgirish if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 329244961713Sgirish /* default it to use hi */ 329344961713Sgirish nxge_rx_threshold_lo = nxge_rx_threshold_hi; 329444961713Sgirish } 329544961713Sgirish 329644961713Sgirish if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 329744961713Sgirish nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 329844961713Sgirish } 329944961713Sgirish rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 330044961713Sgirish 330144961713Sgirish switch (nxge_rx_threshold_hi) { 330244961713Sgirish default: 330344961713Sgirish case NXGE_RX_COPY_NONE: 330444961713Sgirish /* Do not do bcopy at all */ 330544961713Sgirish rbrp->rbr_use_bcopy = B_FALSE; 330644961713Sgirish rbrp->rbr_threshold_hi = rbrp->rbb_max; 330744961713Sgirish break; 330844961713Sgirish 330944961713Sgirish case NXGE_RX_COPY_1: 331044961713Sgirish case NXGE_RX_COPY_2: 331144961713Sgirish case NXGE_RX_COPY_3: 331244961713Sgirish case NXGE_RX_COPY_4: 331344961713Sgirish case NXGE_RX_COPY_5: 331444961713Sgirish case NXGE_RX_COPY_6: 331544961713Sgirish case NXGE_RX_COPY_7: 331644961713Sgirish rbrp->rbr_threshold_hi = 3317*52ccf843Smisaki rbrp->rbb_max * 3318*52ccf843Smisaki (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 331944961713Sgirish break; 332044961713Sgirish 332144961713Sgirish case NXGE_RX_COPY_ALL: 332244961713Sgirish rbrp->rbr_threshold_hi = 0; 332344961713Sgirish break; 332444961713Sgirish } 332544961713Sgirish 332644961713Sgirish switch (nxge_rx_threshold_lo) { 332744961713Sgirish default: 332844961713Sgirish case NXGE_RX_COPY_NONE: 332944961713Sgirish /* Do not do bcopy at all */ 333044961713Sgirish if (rbrp->rbr_use_bcopy) { 333144961713Sgirish rbrp->rbr_use_bcopy = B_FALSE; 333244961713Sgirish } 333344961713Sgirish rbrp->rbr_threshold_lo = rbrp->rbb_max; 333444961713Sgirish break; 333544961713Sgirish 333644961713Sgirish case NXGE_RX_COPY_1: 333744961713Sgirish case NXGE_RX_COPY_2: 333844961713Sgirish case NXGE_RX_COPY_3: 
333944961713Sgirish case NXGE_RX_COPY_4: 334044961713Sgirish case NXGE_RX_COPY_5: 334144961713Sgirish case NXGE_RX_COPY_6: 334244961713Sgirish case NXGE_RX_COPY_7: 334344961713Sgirish rbrp->rbr_threshold_lo = 3344*52ccf843Smisaki rbrp->rbb_max * 3345*52ccf843Smisaki (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 334644961713Sgirish break; 334744961713Sgirish 334844961713Sgirish case NXGE_RX_COPY_ALL: 334944961713Sgirish rbrp->rbr_threshold_lo = 0; 335044961713Sgirish break; 335144961713Sgirish } 335244961713Sgirish 335344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 3354*52ccf843Smisaki "nxge_map_rxdma_channel_cfg_ring: channel %d " 3355*52ccf843Smisaki "rbb_max %d " 3356*52ccf843Smisaki "rbrp->rbr_bufsize_type %d " 3357*52ccf843Smisaki "rbb_threshold_hi %d " 3358*52ccf843Smisaki "rbb_threshold_lo %d", 3359*52ccf843Smisaki dma_channel, 3360*52ccf843Smisaki rbrp->rbb_max, 3361*52ccf843Smisaki rbrp->rbr_bufsize_type, 3362*52ccf843Smisaki rbrp->rbr_threshold_hi, 3363*52ccf843Smisaki rbrp->rbr_threshold_lo)); 336444961713Sgirish 336544961713Sgirish rbrp->page_valid.value = 0; 336644961713Sgirish rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 336744961713Sgirish rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 336844961713Sgirish rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 336944961713Sgirish rbrp->page_hdl.value = 0; 337044961713Sgirish 337144961713Sgirish rbrp->page_valid.bits.ldw.page0 = 1; 337244961713Sgirish rbrp->page_valid.bits.ldw.page1 = 1; 337344961713Sgirish 337444961713Sgirish /* Map in the receive completion ring */ 337544961713Sgirish rcrp = (p_rx_rcr_ring_t) 3376*52ccf843Smisaki KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 337744961713Sgirish rcrp->rdc = dma_channel; 337844961713Sgirish 337944961713Sgirish nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 338044961713Sgirish rcrp->comp_size = nxge_port_rcr_size; 338144961713Sgirish rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 338244961713Sgirish 338344961713Sgirish 
rcrp->max_receive_pkts = nxge_max_rx_pkts; 338444961713Sgirish 338544961713Sgirish dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 338644961713Sgirish nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3387*52ccf843Smisaki sizeof (rcr_entry_t)); 338844961713Sgirish rcrp->comp_rd_index = 0; 338944961713Sgirish rcrp->comp_wt_index = 0; 339044961713Sgirish rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3391*52ccf843Smisaki (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3392adfcba55Sjoycey #if defined(__i386) 3393*52ccf843Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3394*52ccf843Smisaki (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3395adfcba55Sjoycey #else 3396*52ccf843Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3397*52ccf843Smisaki (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3398adfcba55Sjoycey #endif 339944961713Sgirish 340044961713Sgirish rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3401*52ccf843Smisaki (nxge_port_rcr_size - 1); 340244961713Sgirish rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3403*52ccf843Smisaki (nxge_port_rcr_size - 1); 340444961713Sgirish 340544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3406*52ccf843Smisaki "==> nxge_map_rxdma_channel_cfg_ring: " 3407*52ccf843Smisaki "channel %d " 3408*52ccf843Smisaki "rbr_vaddrp $%p " 3409*52ccf843Smisaki "rcr_desc_rd_head_p $%p " 3410*52ccf843Smisaki "rcr_desc_rd_head_pp $%p " 3411*52ccf843Smisaki "rcr_desc_rd_last_p $%p " 3412*52ccf843Smisaki "rcr_desc_rd_last_pp $%p ", 3413*52ccf843Smisaki dma_channel, 3414*52ccf843Smisaki rbr_vaddrp, 3415*52ccf843Smisaki rcrp->rcr_desc_rd_head_p, 3416*52ccf843Smisaki rcrp->rcr_desc_rd_head_pp, 3417*52ccf843Smisaki rcrp->rcr_desc_last_p, 3418*52ccf843Smisaki rcrp->rcr_desc_last_pp)); 341944961713Sgirish 342044961713Sgirish /* 342144961713Sgirish * Zero out buffer block ring descriptors. 
342244961713Sgirish */ 342344961713Sgirish bzero((caddr_t)dmap->kaddrp, dmap->alength); 342414ea4bb7Ssd rcrp->intr_timeout = nxgep->intr_timeout; 342514ea4bb7Ssd rcrp->intr_threshold = nxgep->intr_threshold; 342644961713Sgirish rcrp->full_hdr_flag = B_FALSE; 342744961713Sgirish rcrp->sw_priv_hdr_len = 0; 342844961713Sgirish 342944961713Sgirish cfga_p = &(rcrp->rcr_cfga); 343044961713Sgirish cfgb_p = &(rcrp->rcr_cfgb); 343144961713Sgirish cfga_p->value = 0; 343244961713Sgirish cfgb_p->value = 0; 343344961713Sgirish rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 343444961713Sgirish cfga_p->value = (rcrp->rcr_addr & 3435*52ccf843Smisaki (RCRCFIG_A_STADDR_MASK | 3436*52ccf843Smisaki RCRCFIG_A_STADDR_BASE_MASK)); 343744961713Sgirish 343844961713Sgirish rcfga_p->value |= ((uint64_t)rcrp->comp_size << 3439*52ccf843Smisaki RCRCFIG_A_LEN_SHIF); 344044961713Sgirish 344144961713Sgirish /* 344244961713Sgirish * Timeout should be set based on the system clock divider. 344344961713Sgirish * The following timeout value of 1 assumes that the 344444961713Sgirish * granularity (1000) is 3 microseconds running at 300MHz. 
344544961713Sgirish */ 344614ea4bb7Ssd cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 344714ea4bb7Ssd cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 344844961713Sgirish cfgb_p->bits.ldw.entout = 1; 344944961713Sgirish 345044961713Sgirish /* Map in the mailbox */ 345144961713Sgirish mboxp = (p_rx_mbox_t) 3452*52ccf843Smisaki KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 345344961713Sgirish dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 345444961713Sgirish nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 345544961713Sgirish cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 345644961713Sgirish cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 345744961713Sgirish cfig1_p->value = cfig2_p->value = 0; 345844961713Sgirish 345944961713Sgirish mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 346044961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3461*52ccf843Smisaki "==> nxge_map_rxdma_channel_cfg_ring: " 3462*52ccf843Smisaki "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3463*52ccf843Smisaki dma_channel, cfig1_p->value, cfig2_p->value, 3464*52ccf843Smisaki mboxp->mbox_addr)); 346544961713Sgirish 346644961713Sgirish dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3467*52ccf843Smisaki & 0xfff); 346844961713Sgirish cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 346944961713Sgirish 347044961713Sgirish 347144961713Sgirish dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 347244961713Sgirish dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3473*52ccf843Smisaki RXDMA_CFIG2_MBADDR_L_MASK); 347444961713Sgirish 347544961713Sgirish cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 347644961713Sgirish 347744961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3478*52ccf843Smisaki "==> nxge_map_rxdma_channel_cfg_ring: " 3479*52ccf843Smisaki "channel %d damaddrp $%p " 3480*52ccf843Smisaki "cfg1 0x%016llx cfig2 0x%016llx", 3481*52ccf843Smisaki dma_channel, dmaaddrp, 3482*52ccf843Smisaki cfig1_p->value, cfig2_p->value)); 
348344961713Sgirish 348444961713Sgirish cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 348544961713Sgirish cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 348644961713Sgirish 348744961713Sgirish rbrp->rx_rcr_p = rcrp; 348844961713Sgirish rcrp->rx_rbr_p = rbrp; 348944961713Sgirish *rcr_p = rcrp; 349044961713Sgirish *rx_mbox_p = mboxp; 349144961713Sgirish 349244961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3493*52ccf843Smisaki "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 349444961713Sgirish 349544961713Sgirish return (status); 349644961713Sgirish } 349744961713Sgirish 349844961713Sgirish /*ARGSUSED*/ 349944961713Sgirish static void 350044961713Sgirish nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 350144961713Sgirish p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 350244961713Sgirish { 350344961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3504*52ccf843Smisaki "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3505*52ccf843Smisaki rcr_p->rdc)); 350644961713Sgirish 350744961713Sgirish KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 350844961713Sgirish KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 350944961713Sgirish 351044961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3511*52ccf843Smisaki "<== nxge_unmap_rxdma_channel_cfg_ring")); 351244961713Sgirish } 351344961713Sgirish 351444961713Sgirish static nxge_status_t 351544961713Sgirish nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 351644961713Sgirish p_nxge_dma_common_t *dma_buf_p, 351744961713Sgirish p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 351844961713Sgirish { 351944961713Sgirish p_rx_rbr_ring_t rbrp; 352044961713Sgirish p_nxge_dma_common_t dma_bufp, tmp_bufp; 352144961713Sgirish p_rx_msg_t *rx_msg_ring; 352244961713Sgirish p_rx_msg_t rx_msg_p; 352344961713Sgirish p_mblk_t mblk_p; 352444961713Sgirish 352544961713Sgirish rxring_info_t *ring_info; 352644961713Sgirish nxge_status_t status = NXGE_OK; 352744961713Sgirish int i, j, index; 352844961713Sgirish uint32_t size, bsize, 
nblocks, nmsgs; 352944961713Sgirish 353044961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3531*52ccf843Smisaki "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3532*52ccf843Smisaki channel)); 353344961713Sgirish 353444961713Sgirish dma_bufp = tmp_bufp = *dma_buf_p; 353544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3536*52ccf843Smisaki " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3537*52ccf843Smisaki "chunks bufp 0x%016llx", 3538*52ccf843Smisaki channel, num_chunks, dma_bufp)); 353944961713Sgirish 354044961713Sgirish nmsgs = 0; 354144961713Sgirish for (i = 0; i < num_chunks; i++, tmp_bufp++) { 354244961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3543*52ccf843Smisaki "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3544*52ccf843Smisaki "bufp 0x%016llx nblocks %d nmsgs %d", 3545*52ccf843Smisaki channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 354644961713Sgirish nmsgs += tmp_bufp->nblocks; 354744961713Sgirish } 354844961713Sgirish if (!nmsgs) { 354956d930aeSspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3550*52ccf843Smisaki "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3551*52ccf843Smisaki "no msg blocks", 3552*52ccf843Smisaki channel)); 355344961713Sgirish status = NXGE_ERROR; 355444961713Sgirish goto nxge_map_rxdma_channel_buf_ring_exit; 355544961713Sgirish } 355644961713Sgirish 3557007969e0Stm rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 355844961713Sgirish 355944961713Sgirish size = nmsgs * sizeof (p_rx_msg_t); 356044961713Sgirish rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 356144961713Sgirish ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3562*52ccf843Smisaki KM_SLEEP); 356344961713Sgirish 356444961713Sgirish MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3565*52ccf843Smisaki (void *)nxgep->interrupt_cookie); 356644961713Sgirish MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3567*52ccf843Smisaki (void *)nxgep->interrupt_cookie); 356844961713Sgirish rbrp->rdc = channel; 356944961713Sgirish rbrp->num_blocks = 
num_chunks; 357044961713Sgirish rbrp->tnblocks = nmsgs; 357144961713Sgirish rbrp->rbb_max = nmsgs; 357244961713Sgirish rbrp->rbr_max_size = nmsgs; 357344961713Sgirish rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 357444961713Sgirish 357544961713Sgirish /* 357644961713Sgirish * Buffer sizes suggested by NIU architect. 357744961713Sgirish * 256, 512 and 2K. 357844961713Sgirish */ 357944961713Sgirish 358044961713Sgirish rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 358144961713Sgirish rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 358244961713Sgirish rbrp->npi_pkt_buf_size0 = SIZE_256B; 358344961713Sgirish 358444961713Sgirish rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 358544961713Sgirish rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 358644961713Sgirish rbrp->npi_pkt_buf_size1 = SIZE_1KB; 358744961713Sgirish 358844961713Sgirish rbrp->block_size = nxgep->rx_default_block_size; 358944961713Sgirish 359014ea4bb7Ssd if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 359144961713Sgirish rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 359244961713Sgirish rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 359344961713Sgirish rbrp->npi_pkt_buf_size2 = SIZE_2KB; 359444961713Sgirish } else { 359544961713Sgirish if (rbrp->block_size >= 0x2000) { 359644961713Sgirish rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 359744961713Sgirish rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 359844961713Sgirish rbrp->npi_pkt_buf_size2 = SIZE_8KB; 359944961713Sgirish } else { 360044961713Sgirish rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 360144961713Sgirish rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 360244961713Sgirish rbrp->npi_pkt_buf_size2 = SIZE_4KB; 360344961713Sgirish } 360444961713Sgirish } 360544961713Sgirish 360644961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3607*52ccf843Smisaki "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3608*52ccf843Smisaki "actual rbr max %d rbb_max %d nmsgs %d " 3609*52ccf843Smisaki "rbrp->block_size %d default_block_size %d " 3610*52ccf843Smisaki "(config 
nxge_rbr_size %d nxge_rbr_spare_size %d)", 3611*52ccf843Smisaki channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3612*52ccf843Smisaki rbrp->block_size, nxgep->rx_default_block_size, 3613*52ccf843Smisaki nxge_rbr_size, nxge_rbr_spare_size)); 361444961713Sgirish 361544961713Sgirish /* Map in buffers from the buffer pool. */ 361644961713Sgirish index = 0; 361744961713Sgirish for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 361844961713Sgirish bsize = dma_bufp->block_size; 361944961713Sgirish nblocks = dma_bufp->nblocks; 3620adfcba55Sjoycey #if defined(__i386) 3621adfcba55Sjoycey ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3622adfcba55Sjoycey #else 362344961713Sgirish ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3624adfcba55Sjoycey #endif 362544961713Sgirish ring_info->buffer[i].buf_index = i; 362644961713Sgirish ring_info->buffer[i].buf_size = dma_bufp->alength; 362744961713Sgirish ring_info->buffer[i].start_index = index; 3628adfcba55Sjoycey #if defined(__i386) 3629adfcba55Sjoycey ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3630adfcba55Sjoycey #else 363144961713Sgirish ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3632adfcba55Sjoycey #endif 363344961713Sgirish 363444961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3635*52ccf843Smisaki " nxge_map_rxdma_channel_buf_ring: map channel %d " 3636*52ccf843Smisaki "chunk %d" 3637*52ccf843Smisaki " nblocks %d chunk_size %x block_size 0x%x " 3638*52ccf843Smisaki "dma_bufp $%p", channel, i, 3639*52ccf843Smisaki dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3640*52ccf843Smisaki dma_bufp)); 364144961713Sgirish 364244961713Sgirish for (j = 0; j < nblocks; j++) { 364344961713Sgirish if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3644*52ccf843Smisaki dma_bufp)) == NULL) { 364556d930aeSspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3646*52ccf843Smisaki "allocb failed (index %d i %d j %d)", 3647*52ccf843Smisaki index, i, j)); 364856d930aeSspeer goto 
nxge_map_rxdma_channel_buf_ring_fail1; 364944961713Sgirish } 365044961713Sgirish rx_msg_ring[index] = rx_msg_p; 365144961713Sgirish rx_msg_p->block_index = index; 365244961713Sgirish rx_msg_p->shifted_addr = (uint32_t) 3653*52ccf843Smisaki ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3654*52ccf843Smisaki RBR_BKADDR_SHIFT)); 365544961713Sgirish 365644961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3657*52ccf843Smisaki "index %d j %d rx_msg_p $%p mblk %p", 3658*52ccf843Smisaki index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 365944961713Sgirish 366044961713Sgirish mblk_p = rx_msg_p->rx_mblk_p; 366144961713Sgirish mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3662007969e0Stm 3663007969e0Stm rbrp->rbr_ref_cnt++; 366444961713Sgirish index++; 366544961713Sgirish rx_msg_p->buf_dma.dma_channel = channel; 366644961713Sgirish } 3667678453a8Sspeer 3668678453a8Sspeer rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 3669678453a8Sspeer if (dma_bufp->contig_alloc_type) { 3670678453a8Sspeer rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 3671678453a8Sspeer } 3672678453a8Sspeer 3673678453a8Sspeer if (dma_bufp->kmem_alloc_type) { 3674678453a8Sspeer rbrp->rbr_alloc_type = KMEM_ALLOC; 3675678453a8Sspeer } 3676678453a8Sspeer 3677678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3678678453a8Sspeer " nxge_map_rxdma_channel_buf_ring: map channel %d " 3679678453a8Sspeer "chunk %d" 3680678453a8Sspeer " nblocks %d chunk_size %x block_size 0x%x " 3681678453a8Sspeer "dma_bufp $%p", 3682678453a8Sspeer channel, i, 3683678453a8Sspeer dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3684678453a8Sspeer dma_bufp)); 368544961713Sgirish } 368644961713Sgirish if (i < rbrp->num_blocks) { 368744961713Sgirish goto nxge_map_rxdma_channel_buf_ring_fail1; 368844961713Sgirish } 368944961713Sgirish 369044961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3691*52ccf843Smisaki "nxge_map_rxdma_channel_buf_ring: done buf init " 3692*52ccf843Smisaki "channel %d msg block entries %d", 3693*52ccf843Smisaki channel, index)); 
369444961713Sgirish ring_info->block_size_mask = bsize - 1; 369544961713Sgirish rbrp->rx_msg_ring = rx_msg_ring; 369644961713Sgirish rbrp->dma_bufp = dma_buf_p; 369744961713Sgirish rbrp->ring_info = ring_info; 369844961713Sgirish 369944961713Sgirish status = nxge_rxbuf_index_info_init(nxgep, rbrp); 370044961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3701*52ccf843Smisaki " nxge_map_rxdma_channel_buf_ring: " 3702*52ccf843Smisaki "channel %d done buf info init", channel)); 370344961713Sgirish 3704007969e0Stm /* 3705007969e0Stm * Finally, permit nxge_freeb() to call nxge_post_page(). 3706007969e0Stm */ 3707007969e0Stm rbrp->rbr_state = RBR_POSTING; 3708007969e0Stm 370944961713Sgirish *rbr_p = rbrp; 371044961713Sgirish goto nxge_map_rxdma_channel_buf_ring_exit; 371144961713Sgirish 371244961713Sgirish nxge_map_rxdma_channel_buf_ring_fail1: 371344961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3714*52ccf843Smisaki " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 3715*52ccf843Smisaki channel, status)); 371644961713Sgirish 371744961713Sgirish index--; 371844961713Sgirish for (; index >= 0; index--) { 371944961713Sgirish rx_msg_p = rx_msg_ring[index]; 372044961713Sgirish if (rx_msg_p != NULL) { 372114ea4bb7Ssd freeb(rx_msg_p->rx_mblk_p); 372244961713Sgirish rx_msg_ring[index] = NULL; 372344961713Sgirish } 372444961713Sgirish } 372544961713Sgirish nxge_map_rxdma_channel_buf_ring_fail: 372644961713Sgirish MUTEX_DESTROY(&rbrp->post_lock); 372744961713Sgirish MUTEX_DESTROY(&rbrp->lock); 372844961713Sgirish KMEM_FREE(ring_info, sizeof (rxring_info_t)); 372944961713Sgirish KMEM_FREE(rx_msg_ring, size); 373044961713Sgirish KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 373144961713Sgirish 373256d930aeSspeer status = NXGE_ERROR; 373356d930aeSspeer 373444961713Sgirish nxge_map_rxdma_channel_buf_ring_exit: 373544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3736*52ccf843Smisaki "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 373744961713Sgirish 373844961713Sgirish 
return (status); 373944961713Sgirish } 374044961713Sgirish 374144961713Sgirish /*ARGSUSED*/ 374244961713Sgirish static void 374344961713Sgirish nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 374444961713Sgirish p_rx_rbr_ring_t rbr_p) 374544961713Sgirish { 374644961713Sgirish p_rx_msg_t *rx_msg_ring; 374744961713Sgirish p_rx_msg_t rx_msg_p; 374844961713Sgirish rxring_info_t *ring_info; 374944961713Sgirish int i; 375044961713Sgirish uint32_t size; 375144961713Sgirish #ifdef NXGE_DEBUG 375244961713Sgirish int num_chunks; 375344961713Sgirish #endif 375444961713Sgirish 375544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3756*52ccf843Smisaki "==> nxge_unmap_rxdma_channel_buf_ring")); 375744961713Sgirish if (rbr_p == NULL) { 375844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 3759*52ccf843Smisaki "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 376044961713Sgirish return; 376144961713Sgirish } 376244961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3763*52ccf843Smisaki "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 3764*52ccf843Smisaki rbr_p->rdc)); 376544961713Sgirish 376644961713Sgirish rx_msg_ring = rbr_p->rx_msg_ring; 376744961713Sgirish ring_info = rbr_p->ring_info; 376844961713Sgirish 376944961713Sgirish if (rx_msg_ring == NULL || ring_info == NULL) { 3770*52ccf843Smisaki NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3771*52ccf843Smisaki "<== nxge_unmap_rxdma_channel_buf_ring: " 3772*52ccf843Smisaki "rx_msg_ring $%p ring_info $%p", 3773*52ccf843Smisaki rx_msg_p, ring_info)); 377444961713Sgirish return; 377544961713Sgirish } 377644961713Sgirish 377744961713Sgirish #ifdef NXGE_DEBUG 377844961713Sgirish num_chunks = rbr_p->num_blocks; 377944961713Sgirish #endif 378044961713Sgirish size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 378144961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3782*52ccf843Smisaki " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 3783*52ccf843Smisaki "tnblocks %d (max %d) size ptrs %d ", 3784*52ccf843Smisaki rbr_p->rdc, num_chunks, 
3785*52ccf843Smisaki rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 378644961713Sgirish 378744961713Sgirish for (i = 0; i < rbr_p->tnblocks; i++) { 378844961713Sgirish rx_msg_p = rx_msg_ring[i]; 378944961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3790*52ccf843Smisaki " nxge_unmap_rxdma_channel_buf_ring: " 3791*52ccf843Smisaki "rx_msg_p $%p", 3792*52ccf843Smisaki rx_msg_p)); 379344961713Sgirish if (rx_msg_p != NULL) { 379414ea4bb7Ssd freeb(rx_msg_p->rx_mblk_p); 379544961713Sgirish rx_msg_ring[i] = NULL; 379644961713Sgirish } 379744961713Sgirish } 379844961713Sgirish 3799007969e0Stm /* 3800007969e0Stm * We no longer may use the mutex <post_lock>. By setting 3801007969e0Stm * <rbr_state> to anything but POSTING, we prevent 3802007969e0Stm * nxge_post_page() from accessing a dead mutex. 3803007969e0Stm */ 3804007969e0Stm rbr_p->rbr_state = RBR_UNMAPPING; 380544961713Sgirish MUTEX_DESTROY(&rbr_p->post_lock); 3806007969e0Stm 380744961713Sgirish MUTEX_DESTROY(&rbr_p->lock); 3808007969e0Stm 3809007969e0Stm if (rbr_p->rbr_ref_cnt == 0) { 3810678453a8Sspeer /* 3811678453a8Sspeer * This is the normal state of affairs. 3812678453a8Sspeer * Need to free the following buffers: 3813678453a8Sspeer * - data buffers 3814678453a8Sspeer * - rx_msg ring 3815678453a8Sspeer * - ring_info 3816678453a8Sspeer * - rbr ring 3817678453a8Sspeer */ 3818678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 3819678453a8Sspeer "unmap_rxdma_buf_ring: No outstanding - freeing ")); 3820678453a8Sspeer nxge_rxdma_databuf_free(rbr_p); 3821678453a8Sspeer KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3822678453a8Sspeer KMEM_FREE(rx_msg_ring, size); 3823007969e0Stm KMEM_FREE(rbr_p, sizeof (*rbr_p)); 3824007969e0Stm } else { 3825007969e0Stm /* 3826007969e0Stm * Some of our buffers are still being used. 3827007969e0Stm * Therefore, tell nxge_freeb() this ring is 3828007969e0Stm * unmapped, so it may free <rbr_p> for us. 
3829007969e0Stm */ 3830007969e0Stm rbr_p->rbr_state = RBR_UNMAPPED; 3831007969e0Stm NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3832007969e0Stm "unmap_rxdma_buf_ring: %d %s outstanding.", 3833007969e0Stm rbr_p->rbr_ref_cnt, 3834007969e0Stm rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs")); 3835007969e0Stm } 383644961713Sgirish 383744961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3838*52ccf843Smisaki "<== nxge_unmap_rxdma_channel_buf_ring")); 383944961713Sgirish } 384044961713Sgirish 3841678453a8Sspeer /* 3842678453a8Sspeer * nxge_rxdma_hw_start_common 3843678453a8Sspeer * 3844678453a8Sspeer * Arguments: 3845678453a8Sspeer * nxgep 3846678453a8Sspeer * 3847678453a8Sspeer * Notes: 3848678453a8Sspeer * 3849678453a8Sspeer * NPI/NXGE function calls: 3850678453a8Sspeer * nxge_init_fzc_rx_common(); 3851678453a8Sspeer * nxge_init_fzc_rxdma_port(); 3852678453a8Sspeer * 3853678453a8Sspeer * Registers accessed: 3854678453a8Sspeer * 3855678453a8Sspeer * Context: 3856678453a8Sspeer * Service domain 3857678453a8Sspeer */ 385844961713Sgirish static nxge_status_t 385944961713Sgirish nxge_rxdma_hw_start_common(p_nxge_t nxgep) 386044961713Sgirish { 386144961713Sgirish nxge_status_t status = NXGE_OK; 386244961713Sgirish 386344961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 386444961713Sgirish 386544961713Sgirish /* 386644961713Sgirish * Load the sharable parameters by writing to the 386744961713Sgirish * function zero control registers. These FZC registers 386844961713Sgirish * should be initialized only once for the entire chip. 386944961713Sgirish */ 387044961713Sgirish (void) nxge_init_fzc_rx_common(nxgep); 387144961713Sgirish 387244961713Sgirish /* 387344961713Sgirish * Initialize the RXDMA port specific FZC control configurations. 387444961713Sgirish * These FZC registers are pertaining to each port. 
387544961713Sgirish */ 387644961713Sgirish (void) nxge_init_fzc_rxdma_port(nxgep); 387744961713Sgirish 387844961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 387944961713Sgirish 388044961713Sgirish return (status); 388144961713Sgirish } 388244961713Sgirish 388344961713Sgirish static nxge_status_t 3884678453a8Sspeer nxge_rxdma_hw_start(p_nxge_t nxgep, int channel) 388544961713Sgirish { 388644961713Sgirish int i, ndmas; 388744961713Sgirish p_rx_rbr_rings_t rx_rbr_rings; 388844961713Sgirish p_rx_rbr_ring_t *rbr_rings; 388944961713Sgirish p_rx_rcr_rings_t rx_rcr_rings; 389044961713Sgirish p_rx_rcr_ring_t *rcr_rings; 389144961713Sgirish p_rx_mbox_areas_t rx_mbox_areas_p; 389244961713Sgirish p_rx_mbox_t *rx_mbox_p; 389344961713Sgirish nxge_status_t status = NXGE_OK; 389444961713Sgirish 389544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 389644961713Sgirish 389744961713Sgirish rx_rbr_rings = nxgep->rx_rbr_rings; 389844961713Sgirish rx_rcr_rings = nxgep->rx_rcr_rings; 389944961713Sgirish if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 390044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 3901*52ccf843Smisaki "<== nxge_rxdma_hw_start: NULL ring pointers")); 390244961713Sgirish return (NXGE_ERROR); 390344961713Sgirish } 390444961713Sgirish ndmas = rx_rbr_rings->ndmas; 390544961713Sgirish if (ndmas == 0) { 390644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 3907*52ccf843Smisaki "<== nxge_rxdma_hw_start: no dma channel allocated")); 390844961713Sgirish return (NXGE_ERROR); 390944961713Sgirish } 391044961713Sgirish 391144961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3912*52ccf843Smisaki "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 391344961713Sgirish 391444961713Sgirish rbr_rings = rx_rbr_rings->rbr_rings; 391544961713Sgirish rcr_rings = rx_rcr_rings->rcr_rings; 391644961713Sgirish rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 391744961713Sgirish if (rx_mbox_areas_p) { 391844961713Sgirish rx_mbox_p = 
rx_mbox_areas_p->rxmbox_areas; 391944961713Sgirish } 392044961713Sgirish 3921678453a8Sspeer i = channel; 3922678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3923*52ccf843Smisaki "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 3924*52ccf843Smisaki ndmas, channel)); 3925678453a8Sspeer status = nxge_rxdma_start_channel(nxgep, channel, 3926678453a8Sspeer (p_rx_rbr_ring_t)rbr_rings[i], 3927678453a8Sspeer (p_rx_rcr_ring_t)rcr_rings[i], 3928678453a8Sspeer (p_rx_mbox_t)rx_mbox_p[i]); 3929678453a8Sspeer if (status != NXGE_OK) { 3930678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3931678453a8Sspeer "==> nxge_rxdma_hw_start: disable " 3932678453a8Sspeer "(status 0x%x channel %d)", status, channel)); 3933678453a8Sspeer return (status); 393444961713Sgirish } 393544961713Sgirish 393644961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 3937*52ccf843Smisaki "rx_rbr_rings 0x%016llx rings 0x%016llx", 3938*52ccf843Smisaki rx_rbr_rings, rx_rcr_rings)); 393944961713Sgirish 394044961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3941*52ccf843Smisaki "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 394244961713Sgirish 394344961713Sgirish return (status); 394444961713Sgirish } 394544961713Sgirish 394644961713Sgirish static void 3947678453a8Sspeer nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel) 394844961713Sgirish { 394944961713Sgirish p_rx_rbr_rings_t rx_rbr_rings; 395044961713Sgirish p_rx_rcr_rings_t rx_rcr_rings; 395144961713Sgirish 395244961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop")); 395344961713Sgirish 395444961713Sgirish rx_rbr_rings = nxgep->rx_rbr_rings; 395544961713Sgirish rx_rcr_rings = nxgep->rx_rcr_rings; 395644961713Sgirish if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 395744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 3958*52ccf843Smisaki "<== nxge_rxdma_hw_stop: NULL ring pointers")); 395944961713Sgirish return; 396044961713Sgirish } 396144961713Sgirish 396244961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 
3963*52ccf843Smisaki "==> nxge_rxdma_hw_stop(channel %d)", 3964*52ccf843Smisaki channel)); 3965678453a8Sspeer (void) nxge_rxdma_stop_channel(nxgep, channel); 396644961713Sgirish 396744961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 3968*52ccf843Smisaki "rx_rbr_rings 0x%016llx rings 0x%016llx", 3969*52ccf843Smisaki rx_rbr_rings, rx_rcr_rings)); 397044961713Sgirish 397144961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 397244961713Sgirish } 397344961713Sgirish 397444961713Sgirish 397544961713Sgirish static nxge_status_t 397644961713Sgirish nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 397744961713Sgirish p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 397844961713Sgirish 397944961713Sgirish { 398044961713Sgirish npi_handle_t handle; 398144961713Sgirish npi_status_t rs = NPI_SUCCESS; 398244961713Sgirish rx_dma_ctl_stat_t cs; 398344961713Sgirish rx_dma_ent_msk_t ent_mask; 398444961713Sgirish nxge_status_t status = NXGE_OK; 398544961713Sgirish 398644961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 398744961713Sgirish 398844961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 398944961713Sgirish 399044961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 399144961713Sgirish "npi handle addr $%p acc $%p", 399244961713Sgirish nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 399344961713Sgirish 3994678453a8Sspeer /* Reset RXDMA channel, but not if you're a guest. 
*/ 3995678453a8Sspeer if (!isLDOMguest(nxgep)) { 3996678453a8Sspeer rs = npi_rxdma_cfg_rdc_reset(handle, channel); 3997678453a8Sspeer if (rs != NPI_SUCCESS) { 3998678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3999678453a8Sspeer "==> nxge_init_fzc_rdc: " 4000678453a8Sspeer "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x", 4001678453a8Sspeer channel, rs)); 4002678453a8Sspeer return (NXGE_ERROR | rs); 4003678453a8Sspeer } 4004678453a8Sspeer 4005678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4006678453a8Sspeer "==> nxge_rxdma_start_channel: reset done: channel %d", 4007678453a8Sspeer channel)); 400844961713Sgirish } 400944961713Sgirish 4010678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4011678453a8Sspeer if (isLDOMguest(nxgep)) 4012678453a8Sspeer (void) nxge_rdc_lp_conf(nxgep, channel); 4013678453a8Sspeer #endif 401444961713Sgirish 401544961713Sgirish /* 401644961713Sgirish * Initialize the RXDMA channel specific FZC control 401744961713Sgirish * configurations. These FZC registers are pertaining 401844961713Sgirish * to each RX channel (logical pages). 401944961713Sgirish */ 4020678453a8Sspeer if (!isLDOMguest(nxgep)) { 4021678453a8Sspeer status = nxge_init_fzc_rxdma_channel(nxgep, channel); 4022678453a8Sspeer if (status != NXGE_OK) { 4023678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4024678453a8Sspeer "==> nxge_rxdma_start_channel: " 4025678453a8Sspeer "init fzc rxdma failed (0x%08x channel %d)", 4026678453a8Sspeer status, channel)); 4027678453a8Sspeer return (status); 4028678453a8Sspeer } 402944961713Sgirish 4030678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4031678453a8Sspeer "==> nxge_rxdma_start_channel: fzc done")); 4032678453a8Sspeer } 403344961713Sgirish 403444961713Sgirish /* Set up the interrupt event masks. 
*/ 403544961713Sgirish ent_mask.value = 0; 403644961713Sgirish ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK; 403744961713Sgirish rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4038678453a8Sspeer &ent_mask); 403944961713Sgirish if (rs != NPI_SUCCESS) { 404044961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 404144961713Sgirish "==> nxge_rxdma_start_channel: " 4042678453a8Sspeer "init rxdma event masks failed " 4043678453a8Sspeer "(0x%08x channel %d)", 404444961713Sgirish status, channel)); 404544961713Sgirish return (NXGE_ERROR | rs); 404644961713Sgirish } 404744961713Sgirish 4048678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4049678453a8Sspeer "==> nxge_rxdma_start_channel: " 405044961713Sgirish "event done: channel %d (mask 0x%016llx)", 405144961713Sgirish channel, ent_mask.value)); 405244961713Sgirish 405344961713Sgirish /* Initialize the receive DMA control and status register */ 405444961713Sgirish cs.value = 0; 405544961713Sgirish cs.bits.hdw.mex = 1; 405644961713Sgirish cs.bits.hdw.rcrthres = 1; 405744961713Sgirish cs.bits.hdw.rcrto = 1; 405844961713Sgirish cs.bits.hdw.rbr_empty = 1; 405944961713Sgirish status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 406044961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 406144961713Sgirish "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value)); 406244961713Sgirish if (status != NXGE_OK) { 406344961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 406444961713Sgirish "==> nxge_rxdma_start_channel: " 406544961713Sgirish "init rxdma control register failed (0x%08x channel %d", 406644961713Sgirish status, channel)); 406744961713Sgirish return (status); 406844961713Sgirish } 406944961713Sgirish 407044961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 407144961713Sgirish "control done - channel %d cs 0x%016llx", channel, cs.value)); 407244961713Sgirish 407344961713Sgirish /* 407444961713Sgirish * Load RXDMA descriptors, buffers, mailbox, 
407544961713Sgirish * initialise the receive DMA channels and 407644961713Sgirish * enable each DMA channel. 407744961713Sgirish */ 407844961713Sgirish status = nxge_enable_rxdma_channel(nxgep, 4079678453a8Sspeer channel, rbr_p, rcr_p, mbox_p); 408044961713Sgirish 408144961713Sgirish if (status != NXGE_OK) { 408244961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4083678453a8Sspeer " nxge_rxdma_start_channel: " 4084678453a8Sspeer " enable rxdma failed (0x%08x channel %d)", 4085678453a8Sspeer status, channel)); 408644961713Sgirish return (status); 408744961713Sgirish } 408844961713Sgirish 4089678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4090678453a8Sspeer "==> nxge_rxdma_start_channel: enabled channel %d")); 4091678453a8Sspeer 4092678453a8Sspeer if (isLDOMguest(nxgep)) { 4093678453a8Sspeer /* Add interrupt handler for this channel. */ 4094678453a8Sspeer if (nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel) 4095678453a8Sspeer != NXGE_OK) { 4096678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4097678453a8Sspeer " nxge_rxdma_start_channel: " 4098678453a8Sspeer " nxge_hio_intr_add failed (0x%08x channel %d)", 4099678453a8Sspeer status, channel)); 4100678453a8Sspeer } 4101678453a8Sspeer } 4102678453a8Sspeer 410344961713Sgirish ent_mask.value = 0; 410444961713Sgirish ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 410544961713Sgirish RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 410644961713Sgirish rs = npi_rxdma_event_mask(handle, OP_SET, channel, 410744961713Sgirish &ent_mask); 410844961713Sgirish if (rs != NPI_SUCCESS) { 410944961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 411044961713Sgirish "==> nxge_rxdma_start_channel: " 411144961713Sgirish "init rxdma event masks failed (0x%08x channel %d)", 411244961713Sgirish status, channel)); 411344961713Sgirish return (NXGE_ERROR | rs); 411444961713Sgirish } 411544961713Sgirish 411644961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 411744961713Sgirish "control done - channel %d cs 0x%016llx", channel, 
cs.value)); 411844961713Sgirish 411944961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 412044961713Sgirish 412144961713Sgirish return (NXGE_OK); 412244961713Sgirish } 412344961713Sgirish 412444961713Sgirish static nxge_status_t 412544961713Sgirish nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 412644961713Sgirish { 412744961713Sgirish npi_handle_t handle; 412844961713Sgirish npi_status_t rs = NPI_SUCCESS; 412944961713Sgirish rx_dma_ctl_stat_t cs; 413044961713Sgirish rx_dma_ent_msk_t ent_mask; 413144961713Sgirish nxge_status_t status = NXGE_OK; 413244961713Sgirish 413344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel")); 413444961713Sgirish 413544961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 413644961713Sgirish 413744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: " 4138*52ccf843Smisaki "npi handle addr $%p acc $%p", 4139*52ccf843Smisaki nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 414044961713Sgirish 414144961713Sgirish /* Reset RXDMA channel */ 414244961713Sgirish rs = npi_rxdma_cfg_rdc_reset(handle, channel); 414344961713Sgirish if (rs != NPI_SUCCESS) { 414444961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4145*52ccf843Smisaki " nxge_rxdma_stop_channel: " 4146*52ccf843Smisaki " reset rxdma failed (0x%08x channel %d)", 4147*52ccf843Smisaki rs, channel)); 414844961713Sgirish return (NXGE_ERROR | rs); 414944961713Sgirish } 415044961713Sgirish 415144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 4152*52ccf843Smisaki "==> nxge_rxdma_stop_channel: reset done")); 415344961713Sgirish 415444961713Sgirish /* Set up the interrupt event masks. 
*/ 415544961713Sgirish ent_mask.value = RX_DMA_ENT_MSK_ALL; 415644961713Sgirish rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4157*52ccf843Smisaki &ent_mask); 415844961713Sgirish if (rs != NPI_SUCCESS) { 415944961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4160*52ccf843Smisaki "==> nxge_rxdma_stop_channel: " 4161*52ccf843Smisaki "set rxdma event masks failed (0x%08x channel %d)", 4162*52ccf843Smisaki rs, channel)); 416344961713Sgirish return (NXGE_ERROR | rs); 416444961713Sgirish } 416544961713Sgirish 416644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 4167*52ccf843Smisaki "==> nxge_rxdma_stop_channel: event done")); 416844961713Sgirish 416944961713Sgirish /* Initialize the receive DMA control and status register */ 417044961713Sgirish cs.value = 0; 417144961713Sgirish status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, 4172*52ccf843Smisaki &cs); 417344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 4174*52ccf843Smisaki " to default (all 0s) 0x%08x", cs.value)); 417544961713Sgirish if (status != NXGE_OK) { 417644961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4177*52ccf843Smisaki " nxge_rxdma_stop_channel: init rxdma" 4178*52ccf843Smisaki " control register failed (0x%08x channel %d", 4179*52ccf843Smisaki status, channel)); 418044961713Sgirish return (status); 418144961713Sgirish } 418244961713Sgirish 418344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 4184*52ccf843Smisaki "==> nxge_rxdma_stop_channel: control done")); 418544961713Sgirish 418644961713Sgirish /* disable dma channel */ 418744961713Sgirish status = nxge_disable_rxdma_channel(nxgep, channel); 418844961713Sgirish 418944961713Sgirish if (status != NXGE_OK) { 419044961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4191*52ccf843Smisaki " nxge_rxdma_stop_channel: " 4192*52ccf843Smisaki " init enable rxdma failed (0x%08x channel %d)", 4193*52ccf843Smisaki status, channel)); 419444961713Sgirish return (status); 419544961713Sgirish } 419644961713Sgirish 
419744961713Sgirish NXGE_DEBUG_MSG((nxgep, 4198*52ccf843Smisaki RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 419944961713Sgirish 420044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 420144961713Sgirish 420244961713Sgirish return (NXGE_OK); 420344961713Sgirish } 420444961713Sgirish 420544961713Sgirish nxge_status_t 420644961713Sgirish nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 420744961713Sgirish { 420844961713Sgirish npi_handle_t handle; 420944961713Sgirish p_nxge_rdc_sys_stats_t statsp; 421044961713Sgirish rx_ctl_dat_fifo_stat_t stat; 421144961713Sgirish uint32_t zcp_err_status; 421244961713Sgirish uint32_t ipp_err_status; 421344961713Sgirish nxge_status_t status = NXGE_OK; 421444961713Sgirish npi_status_t rs = NPI_SUCCESS; 421544961713Sgirish boolean_t my_err = B_FALSE; 421644961713Sgirish 421744961713Sgirish handle = nxgep->npi_handle; 421844961713Sgirish statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 421944961713Sgirish 422044961713Sgirish rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 422144961713Sgirish 422244961713Sgirish if (rs != NPI_SUCCESS) 422344961713Sgirish return (NXGE_ERROR | rs); 422444961713Sgirish 422544961713Sgirish if (stat.bits.ldw.id_mismatch) { 422644961713Sgirish statsp->id_mismatch++; 422744961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 4228*52ccf843Smisaki NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 422944961713Sgirish /* Global fatal error encountered */ 423044961713Sgirish } 423144961713Sgirish 423244961713Sgirish if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 423344961713Sgirish switch (nxgep->mac.portnum) { 423444961713Sgirish case 0: 423544961713Sgirish if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 4236*52ccf843Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 423744961713Sgirish my_err = B_TRUE; 423844961713Sgirish zcp_err_status = stat.bits.ldw.zcp_eop_err; 423944961713Sgirish ipp_err_status = stat.bits.ldw.ipp_eop_err; 
424044961713Sgirish } 424144961713Sgirish break; 424244961713Sgirish case 1: 424344961713Sgirish if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 4244*52ccf843Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 424544961713Sgirish my_err = B_TRUE; 424644961713Sgirish zcp_err_status = stat.bits.ldw.zcp_eop_err; 424744961713Sgirish ipp_err_status = stat.bits.ldw.ipp_eop_err; 424844961713Sgirish } 424944961713Sgirish break; 425044961713Sgirish case 2: 425144961713Sgirish if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 4252*52ccf843Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 425344961713Sgirish my_err = B_TRUE; 425444961713Sgirish zcp_err_status = stat.bits.ldw.zcp_eop_err; 425544961713Sgirish ipp_err_status = stat.bits.ldw.ipp_eop_err; 425644961713Sgirish } 425744961713Sgirish break; 425844961713Sgirish case 3: 425944961713Sgirish if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 4260*52ccf843Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 426144961713Sgirish my_err = B_TRUE; 426244961713Sgirish zcp_err_status = stat.bits.ldw.zcp_eop_err; 426344961713Sgirish ipp_err_status = stat.bits.ldw.ipp_eop_err; 426444961713Sgirish } 426544961713Sgirish break; 426644961713Sgirish default: 426744961713Sgirish return (NXGE_ERROR); 426844961713Sgirish } 426944961713Sgirish } 427044961713Sgirish 427144961713Sgirish if (my_err) { 427244961713Sgirish status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 4273*52ccf843Smisaki zcp_err_status); 427444961713Sgirish if (status != NXGE_OK) 427544961713Sgirish return (status); 427644961713Sgirish } 427744961713Sgirish 427844961713Sgirish return (NXGE_OK); 427944961713Sgirish } 428044961713Sgirish 428144961713Sgirish static nxge_status_t 428244961713Sgirish nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 428344961713Sgirish uint32_t zcp_status) 428444961713Sgirish { 428544961713Sgirish boolean_t rxport_fatal = B_FALSE; 428644961713Sgirish p_nxge_rdc_sys_stats_t statsp; 
428744961713Sgirish nxge_status_t status = NXGE_OK; 428844961713Sgirish uint8_t portn; 428944961713Sgirish 429044961713Sgirish portn = nxgep->mac.portnum; 429144961713Sgirish statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 429244961713Sgirish 429344961713Sgirish if (ipp_status & (0x1 << portn)) { 429444961713Sgirish statsp->ipp_eop_err++; 429544961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4296*52ccf843Smisaki NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 429744961713Sgirish rxport_fatal = B_TRUE; 429844961713Sgirish } 429944961713Sgirish 430044961713Sgirish if (zcp_status & (0x1 << portn)) { 430144961713Sgirish statsp->zcp_eop_err++; 430244961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4303*52ccf843Smisaki NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 430444961713Sgirish rxport_fatal = B_TRUE; 430544961713Sgirish } 430644961713Sgirish 430744961713Sgirish if (rxport_fatal) { 430844961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4309*52ccf843Smisaki " nxge_rxdma_handle_port_error: " 4310*52ccf843Smisaki " fatal error on Port #%d\n", 4311*52ccf843Smisaki portn)); 431244961713Sgirish status = nxge_rx_port_fatal_err_recover(nxgep); 431344961713Sgirish if (status == NXGE_OK) { 431444961713Sgirish FM_SERVICE_RESTORED(nxgep); 431544961713Sgirish } 431644961713Sgirish } 431744961713Sgirish 431844961713Sgirish return (status); 431944961713Sgirish } 432044961713Sgirish 432144961713Sgirish static nxge_status_t 432244961713Sgirish nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel) 432344961713Sgirish { 432444961713Sgirish npi_handle_t handle; 432544961713Sgirish npi_status_t rs = NPI_SUCCESS; 432644961713Sgirish nxge_status_t status = NXGE_OK; 432744961713Sgirish p_rx_rbr_ring_t rbrp; 432844961713Sgirish p_rx_rcr_ring_t rcrp; 432944961713Sgirish p_rx_mbox_t mboxp; 433044961713Sgirish rx_dma_ent_msk_t ent_mask; 433144961713Sgirish p_nxge_dma_common_t dmap; 433244961713Sgirish int ring_idx; 433344961713Sgirish uint32_t ref_cnt; 433444961713Sgirish 
p_rx_msg_t rx_msg_p; 433544961713Sgirish int i; 433644961713Sgirish uint32_t nxge_port_rcr_size; 433744961713Sgirish 433844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover")); 433944961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4340*52ccf843Smisaki "Recovering from RxDMAChannel#%d error...", channel)); 434144961713Sgirish 434244961713Sgirish /* 434344961713Sgirish * Stop the dma channel waits for the stop done. 434444961713Sgirish * If the stop done bit is not set, then create 434544961713Sgirish * an error. 434644961713Sgirish */ 434744961713Sgirish 434844961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 434944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 435044961713Sgirish 435144961713Sgirish ring_idx = nxge_rxdma_get_ring_index(nxgep, channel); 435244961713Sgirish rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx]; 435344961713Sgirish rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx]; 435444961713Sgirish 435544961713Sgirish MUTEX_ENTER(&rcrp->lock); 435644961713Sgirish MUTEX_ENTER(&rbrp->lock); 435744961713Sgirish MUTEX_ENTER(&rbrp->post_lock); 435844961713Sgirish 435944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 436044961713Sgirish 436144961713Sgirish rs = npi_rxdma_cfg_rdc_disable(handle, channel); 436244961713Sgirish if (rs != NPI_SUCCESS) { 436344961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4364*52ccf843Smisaki "nxge_disable_rxdma_channel:failed")); 436544961713Sgirish goto fail; 436644961713Sgirish } 436744961713Sgirish 436844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 436944961713Sgirish 437044961713Sgirish /* Disable interrupt */ 437144961713Sgirish ent_mask.value = RX_DMA_ENT_MSK_ALL; 437244961713Sgirish rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 437344961713Sgirish if (rs != NPI_SUCCESS) { 437444961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4375*52ccf843Smisaki 
"nxge_rxdma_stop_channel: " 4376*52ccf843Smisaki "set rxdma event masks failed (channel %d)", 4377*52ccf843Smisaki channel)); 437844961713Sgirish } 437944961713Sgirish 438044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 438144961713Sgirish 438244961713Sgirish /* Reset RXDMA channel */ 438344961713Sgirish rs = npi_rxdma_cfg_rdc_reset(handle, channel); 438444961713Sgirish if (rs != NPI_SUCCESS) { 438544961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4386*52ccf843Smisaki "nxge_rxdma_fatal_err_recover: " 4387*52ccf843Smisaki " reset rxdma failed (channel %d)", channel)); 438844961713Sgirish goto fail; 438944961713Sgirish } 439044961713Sgirish 439144961713Sgirish nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 439244961713Sgirish 439344961713Sgirish mboxp = 4394*52ccf843Smisaki (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 439544961713Sgirish 439644961713Sgirish rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 439744961713Sgirish rbrp->rbr_rd_index = 0; 439844961713Sgirish 439944961713Sgirish rcrp->comp_rd_index = 0; 440044961713Sgirish rcrp->comp_wt_index = 0; 440144961713Sgirish rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 4402*52ccf843Smisaki (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 4403adfcba55Sjoycey #if defined(__i386) 4404*52ccf843Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4405*52ccf843Smisaki (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4406adfcba55Sjoycey #else 4407*52ccf843Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4408*52ccf843Smisaki (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4409adfcba55Sjoycey #endif 441044961713Sgirish 441144961713Sgirish rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 4412*52ccf843Smisaki (nxge_port_rcr_size - 1); 441344961713Sgirish rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 4414*52ccf843Smisaki (nxge_port_rcr_size - 1); 441544961713Sgirish 441644961713Sgirish dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 
441744961713Sgirish bzero((caddr_t)dmap->kaddrp, dmap->alength); 441844961713Sgirish 441944961713Sgirish cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 442044961713Sgirish 442144961713Sgirish for (i = 0; i < rbrp->rbr_max_size; i++) { 442244961713Sgirish rx_msg_p = rbrp->rx_msg_ring[i]; 442344961713Sgirish ref_cnt = rx_msg_p->ref_cnt; 442444961713Sgirish if (ref_cnt != 1) { 4425a3c5bd6dSspeer if (rx_msg_p->cur_usage_cnt != 4426*52ccf843Smisaki rx_msg_p->max_usage_cnt) { 442744961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4428*52ccf843Smisaki "buf[%d]: cur_usage_cnt = %d " 4429*52ccf843Smisaki "max_usage_cnt = %d\n", i, 4430*52ccf843Smisaki rx_msg_p->cur_usage_cnt, 4431*52ccf843Smisaki rx_msg_p->max_usage_cnt)); 4432a3c5bd6dSspeer } else { 4433a3c5bd6dSspeer /* Buffer can be re-posted */ 4434a3c5bd6dSspeer rx_msg_p->free = B_TRUE; 4435a3c5bd6dSspeer rx_msg_p->cur_usage_cnt = 0; 4436a3c5bd6dSspeer rx_msg_p->max_usage_cnt = 0xbaddcafe; 4437a3c5bd6dSspeer rx_msg_p->pkt_buf_size = 0; 4438a3c5bd6dSspeer } 443944961713Sgirish } 444044961713Sgirish } 444144961713Sgirish 444244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 444344961713Sgirish 444444961713Sgirish status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 444544961713Sgirish if (status != NXGE_OK) { 444644961713Sgirish goto fail; 444744961713Sgirish } 444844961713Sgirish 444944961713Sgirish MUTEX_EXIT(&rbrp->post_lock); 445044961713Sgirish MUTEX_EXIT(&rbrp->lock); 445144961713Sgirish MUTEX_EXIT(&rcrp->lock); 445244961713Sgirish 445344961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4454*52ccf843Smisaki "Recovery Successful, RxDMAChannel#%d Restored", 4455*52ccf843Smisaki channel)); 445644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover")); 445744961713Sgirish 445844961713Sgirish return (NXGE_OK); 445944961713Sgirish fail: 446044961713Sgirish MUTEX_EXIT(&rbrp->post_lock); 446144961713Sgirish MUTEX_EXIT(&rbrp->lock); 
446244961713Sgirish MUTEX_EXIT(&rcrp->lock); 446344961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 446444961713Sgirish 446544961713Sgirish return (NXGE_ERROR | rs); 446644961713Sgirish } 446744961713Sgirish 446844961713Sgirish nxge_status_t 446944961713Sgirish nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 447044961713Sgirish { 4471678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 4472678453a8Sspeer nxge_status_t status = NXGE_OK; 4473678453a8Sspeer int rdc; 447444961713Sgirish 447544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover")); 447644961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4477*52ccf843Smisaki "Recovering from RxPort error...")); 4478678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n")); 447944961713Sgirish 448044961713Sgirish if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 448144961713Sgirish goto fail; 448244961713Sgirish 448344961713Sgirish NXGE_DELAY(1000); 448444961713Sgirish 4485678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels...")); 448644961713Sgirish 4487678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 4488678453a8Sspeer if ((1 << rdc) & set->owned.map) { 4489678453a8Sspeer if (nxge_rxdma_fatal_err_recover(nxgep, rdc) 4490678453a8Sspeer != NXGE_OK) { 4491678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4492678453a8Sspeer "Could not recover channel %d", rdc)); 4493678453a8Sspeer } 449444961713Sgirish } 449544961713Sgirish } 449644961713Sgirish 4497678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP...")); 449844961713Sgirish 449944961713Sgirish /* Reset IPP */ 450044961713Sgirish if (nxge_ipp_reset(nxgep) != NXGE_OK) { 450144961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4502*52ccf843Smisaki "nxge_rx_port_fatal_err_recover: " 4503*52ccf843Smisaki "Failed to reset IPP")); 450444961713Sgirish goto fail; 450544961713Sgirish } 450644961713Sgirish 450744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 
450844961713Sgirish 450944961713Sgirish /* Reset RxMAC */ 451044961713Sgirish if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 451144961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4512*52ccf843Smisaki "nxge_rx_port_fatal_err_recover: " 4513*52ccf843Smisaki "Failed to reset RxMAC")); 451444961713Sgirish goto fail; 451544961713Sgirish } 451644961713Sgirish 451744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 451844961713Sgirish 451944961713Sgirish /* Re-Initialize IPP */ 452044961713Sgirish if (nxge_ipp_init(nxgep) != NXGE_OK) { 452144961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4522*52ccf843Smisaki "nxge_rx_port_fatal_err_recover: " 4523*52ccf843Smisaki "Failed to init IPP")); 452444961713Sgirish goto fail; 452544961713Sgirish } 452644961713Sgirish 452744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 452844961713Sgirish 452944961713Sgirish /* Re-Initialize RxMAC */ 453044961713Sgirish if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 453144961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4532*52ccf843Smisaki "nxge_rx_port_fatal_err_recover: " 4533*52ccf843Smisaki "Failed to reset RxMAC")); 453444961713Sgirish goto fail; 453544961713Sgirish } 453644961713Sgirish 453744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 453844961713Sgirish 453944961713Sgirish /* Re-enable RxMAC */ 454044961713Sgirish if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 454144961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4542*52ccf843Smisaki "nxge_rx_port_fatal_err_recover: " 4543*52ccf843Smisaki "Failed to enable RxMAC")); 454444961713Sgirish goto fail; 454544961713Sgirish } 454644961713Sgirish 454744961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4548*52ccf843Smisaki "Recovery Successful, RxPort Restored")); 454944961713Sgirish 455044961713Sgirish return (NXGE_OK); 455144961713Sgirish fail: 455244961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 455344961713Sgirish return 
(status); 455444961713Sgirish } 455544961713Sgirish 455644961713Sgirish void 455744961713Sgirish nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 455844961713Sgirish { 455944961713Sgirish rx_dma_ctl_stat_t cs; 456044961713Sgirish rx_ctl_dat_fifo_stat_t cdfs; 456144961713Sgirish 456244961713Sgirish switch (err_id) { 456344961713Sgirish case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 456444961713Sgirish case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 456544961713Sgirish case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 456644961713Sgirish case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 456744961713Sgirish case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 456844961713Sgirish case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 456944961713Sgirish case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 457044961713Sgirish case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 457144961713Sgirish case NXGE_FM_EREPORT_RDMC_RCRINCON: 457244961713Sgirish case NXGE_FM_EREPORT_RDMC_RCRFULL: 457344961713Sgirish case NXGE_FM_EREPORT_RDMC_RBRFULL: 457444961713Sgirish case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 457544961713Sgirish case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 457644961713Sgirish case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 457744961713Sgirish RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4578*52ccf843Smisaki chan, &cs.value); 457944961713Sgirish if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 458044961713Sgirish cs.bits.hdw.rcr_ack_err = 1; 458144961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 458244961713Sgirish cs.bits.hdw.dc_fifo_err = 1; 458344961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 458444961713Sgirish cs.bits.hdw.rcr_sha_par = 1; 458544961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 458644961713Sgirish cs.bits.hdw.rbr_pre_par = 1; 458744961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 458844961713Sgirish cs.bits.hdw.rbr_tmout = 1; 458944961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 459044961713Sgirish cs.bits.hdw.rsp_cnt_err = 1; 
459144961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 459244961713Sgirish cs.bits.hdw.byte_en_bus = 1; 459344961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 459444961713Sgirish cs.bits.hdw.rsp_dat_err = 1; 459544961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 459644961713Sgirish cs.bits.hdw.config_err = 1; 459744961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 459844961713Sgirish cs.bits.hdw.rcrincon = 1; 459944961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 460044961713Sgirish cs.bits.hdw.rcrfull = 1; 460144961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 460244961713Sgirish cs.bits.hdw.rbrfull = 1; 460344961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 460444961713Sgirish cs.bits.hdw.rbrlogpage = 1; 460544961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 460644961713Sgirish cs.bits.hdw.cfiglogpage = 1; 4607adfcba55Sjoycey #if defined(__i386) 4608adfcba55Sjoycey cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 4609*52ccf843Smisaki cs.value); 4610adfcba55Sjoycey #else 461144961713Sgirish cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 4612*52ccf843Smisaki cs.value); 4613adfcba55Sjoycey #endif 461444961713Sgirish RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4615*52ccf843Smisaki chan, cs.value); 461644961713Sgirish break; 461744961713Sgirish case NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 461844961713Sgirish case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 461944961713Sgirish case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 462044961713Sgirish cdfs.value = 0; 462144961713Sgirish if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 462244961713Sgirish cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 462344961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 462444961713Sgirish cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 462544961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 
462644961713Sgirish cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 4627adfcba55Sjoycey #if defined(__i386) 4628adfcba55Sjoycey cmn_err(CE_NOTE, 4629*52ccf843Smisaki "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4630*52ccf843Smisaki cdfs.value); 4631adfcba55Sjoycey #else 463244961713Sgirish cmn_err(CE_NOTE, 4633*52ccf843Smisaki "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4634*52ccf843Smisaki cdfs.value); 4635adfcba55Sjoycey #endif 4636678453a8Sspeer NXGE_REG_WR64(nxgep->npi_handle, 4637678453a8Sspeer RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value); 463844961713Sgirish break; 463944961713Sgirish case NXGE_FM_EREPORT_RDMC_DCF_ERR: 464044961713Sgirish break; 464153f3d8ecSyc case NXGE_FM_EREPORT_RDMC_RCR_ERR: 464244961713Sgirish break; 464344961713Sgirish } 464444961713Sgirish } 4645678453a8Sspeer 4646678453a8Sspeer static void 4647678453a8Sspeer nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p) 4648678453a8Sspeer { 4649678453a8Sspeer rxring_info_t *ring_info; 4650678453a8Sspeer int index; 4651678453a8Sspeer uint32_t chunk_size; 4652678453a8Sspeer uint64_t kaddr; 4653678453a8Sspeer uint_t num_blocks; 4654678453a8Sspeer 4655678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free")); 4656678453a8Sspeer 4657678453a8Sspeer if (rbr_p == NULL) { 4658678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4659678453a8Sspeer "==> nxge_rxdma_databuf_free: NULL rbr pointer")); 4660678453a8Sspeer return; 4661678453a8Sspeer } 4662678453a8Sspeer 4663678453a8Sspeer if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) { 4664678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4665678453a8Sspeer "==> nxge_rxdma_databuf_free: DDI")); 4666678453a8Sspeer return; 4667678453a8Sspeer } 4668678453a8Sspeer 4669678453a8Sspeer ring_info = rbr_p->ring_info; 4670678453a8Sspeer if (ring_info == NULL) { 4671678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4672678453a8Sspeer "==> nxge_rxdma_databuf_free: NULL ring info")); 4673678453a8Sspeer return; 4674678453a8Sspeer } 
4675678453a8Sspeer num_blocks = rbr_p->num_blocks; 4676678453a8Sspeer for (index = 0; index < num_blocks; index++) { 4677678453a8Sspeer kaddr = ring_info->buffer[index].kaddr; 4678678453a8Sspeer chunk_size = ring_info->buffer[index].buf_size; 4679678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 4680678453a8Sspeer "==> nxge_rxdma_databuf_free: free chunk %d " 4681678453a8Sspeer "kaddrp $%p chunk size %d", 4682678453a8Sspeer index, kaddr, chunk_size)); 4683678453a8Sspeer if (kaddr == NULL) continue; 4684678453a8Sspeer nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size); 4685678453a8Sspeer ring_info->buffer[index].kaddr = NULL; 4686678453a8Sspeer } 4687678453a8Sspeer 4688678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free")); 4689678453a8Sspeer } 4690678453a8Sspeer 4691678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4692678453a8Sspeer extern void contig_mem_free(void *, size_t); 4693678453a8Sspeer #endif 4694678453a8Sspeer 4695678453a8Sspeer void 4696678453a8Sspeer nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size) 4697678453a8Sspeer { 4698678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf")); 4699678453a8Sspeer 4700678453a8Sspeer if (kaddr == NULL || !buf_size) { 4701678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4702678453a8Sspeer "==> nxge_free_buf: invalid kaddr $%p size to free %d", 4703678453a8Sspeer kaddr, buf_size)); 4704678453a8Sspeer return; 4705678453a8Sspeer } 4706678453a8Sspeer 4707678453a8Sspeer switch (alloc_type) { 4708678453a8Sspeer case KMEM_ALLOC: 4709678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 4710678453a8Sspeer "==> nxge_free_buf: freeing kmem $%p size %d", 4711678453a8Sspeer kaddr, buf_size)); 4712678453a8Sspeer #if defined(__i386) 4713678453a8Sspeer KMEM_FREE((void *)(uint32_t)kaddr, buf_size); 4714678453a8Sspeer #else 4715678453a8Sspeer KMEM_FREE((void *)kaddr, buf_size); 4716678453a8Sspeer #endif 4717678453a8Sspeer break; 4718678453a8Sspeer 
4719678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4720678453a8Sspeer case CONTIG_MEM_ALLOC: 4721678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 4722678453a8Sspeer "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d", 4723678453a8Sspeer kaddr, buf_size)); 4724678453a8Sspeer contig_mem_free((void *)kaddr, buf_size); 4725678453a8Sspeer break; 4726678453a8Sspeer #endif 4727678453a8Sspeer 4728678453a8Sspeer default: 4729678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4730678453a8Sspeer "<== nxge_free_buf: unsupported alloc type %d", 4731678453a8Sspeer alloc_type)); 4732678453a8Sspeer return; 4733678453a8Sspeer } 4734678453a8Sspeer 4735678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf")); 4736678453a8Sspeer } 4737