144961713Sgirish /* 244961713Sgirish * CDDL HEADER START 344961713Sgirish * 444961713Sgirish * The contents of this file are subject to the terms of the 544961713Sgirish * Common Development and Distribution License (the "License"). 644961713Sgirish * You may not use this file except in compliance with the License. 744961713Sgirish * 844961713Sgirish * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 944961713Sgirish * or http://www.opensolaris.org/os/licensing. 1044961713Sgirish * See the License for the specific language governing permissions 1144961713Sgirish * and limitations under the License. 1244961713Sgirish * 1344961713Sgirish * When distributing Covered Code, include this CDDL HEADER in each 1444961713Sgirish * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 1544961713Sgirish * If applicable, add the following below this CDDL HEADER, with the 1644961713Sgirish * fields enclosed by brackets "[]" replaced with your own identifying 1744961713Sgirish * information: Portions Copyright [yyyy] [name of copyright owner] 1844961713Sgirish * 1944961713Sgirish * CDDL HEADER END 2044961713Sgirish */ 21*ef523517SMichael Speer 2244961713Sgirish /* 237b26d9ffSSantwona Behera * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 2444961713Sgirish * Use is subject to license terms. 
2544961713Sgirish */ 2644961713Sgirish 2744961713Sgirish #include <sys/nxge/nxge_impl.h> 2844961713Sgirish #include <sys/nxge/nxge_rxdma.h> 29678453a8Sspeer #include <sys/nxge/nxge_hio.h> 30678453a8Sspeer 31678453a8Sspeer #if !defined(_BIG_ENDIAN) 32678453a8Sspeer #include <npi_rx_rd32.h> 33678453a8Sspeer #endif 34678453a8Sspeer #include <npi_rx_rd64.h> 35678453a8Sspeer #include <npi_rx_wr64.h> 3644961713Sgirish 3744961713Sgirish #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \ 38678453a8Sspeer (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid) 3944961713Sgirish #define NXGE_ACTUAL_RDC(nxgep, rdc) \ 4044961713Sgirish (rdc + nxgep->pt_config.hw_config.start_rdc) 4144961713Sgirish 4244961713Sgirish /* 4344961713Sgirish * Globals: tunable parameters (/etc/system or adb) 4444961713Sgirish * 4544961713Sgirish */ 4644961713Sgirish extern uint32_t nxge_rbr_size; 4744961713Sgirish extern uint32_t nxge_rcr_size; 4844961713Sgirish extern uint32_t nxge_rbr_spare_size; 4944961713Sgirish 5044961713Sgirish extern uint32_t nxge_mblks_pending; 5144961713Sgirish 5244961713Sgirish /* 5344961713Sgirish * Tunable to reduce the amount of time spent in the 5444961713Sgirish * ISR doing Rx Processing. 5544961713Sgirish */ 5644961713Sgirish extern uint32_t nxge_max_rx_pkts; 5744961713Sgirish 5844961713Sgirish /* 5944961713Sgirish * Tunables to manage the receive buffer blocks. 6044961713Sgirish * 6144961713Sgirish * nxge_rx_threshold_hi: copy all buffers. 6244961713Sgirish * nxge_rx_bcopy_size_type: receive buffer block size type. 6344961713Sgirish * nxge_rx_threshold_lo: copy only up to tunable block size type. 
6444961713Sgirish */ 6544961713Sgirish extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 6644961713Sgirish extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 6744961713Sgirish extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 6844961713Sgirish 69b4d05839Sml extern uint32_t nxge_cksum_offload; 70678453a8Sspeer 71678453a8Sspeer static nxge_status_t nxge_map_rxdma(p_nxge_t, int); 72678453a8Sspeer static void nxge_unmap_rxdma(p_nxge_t, int); 7344961713Sgirish 7444961713Sgirish static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 7544961713Sgirish 76678453a8Sspeer static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int); 77678453a8Sspeer static void nxge_rxdma_hw_stop(p_nxge_t, int); 7844961713Sgirish 7944961713Sgirish static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 8044961713Sgirish p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 8144961713Sgirish uint32_t, 8244961713Sgirish p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 8344961713Sgirish p_rx_mbox_t *); 8444961713Sgirish static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 8544961713Sgirish p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 8644961713Sgirish 8744961713Sgirish static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 8844961713Sgirish uint16_t, 8944961713Sgirish p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 9044961713Sgirish p_rx_rcr_ring_t *, p_rx_mbox_t *); 9144961713Sgirish static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 9244961713Sgirish p_rx_rcr_ring_t, p_rx_mbox_t); 9344961713Sgirish 9444961713Sgirish static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 9544961713Sgirish uint16_t, 9644961713Sgirish p_nxge_dma_common_t *, 9744961713Sgirish p_rx_rbr_ring_t *, uint32_t); 9844961713Sgirish static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 9944961713Sgirish p_rx_rbr_ring_t); 10044961713Sgirish 10144961713Sgirish static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 10244961713Sgirish p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 10344961713Sgirish 
static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 10444961713Sgirish 105678453a8Sspeer static mblk_t * 106678453a8Sspeer nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int); 10744961713Sgirish 10844961713Sgirish static void nxge_receive_packet(p_nxge_t, 10944961713Sgirish p_rx_rcr_ring_t, 11044961713Sgirish p_rcr_entry_t, 11144961713Sgirish boolean_t *, 11244961713Sgirish mblk_t **, mblk_t **); 11344961713Sgirish 11444961713Sgirish nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 11544961713Sgirish 11644961713Sgirish static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 11744961713Sgirish static void nxge_freeb(p_rx_msg_t); 118678453a8Sspeer static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t); 11944961713Sgirish 12044961713Sgirish static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 12144961713Sgirish uint32_t, uint32_t); 12244961713Sgirish 12344961713Sgirish static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 12444961713Sgirish p_rx_rbr_ring_t); 12544961713Sgirish 12644961713Sgirish 12744961713Sgirish static nxge_status_t 12844961713Sgirish nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 12944961713Sgirish 13044961713Sgirish nxge_status_t 13144961713Sgirish nxge_rx_port_fatal_err_recover(p_nxge_t); 13244961713Sgirish 133678453a8Sspeer static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t); 134678453a8Sspeer 13544961713Sgirish nxge_status_t 13644961713Sgirish nxge_init_rxdma_channels(p_nxge_t nxgep) 13744961713Sgirish { 138e11f0814SMichael Speer nxge_grp_set_t *set = &nxgep->rx_set; 139da14cebeSEric Cheng int i, count, channel; 140e11f0814SMichael Speer nxge_grp_t *group; 141da14cebeSEric Cheng dc_map_t map; 142da14cebeSEric Cheng int dev_gindex; 14344961713Sgirish 14444961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 14544961713Sgirish 146678453a8Sspeer if (!isLDOMguest(nxgep)) { 147678453a8Sspeer if (nxge_rxdma_hw_start_common(nxgep) != 
NXGE_OK) { 148678453a8Sspeer cmn_err(CE_NOTE, "hw_start_common"); 149678453a8Sspeer return (NXGE_ERROR); 150678453a8Sspeer } 151678453a8Sspeer } 152678453a8Sspeer 153678453a8Sspeer /* 154678453a8Sspeer * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8) 155678453a8Sspeer * We only have 8 hardware RDC tables, but we may have 156678453a8Sspeer * up to 16 logical (software-defined) groups of RDCS, 157678453a8Sspeer * if we make use of layer 3 & 4 hardware classification. 158678453a8Sspeer */ 159678453a8Sspeer for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 160678453a8Sspeer if ((1 << i) & set->lg.map) { 161e11f0814SMichael Speer group = set->group[i]; 162da14cebeSEric Cheng dev_gindex = 163da14cebeSEric Cheng nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 164da14cebeSEric Cheng map = nxgep->pt_config.rdc_grps[dev_gindex].map; 165678453a8Sspeer for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 166da14cebeSEric Cheng if ((1 << channel) & map) { 167678453a8Sspeer if ((nxge_grp_dc_add(nxgep, 1686920a987SMisaki Miyashita group, VP_BOUND_RX, channel))) 169e11f0814SMichael Speer goto init_rxdma_channels_exit; 170678453a8Sspeer } 171678453a8Sspeer } 172678453a8Sspeer } 173678453a8Sspeer if (++count == set->lg.count) 174678453a8Sspeer break; 17544961713Sgirish } 17644961713Sgirish 177678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 178678453a8Sspeer return (NXGE_OK); 179e11f0814SMichael Speer 180e11f0814SMichael Speer init_rxdma_channels_exit: 181e11f0814SMichael Speer for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 182e11f0814SMichael Speer if ((1 << i) & set->lg.map) { 183e11f0814SMichael Speer group = set->group[i]; 184da14cebeSEric Cheng dev_gindex = 185da14cebeSEric Cheng nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 186da14cebeSEric Cheng map = nxgep->pt_config.rdc_grps[dev_gindex].map; 187da14cebeSEric Cheng for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 188da14cebeSEric Cheng if ((1 << 
channel) & map) { 189e11f0814SMichael Speer nxge_grp_dc_remove(nxgep, 190da14cebeSEric Cheng VP_BOUND_RX, channel); 191e11f0814SMichael Speer } 192e11f0814SMichael Speer } 193e11f0814SMichael Speer } 194e11f0814SMichael Speer if (++count == set->lg.count) 195e11f0814SMichael Speer break; 196e11f0814SMichael Speer } 197e11f0814SMichael Speer 198e11f0814SMichael Speer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 199e11f0814SMichael Speer return (NXGE_ERROR); 200678453a8Sspeer } 201678453a8Sspeer 202678453a8Sspeer nxge_status_t 203678453a8Sspeer nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 204678453a8Sspeer { 20508ac1c49SNicolas Droux nxge_status_t status; 206678453a8Sspeer 207678453a8Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 208678453a8Sspeer 209678453a8Sspeer status = nxge_map_rxdma(nxge, channel); 21044961713Sgirish if (status != NXGE_OK) { 211678453a8Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 212678453a8Sspeer "<== nxge_init_rxdma: status 0x%x", status)); 213678453a8Sspeer return (status); 21444961713Sgirish } 21544961713Sgirish 21608ac1c49SNicolas Droux #if defined(sun4v) 21708ac1c49SNicolas Droux if (isLDOMguest(nxge)) { 21808ac1c49SNicolas Droux /* set rcr_ring */ 21908ac1c49SNicolas Droux p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel]; 22008ac1c49SNicolas Droux 22108ac1c49SNicolas Droux status = nxge_hio_rxdma_bind_intr(nxge, ring, channel); 22208ac1c49SNicolas Droux if (status != NXGE_OK) { 22308ac1c49SNicolas Droux nxge_unmap_rxdma(nxge, channel); 22408ac1c49SNicolas Droux return (status); 22508ac1c49SNicolas Droux } 22608ac1c49SNicolas Droux } 22708ac1c49SNicolas Droux #endif 22808ac1c49SNicolas Droux 229678453a8Sspeer status = nxge_rxdma_hw_start(nxge, channel); 23044961713Sgirish if (status != NXGE_OK) { 231678453a8Sspeer nxge_unmap_rxdma(nxge, channel); 23244961713Sgirish } 23344961713Sgirish 234678453a8Sspeer if (!nxge->statsp->rdc_ksp[channel]) 235678453a8Sspeer 
nxge_setup_rdc_kstats(nxge, channel); 236678453a8Sspeer 237678453a8Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, 238678453a8Sspeer "<== nxge_init_rxdma_channel: status 0x%x", status)); 23944961713Sgirish 24044961713Sgirish return (status); 24144961713Sgirish } 24244961713Sgirish 24344961713Sgirish void 24444961713Sgirish nxge_uninit_rxdma_channels(p_nxge_t nxgep) 24544961713Sgirish { 246678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 247678453a8Sspeer int rdc; 248678453a8Sspeer 24944961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 25044961713Sgirish 251678453a8Sspeer if (set->owned.map == 0) { 252678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 253678453a8Sspeer "nxge_uninit_rxdma_channels: no channels")); 254678453a8Sspeer return; 255678453a8Sspeer } 25644961713Sgirish 257678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 258678453a8Sspeer if ((1 << rdc) & set->owned.map) { 259678453a8Sspeer nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 260678453a8Sspeer } 261678453a8Sspeer } 262678453a8Sspeer 263678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 264678453a8Sspeer } 265678453a8Sspeer 266678453a8Sspeer void 267678453a8Sspeer nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 268678453a8Sspeer { 269678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 270678453a8Sspeer 271678453a8Sspeer if (nxgep->statsp->rdc_ksp[channel]) { 272678453a8Sspeer kstat_delete(nxgep->statsp->rdc_ksp[channel]); 273678453a8Sspeer nxgep->statsp->rdc_ksp[channel] = 0; 274678453a8Sspeer } 275678453a8Sspeer 276678453a8Sspeer nxge_rxdma_hw_stop(nxgep, channel); 277678453a8Sspeer nxge_unmap_rxdma(nxgep, channel); 278678453a8Sspeer 279678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 28044961713Sgirish } 28144961713Sgirish 28244961713Sgirish nxge_status_t 28344961713Sgirish nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 28444961713Sgirish { 
28544961713Sgirish npi_handle_t handle; 28644961713Sgirish npi_status_t rs = NPI_SUCCESS; 28744961713Sgirish nxge_status_t status = NXGE_OK; 28844961713Sgirish 289330cd344SMichael Speer NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel")); 29044961713Sgirish 29144961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 29244961713Sgirish rs = npi_rxdma_cfg_rdc_reset(handle, channel); 29344961713Sgirish 29444961713Sgirish if (rs != NPI_SUCCESS) { 29544961713Sgirish status = NXGE_ERROR | rs; 29644961713Sgirish } 29744961713Sgirish 298330cd344SMichael Speer NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 299330cd344SMichael Speer 30044961713Sgirish return (status); 30144961713Sgirish } 30244961713Sgirish 30344961713Sgirish void 30444961713Sgirish nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 30544961713Sgirish { 306678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 307678453a8Sspeer int rdc; 30844961713Sgirish 30944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 31044961713Sgirish 311678453a8Sspeer if (!isLDOMguest(nxgep)) { 312678453a8Sspeer npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 313678453a8Sspeer (void) npi_rxdma_dump_fzc_regs(handle); 31444961713Sgirish } 315678453a8Sspeer 316678453a8Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 317678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 318678453a8Sspeer "nxge_rxdma_regs_dump_channels: " 319678453a8Sspeer "NULL ring pointer(s)")); 32044961713Sgirish return; 32144961713Sgirish } 32244961713Sgirish 323678453a8Sspeer if (set->owned.map == 0) { 32444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 325678453a8Sspeer "nxge_rxdma_regs_dump_channels: no channels")); 32644961713Sgirish return; 32744961713Sgirish } 32844961713Sgirish 329678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 330678453a8Sspeer if ((1 << rdc) & set->owned.map) { 331678453a8Sspeer rx_rbr_ring_t *ring = 332678453a8Sspeer 
nxgep->rx_rbr_rings->rbr_rings[rdc]; 333678453a8Sspeer if (ring) { 334678453a8Sspeer (void) nxge_dump_rxdma_channel(nxgep, rdc); 335678453a8Sspeer } 33644961713Sgirish } 33744961713Sgirish } 33844961713Sgirish 33944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 34044961713Sgirish } 34144961713Sgirish 34244961713Sgirish nxge_status_t 34344961713Sgirish nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 34444961713Sgirish { 34544961713Sgirish npi_handle_t handle; 34644961713Sgirish npi_status_t rs = NPI_SUCCESS; 34744961713Sgirish nxge_status_t status = NXGE_OK; 34844961713Sgirish 34944961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 35044961713Sgirish 35144961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 35244961713Sgirish rs = npi_rxdma_dump_rdc_regs(handle, channel); 35344961713Sgirish 35444961713Sgirish if (rs != NPI_SUCCESS) { 35544961713Sgirish status = NXGE_ERROR | rs; 35644961713Sgirish } 35744961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 35844961713Sgirish return (status); 35944961713Sgirish } 36044961713Sgirish 36144961713Sgirish nxge_status_t 36244961713Sgirish nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 36344961713Sgirish p_rx_dma_ent_msk_t mask_p) 36444961713Sgirish { 36544961713Sgirish npi_handle_t handle; 36644961713Sgirish npi_status_t rs = NPI_SUCCESS; 36744961713Sgirish nxge_status_t status = NXGE_OK; 36844961713Sgirish 36944961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, 37052ccf843Smisaki "<== nxge_init_rxdma_channel_event_mask")); 37144961713Sgirish 37244961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 37344961713Sgirish rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 37444961713Sgirish if (rs != NPI_SUCCESS) { 37544961713Sgirish status = NXGE_ERROR | rs; 37644961713Sgirish } 37744961713Sgirish 37844961713Sgirish return (status); 37944961713Sgirish } 38044961713Sgirish 38144961713Sgirish nxge_status_t 
38244961713Sgirish nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 38344961713Sgirish p_rx_dma_ctl_stat_t cs_p) 38444961713Sgirish { 38544961713Sgirish npi_handle_t handle; 38644961713Sgirish npi_status_t rs = NPI_SUCCESS; 38744961713Sgirish nxge_status_t status = NXGE_OK; 38844961713Sgirish 38944961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, 39052ccf843Smisaki "<== nxge_init_rxdma_channel_cntl_stat")); 39144961713Sgirish 39244961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 39344961713Sgirish rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 39444961713Sgirish 39544961713Sgirish if (rs != NPI_SUCCESS) { 39644961713Sgirish status = NXGE_ERROR | rs; 39744961713Sgirish } 39844961713Sgirish 39944961713Sgirish return (status); 40044961713Sgirish } 40144961713Sgirish 402678453a8Sspeer /* 403678453a8Sspeer * nxge_rxdma_cfg_rdcgrp_default_rdc 404678453a8Sspeer * 405678453a8Sspeer * Set the default RDC for an RDC Group (Table) 406678453a8Sspeer * 407678453a8Sspeer * Arguments: 408678453a8Sspeer * nxgep 409678453a8Sspeer * rdcgrp The group to modify 410678453a8Sspeer * rdc The new default RDC. 
411678453a8Sspeer * 412678453a8Sspeer * Notes: 413678453a8Sspeer * 414678453a8Sspeer * NPI/NXGE function calls: 415678453a8Sspeer * npi_rxdma_cfg_rdc_table_default_rdc() 416678453a8Sspeer * 417678453a8Sspeer * Registers accessed: 418678453a8Sspeer * RDC_TBL_REG: FZC_ZCP + 0x10000 419678453a8Sspeer * 420678453a8Sspeer * Context: 421678453a8Sspeer * Service domain 422678453a8Sspeer */ 42344961713Sgirish nxge_status_t 424678453a8Sspeer nxge_rxdma_cfg_rdcgrp_default_rdc( 425678453a8Sspeer p_nxge_t nxgep, 426678453a8Sspeer uint8_t rdcgrp, 427678453a8Sspeer uint8_t rdc) 42844961713Sgirish { 42944961713Sgirish npi_handle_t handle; 43044961713Sgirish npi_status_t rs = NPI_SUCCESS; 43144961713Sgirish p_nxge_dma_pt_cfg_t p_dma_cfgp; 43244961713Sgirish p_nxge_rdc_grp_t rdc_grp_p; 43344961713Sgirish uint8_t actual_rdcgrp, actual_rdc; 43444961713Sgirish 43544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 43652ccf843Smisaki " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 43744961713Sgirish p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 43844961713Sgirish 43944961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 44044961713Sgirish 441678453a8Sspeer /* 442678453a8Sspeer * This has to be rewritten. Do we even allow this anymore? 
443678453a8Sspeer */ 44444961713Sgirish rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 445678453a8Sspeer RDC_MAP_IN(rdc_grp_p->map, rdc); 446678453a8Sspeer rdc_grp_p->def_rdc = rdc; 44744961713Sgirish 44844961713Sgirish actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 44944961713Sgirish actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 45044961713Sgirish 451678453a8Sspeer rs = npi_rxdma_cfg_rdc_table_default_rdc( 45252ccf843Smisaki handle, actual_rdcgrp, actual_rdc); 45344961713Sgirish 45444961713Sgirish if (rs != NPI_SUCCESS) { 45544961713Sgirish return (NXGE_ERROR | rs); 45644961713Sgirish } 45744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 45852ccf843Smisaki " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 45944961713Sgirish return (NXGE_OK); 46044961713Sgirish } 46144961713Sgirish 46244961713Sgirish nxge_status_t 46344961713Sgirish nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 46444961713Sgirish { 46544961713Sgirish npi_handle_t handle; 46644961713Sgirish 46744961713Sgirish uint8_t actual_rdc; 46844961713Sgirish npi_status_t rs = NPI_SUCCESS; 46944961713Sgirish 47044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 47152ccf843Smisaki " ==> nxge_rxdma_cfg_port_default_rdc")); 47244961713Sgirish 47344961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 474678453a8Sspeer actual_rdc = rdc; /* XXX Hack! 
*/ 47544961713Sgirish rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 47644961713Sgirish 47744961713Sgirish 47844961713Sgirish if (rs != NPI_SUCCESS) { 47944961713Sgirish return (NXGE_ERROR | rs); 48044961713Sgirish } 48144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 48252ccf843Smisaki " <== nxge_rxdma_cfg_port_default_rdc")); 48344961713Sgirish 48444961713Sgirish return (NXGE_OK); 48544961713Sgirish } 48644961713Sgirish 48744961713Sgirish nxge_status_t 48844961713Sgirish nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 48944961713Sgirish uint16_t pkts) 49044961713Sgirish { 49144961713Sgirish npi_status_t rs = NPI_SUCCESS; 49244961713Sgirish npi_handle_t handle; 49344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 49452ccf843Smisaki " ==> nxge_rxdma_cfg_rcr_threshold")); 49544961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 49644961713Sgirish 49744961713Sgirish rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 49844961713Sgirish 49944961713Sgirish if (rs != NPI_SUCCESS) { 50044961713Sgirish return (NXGE_ERROR | rs); 50144961713Sgirish } 50244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 50344961713Sgirish return (NXGE_OK); 50444961713Sgirish } 50544961713Sgirish 50644961713Sgirish nxge_status_t 50744961713Sgirish nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 50844961713Sgirish uint16_t tout, uint8_t enable) 50944961713Sgirish { 51044961713Sgirish npi_status_t rs = NPI_SUCCESS; 51144961713Sgirish npi_handle_t handle; 51244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 51344961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 51444961713Sgirish if (enable == 0) { 51544961713Sgirish rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 51644961713Sgirish } else { 51744961713Sgirish rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 51852ccf843Smisaki tout); 51944961713Sgirish } 52044961713Sgirish 52144961713Sgirish if (rs != 
NPI_SUCCESS) { 52244961713Sgirish return (NXGE_ERROR | rs); 52344961713Sgirish } 52444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 52544961713Sgirish return (NXGE_OK); 52644961713Sgirish } 52744961713Sgirish 52844961713Sgirish nxge_status_t 52944961713Sgirish nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 53044961713Sgirish p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 53144961713Sgirish { 53244961713Sgirish npi_handle_t handle; 53344961713Sgirish rdc_desc_cfg_t rdc_desc; 53444961713Sgirish p_rcrcfig_b_t cfgb_p; 53544961713Sgirish npi_status_t rs = NPI_SUCCESS; 53644961713Sgirish 53744961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 53844961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 53944961713Sgirish /* 54044961713Sgirish * Use configuration data composed at init time. 54144961713Sgirish * Write to hardware the receive ring configurations. 54244961713Sgirish */ 54344961713Sgirish rdc_desc.mbox_enable = 1; 54444961713Sgirish rdc_desc.mbox_addr = mbox_p->mbox_addr; 54544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 54652ccf843Smisaki "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 54752ccf843Smisaki mbox_p->mbox_addr, rdc_desc.mbox_addr)); 54844961713Sgirish 54944961713Sgirish rdc_desc.rbr_len = rbr_p->rbb_max; 55044961713Sgirish rdc_desc.rbr_addr = rbr_p->rbr_addr; 55144961713Sgirish 55244961713Sgirish switch (nxgep->rx_bksize_code) { 55344961713Sgirish case RBR_BKSIZE_4K: 55444961713Sgirish rdc_desc.page_size = SIZE_4KB; 55544961713Sgirish break; 55644961713Sgirish case RBR_BKSIZE_8K: 55744961713Sgirish rdc_desc.page_size = SIZE_8KB; 55844961713Sgirish break; 55944961713Sgirish case RBR_BKSIZE_16K: 56044961713Sgirish rdc_desc.page_size = SIZE_16KB; 56144961713Sgirish break; 56244961713Sgirish case RBR_BKSIZE_32K: 56344961713Sgirish rdc_desc.page_size = SIZE_32KB; 56444961713Sgirish break; 56544961713Sgirish } 56644961713Sgirish 56744961713Sgirish 
rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 56844961713Sgirish rdc_desc.valid0 = 1; 56944961713Sgirish 57044961713Sgirish rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 57144961713Sgirish rdc_desc.valid1 = 1; 57244961713Sgirish 57344961713Sgirish rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 57444961713Sgirish rdc_desc.valid2 = 1; 57544961713Sgirish 57644961713Sgirish rdc_desc.full_hdr = rcr_p->full_hdr_flag; 57744961713Sgirish rdc_desc.offset = rcr_p->sw_priv_hdr_len; 57844961713Sgirish 57944961713Sgirish rdc_desc.rcr_len = rcr_p->comp_size; 58044961713Sgirish rdc_desc.rcr_addr = rcr_p->rcr_addr; 58144961713Sgirish 58244961713Sgirish cfgb_p = &(rcr_p->rcr_cfgb); 58344961713Sgirish rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 584678453a8Sspeer /* For now, disable this timeout in a guest domain. */ 585678453a8Sspeer if (isLDOMguest(nxgep)) { 586678453a8Sspeer rdc_desc.rcr_timeout = 0; 587678453a8Sspeer rdc_desc.rcr_timeout_enable = 0; 588678453a8Sspeer } else { 589678453a8Sspeer rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 590678453a8Sspeer rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 591678453a8Sspeer } 59244961713Sgirish 59344961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 59452ccf843Smisaki "rbr_len qlen %d pagesize code %d rcr_len %d", 59552ccf843Smisaki rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 59644961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 59752ccf843Smisaki "size 0 %d size 1 %d size 2 %d", 59852ccf843Smisaki rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 59952ccf843Smisaki rbr_p->npi_pkt_buf_size2)); 60044961713Sgirish 60144961713Sgirish rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 60244961713Sgirish if (rs != NPI_SUCCESS) { 60344961713Sgirish return (NXGE_ERROR | rs); 60444961713Sgirish } 60544961713Sgirish 60644961713Sgirish /* 60744961713Sgirish * Enable the timeout and threshold. 
60844961713Sgirish */ 60944961713Sgirish rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 61052ccf843Smisaki rdc_desc.rcr_threshold); 61144961713Sgirish if (rs != NPI_SUCCESS) { 61244961713Sgirish return (NXGE_ERROR | rs); 61344961713Sgirish } 61444961713Sgirish 61544961713Sgirish rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 61652ccf843Smisaki rdc_desc.rcr_timeout); 61744961713Sgirish if (rs != NPI_SUCCESS) { 61844961713Sgirish return (NXGE_ERROR | rs); 61944961713Sgirish } 62044961713Sgirish 621e759c33aSMichael Speer if (!isLDOMguest(nxgep)) { 622e759c33aSMichael Speer /* Enable the DMA */ 623e759c33aSMichael Speer rs = npi_rxdma_cfg_rdc_enable(handle, channel); 624e759c33aSMichael Speer if (rs != NPI_SUCCESS) { 625e759c33aSMichael Speer return (NXGE_ERROR | rs); 626e759c33aSMichael Speer } 62744961713Sgirish } 62844961713Sgirish 62944961713Sgirish /* Kick the DMA engine. */ 63044961713Sgirish npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 631e759c33aSMichael Speer 632e759c33aSMichael Speer if (!isLDOMguest(nxgep)) { 633e759c33aSMichael Speer /* Clear the rbr empty bit */ 634e759c33aSMichael Speer (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 635e759c33aSMichael Speer } 63644961713Sgirish 63744961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 63844961713Sgirish 63944961713Sgirish return (NXGE_OK); 64044961713Sgirish } 64144961713Sgirish 64244961713Sgirish nxge_status_t 64344961713Sgirish nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 64444961713Sgirish { 64544961713Sgirish npi_handle_t handle; 64644961713Sgirish npi_status_t rs = NPI_SUCCESS; 64744961713Sgirish 64844961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 64944961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 65044961713Sgirish 65144961713Sgirish /* disable the DMA */ 65244961713Sgirish rs = npi_rxdma_cfg_rdc_disable(handle, channel); 65344961713Sgirish if (rs != NPI_SUCCESS) { 
65444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 65552ccf843Smisaki "<== nxge_disable_rxdma_channel:failed (0x%x)", 65652ccf843Smisaki rs)); 65744961713Sgirish return (NXGE_ERROR | rs); 65844961713Sgirish } 65944961713Sgirish 66044961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 66144961713Sgirish return (NXGE_OK); 66244961713Sgirish } 66344961713Sgirish 66444961713Sgirish nxge_status_t 66544961713Sgirish nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 66644961713Sgirish { 66744961713Sgirish npi_handle_t handle; 66844961713Sgirish nxge_status_t status = NXGE_OK; 66944961713Sgirish 67044961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, 67152ccf843Smisaki "<== nxge_init_rxdma_channel_rcrflush")); 67244961713Sgirish 67344961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 67444961713Sgirish npi_rxdma_rdc_rcr_flush(handle, channel); 67544961713Sgirish 67644961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, 67752ccf843Smisaki "<== nxge_init_rxdma_channel_rcrflsh")); 67844961713Sgirish return (status); 67944961713Sgirish 68044961713Sgirish } 68144961713Sgirish 68244961713Sgirish #define MID_INDEX(l, r) ((r + l + 1) >> 1) 68344961713Sgirish 68444961713Sgirish #define TO_LEFT -1 68544961713Sgirish #define TO_RIGHT 1 68644961713Sgirish #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 68744961713Sgirish #define BOTH_LEFT (TO_LEFT + TO_LEFT) 68844961713Sgirish #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 68944961713Sgirish #define NO_HINT 0xffffffff 69044961713Sgirish 69144961713Sgirish /*ARGSUSED*/ 69244961713Sgirish nxge_status_t 69344961713Sgirish nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 694a3c5bd6dSspeer uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 695a3c5bd6dSspeer uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 69644961713Sgirish { 69744961713Sgirish int bufsize; 69844961713Sgirish uint64_t pktbuf_pp; 69944961713Sgirish uint64_t dvma_addr; 70044961713Sgirish rxring_info_t *ring_info; 70144961713Sgirish 
int base_side, end_side; 70244961713Sgirish int r_index, l_index, anchor_index; 70344961713Sgirish int found, search_done; 70444961713Sgirish uint32_t offset, chunk_size, block_size, page_size_mask; 70544961713Sgirish uint32_t chunk_index, block_index, total_index; 70644961713Sgirish int max_iterations, iteration; 70744961713Sgirish rxbuf_index_info_t *bufinfo; 70844961713Sgirish 70944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 71044961713Sgirish 71144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 71252ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 71352ccf843Smisaki pkt_buf_addr_pp, 71452ccf843Smisaki pktbufsz_type)); 715adfcba55Sjoycey #if defined(__i386) 716adfcba55Sjoycey pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 717adfcba55Sjoycey #else 71844961713Sgirish pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 719adfcba55Sjoycey #endif 72044961713Sgirish 72144961713Sgirish switch (pktbufsz_type) { 72244961713Sgirish case 0: 72344961713Sgirish bufsize = rbr_p->pkt_buf_size0; 72444961713Sgirish break; 72544961713Sgirish case 1: 72644961713Sgirish bufsize = rbr_p->pkt_buf_size1; 72744961713Sgirish break; 72844961713Sgirish case 2: 72944961713Sgirish bufsize = rbr_p->pkt_buf_size2; 73044961713Sgirish break; 73144961713Sgirish case RCR_SINGLE_BLOCK: 73244961713Sgirish bufsize = 0; 73344961713Sgirish anchor_index = 0; 73444961713Sgirish break; 73544961713Sgirish default: 73644961713Sgirish return (NXGE_ERROR); 73744961713Sgirish } 73844961713Sgirish 73944961713Sgirish if (rbr_p->num_blocks == 1) { 74044961713Sgirish anchor_index = 0; 74144961713Sgirish ring_info = rbr_p->ring_info; 74244961713Sgirish bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 74344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 74452ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 74552ccf843Smisaki "buf_pp $%p btype %d anchor_index %d " 74652ccf843Smisaki "bufinfo $%p", 74752ccf843Smisaki pkt_buf_addr_pp, 74852ccf843Smisaki pktbufsz_type, 
74952ccf843Smisaki anchor_index, 75052ccf843Smisaki bufinfo)); 75144961713Sgirish 75244961713Sgirish goto found_index; 75344961713Sgirish } 75444961713Sgirish 75544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 75652ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: " 75752ccf843Smisaki "buf_pp $%p btype %d anchor_index %d", 75852ccf843Smisaki pkt_buf_addr_pp, 75952ccf843Smisaki pktbufsz_type, 76052ccf843Smisaki anchor_index)); 76144961713Sgirish 76244961713Sgirish ring_info = rbr_p->ring_info; 76344961713Sgirish found = B_FALSE; 76444961713Sgirish bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 76544961713Sgirish iteration = 0; 76644961713Sgirish max_iterations = ring_info->max_iterations; 76744961713Sgirish /* 768a3c5bd6dSspeer * First check if this block has been seen 76944961713Sgirish * recently. This is indicated by a hint which 77044961713Sgirish * is initialized when the first buffer of the block 77144961713Sgirish * is seen. The hint is reset when the last buffer of 77244961713Sgirish * the block has been processed. 77344961713Sgirish * As three block sizes are supported, three hints 77444961713Sgirish * are kept. The idea behind the hints is that once 77544961713Sgirish * the hardware uses a block for a buffer of that 77644961713Sgirish * size, it will use it exclusively for that size 77744961713Sgirish * and will use it until it is exhausted. It is assumed 77844961713Sgirish * that there would a single block being used for the same 77944961713Sgirish * buffer sizes at any given time. 
78044961713Sgirish */ 78144961713Sgirish if (ring_info->hint[pktbufsz_type] != NO_HINT) { 78244961713Sgirish anchor_index = ring_info->hint[pktbufsz_type]; 78344961713Sgirish dvma_addr = bufinfo[anchor_index].dvma_addr; 78444961713Sgirish chunk_size = bufinfo[anchor_index].buf_size; 78544961713Sgirish if ((pktbuf_pp >= dvma_addr) && 78652ccf843Smisaki (pktbuf_pp < (dvma_addr + chunk_size))) { 78744961713Sgirish found = B_TRUE; 78844961713Sgirish /* 78944961713Sgirish * check if this is the last buffer in the block 79044961713Sgirish * If so, then reset the hint for the size; 79144961713Sgirish */ 79244961713Sgirish 79344961713Sgirish if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size)) 79444961713Sgirish ring_info->hint[pktbufsz_type] = NO_HINT; 79544961713Sgirish } 79644961713Sgirish } 79744961713Sgirish 79844961713Sgirish if (found == B_FALSE) { 79944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 80052ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (!found)" 80152ccf843Smisaki "buf_pp $%p btype %d anchor_index %d", 80252ccf843Smisaki pkt_buf_addr_pp, 80352ccf843Smisaki pktbufsz_type, 80452ccf843Smisaki anchor_index)); 80544961713Sgirish 80644961713Sgirish /* 80744961713Sgirish * This is the first buffer of the block of this 80844961713Sgirish * size. Need to search the whole information 80944961713Sgirish * array. 81044961713Sgirish * the search algorithm uses a binary tree search 81144961713Sgirish * algorithm. It assumes that the information is 81244961713Sgirish * already sorted with increasing order 81344961713Sgirish * info[0] < info[1] < info[2] .... 
< info[n-1] 81444961713Sgirish * where n is the size of the information array 81544961713Sgirish */ 81644961713Sgirish r_index = rbr_p->num_blocks - 1; 81744961713Sgirish l_index = 0; 81844961713Sgirish search_done = B_FALSE; 81944961713Sgirish anchor_index = MID_INDEX(r_index, l_index); 82044961713Sgirish while (search_done == B_FALSE) { 82144961713Sgirish if ((r_index == l_index) || 82252ccf843Smisaki (iteration >= max_iterations)) 82344961713Sgirish search_done = B_TRUE; 82444961713Sgirish end_side = TO_RIGHT; /* to the right */ 82544961713Sgirish base_side = TO_LEFT; /* to the left */ 82644961713Sgirish /* read the DVMA address information and sort it */ 82744961713Sgirish dvma_addr = bufinfo[anchor_index].dvma_addr; 82844961713Sgirish chunk_size = bufinfo[anchor_index].buf_size; 82944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 83052ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (searching)" 83152ccf843Smisaki "buf_pp $%p btype %d " 83252ccf843Smisaki "anchor_index %d chunk_size %d dvmaaddr $%p", 83352ccf843Smisaki pkt_buf_addr_pp, 83452ccf843Smisaki pktbufsz_type, 83552ccf843Smisaki anchor_index, 83652ccf843Smisaki chunk_size, 83752ccf843Smisaki dvma_addr)); 83844961713Sgirish 83944961713Sgirish if (pktbuf_pp >= dvma_addr) 84044961713Sgirish base_side = TO_RIGHT; /* to the right */ 84144961713Sgirish if (pktbuf_pp < (dvma_addr + chunk_size)) 84244961713Sgirish end_side = TO_LEFT; /* to the left */ 84344961713Sgirish 84444961713Sgirish switch (base_side + end_side) { 84552ccf843Smisaki case IN_MIDDLE: 84652ccf843Smisaki /* found */ 84752ccf843Smisaki found = B_TRUE; 84852ccf843Smisaki search_done = B_TRUE; 84952ccf843Smisaki if ((pktbuf_pp + bufsize) < 85052ccf843Smisaki (dvma_addr + chunk_size)) 85152ccf843Smisaki ring_info->hint[pktbufsz_type] = 85252ccf843Smisaki bufinfo[anchor_index].buf_index; 85352ccf843Smisaki break; 85452ccf843Smisaki case BOTH_RIGHT: 85552ccf843Smisaki /* not found: go to the right */ 85652ccf843Smisaki l_index = anchor_index + 1; 
85752ccf843Smisaki anchor_index = MID_INDEX(r_index, l_index); 85852ccf843Smisaki break; 85952ccf843Smisaki 86052ccf843Smisaki case BOTH_LEFT: 86152ccf843Smisaki /* not found: go to the left */ 86252ccf843Smisaki r_index = anchor_index - 1; 86352ccf843Smisaki anchor_index = MID_INDEX(r_index, l_index); 86452ccf843Smisaki break; 86552ccf843Smisaki default: /* should not come here */ 86652ccf843Smisaki return (NXGE_ERROR); 86744961713Sgirish } 86844961713Sgirish iteration++; 86944961713Sgirish } 87044961713Sgirish 87144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 87252ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (search done)" 87352ccf843Smisaki "buf_pp $%p btype %d anchor_index %d", 87452ccf843Smisaki pkt_buf_addr_pp, 87552ccf843Smisaki pktbufsz_type, 87652ccf843Smisaki anchor_index)); 87744961713Sgirish } 87844961713Sgirish 87944961713Sgirish if (found == B_FALSE) { 88044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 88152ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (search failed)" 88252ccf843Smisaki "buf_pp $%p btype %d anchor_index %d", 88352ccf843Smisaki pkt_buf_addr_pp, 88452ccf843Smisaki pktbufsz_type, 88552ccf843Smisaki anchor_index)); 88644961713Sgirish return (NXGE_ERROR); 88744961713Sgirish } 88844961713Sgirish 88944961713Sgirish found_index: 89044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 89152ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 89252ccf843Smisaki "buf_pp $%p btype %d bufsize %d anchor_index %d", 89352ccf843Smisaki pkt_buf_addr_pp, 89452ccf843Smisaki pktbufsz_type, 89552ccf843Smisaki bufsize, 89652ccf843Smisaki anchor_index)); 89744961713Sgirish 89844961713Sgirish /* index of the first block in this chunk */ 89944961713Sgirish chunk_index = bufinfo[anchor_index].start_index; 90044961713Sgirish dvma_addr = bufinfo[anchor_index].dvma_addr; 90144961713Sgirish page_size_mask = ring_info->block_size_mask; 90244961713Sgirish 90344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 90452ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 
90552ccf843Smisaki "buf_pp $%p btype %d bufsize %d " 90652ccf843Smisaki "anchor_index %d chunk_index %d dvma $%p", 90752ccf843Smisaki pkt_buf_addr_pp, 90852ccf843Smisaki pktbufsz_type, 90952ccf843Smisaki bufsize, 91052ccf843Smisaki anchor_index, 91152ccf843Smisaki chunk_index, 91252ccf843Smisaki dvma_addr)); 91344961713Sgirish 91444961713Sgirish offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 91544961713Sgirish block_size = rbr_p->block_size; /* System block(page) size */ 91644961713Sgirish 91744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 91852ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 91952ccf843Smisaki "buf_pp $%p btype %d bufsize %d " 92052ccf843Smisaki "anchor_index %d chunk_index %d dvma $%p " 92152ccf843Smisaki "offset %d block_size %d", 92252ccf843Smisaki pkt_buf_addr_pp, 92352ccf843Smisaki pktbufsz_type, 92452ccf843Smisaki bufsize, 92552ccf843Smisaki anchor_index, 92652ccf843Smisaki chunk_index, 92752ccf843Smisaki dvma_addr, 92852ccf843Smisaki offset, 92952ccf843Smisaki block_size)); 93044961713Sgirish 93144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 93244961713Sgirish 93344961713Sgirish block_index = (offset / block_size); /* index within chunk */ 93444961713Sgirish total_index = chunk_index + block_index; 93544961713Sgirish 93644961713Sgirish 93744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 93852ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: " 93952ccf843Smisaki "total_index %d dvma_addr $%p " 94052ccf843Smisaki "offset %d block_size %d " 94152ccf843Smisaki "block_index %d ", 94252ccf843Smisaki total_index, dvma_addr, 94352ccf843Smisaki offset, block_size, 94452ccf843Smisaki block_index)); 945adfcba55Sjoycey #if defined(__i386) 946adfcba55Sjoycey *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 94752ccf843Smisaki (uint32_t)offset); 948adfcba55Sjoycey #else 949adfcba55Sjoycey *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 95052ccf843Smisaki 
(uint64_t)offset); 951adfcba55Sjoycey #endif 95244961713Sgirish 95344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 95452ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: " 95552ccf843Smisaki "total_index %d dvma_addr $%p " 95652ccf843Smisaki "offset %d block_size %d " 95752ccf843Smisaki "block_index %d " 95852ccf843Smisaki "*pkt_buf_addr_p $%p", 95952ccf843Smisaki total_index, dvma_addr, 96052ccf843Smisaki offset, block_size, 96152ccf843Smisaki block_index, 96252ccf843Smisaki *pkt_buf_addr_p)); 96344961713Sgirish 96444961713Sgirish 96544961713Sgirish *msg_index = total_index; 96644961713Sgirish *bufoffset = (offset & page_size_mask); 96744961713Sgirish 96844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 96952ccf843Smisaki "==> nxge_rxbuf_pp_to_vp: get msg index: " 97052ccf843Smisaki "msg_index %d bufoffset_index %d", 97152ccf843Smisaki *msg_index, 97252ccf843Smisaki *bufoffset)); 97344961713Sgirish 97444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 97544961713Sgirish 97644961713Sgirish return (NXGE_OK); 97744961713Sgirish } 97844961713Sgirish 97944961713Sgirish /* 98044961713Sgirish * used by quick sort (qsort) function 98144961713Sgirish * to perform comparison 98244961713Sgirish */ 98344961713Sgirish static int 98444961713Sgirish nxge_sort_compare(const void *p1, const void *p2) 98544961713Sgirish { 98644961713Sgirish 98744961713Sgirish rxbuf_index_info_t *a, *b; 98844961713Sgirish 98944961713Sgirish a = (rxbuf_index_info_t *)p1; 99044961713Sgirish b = (rxbuf_index_info_t *)p2; 99144961713Sgirish 99244961713Sgirish if (a->dvma_addr > b->dvma_addr) 99344961713Sgirish return (1); 99444961713Sgirish if (a->dvma_addr < b->dvma_addr) 99544961713Sgirish return (-1); 99644961713Sgirish return (0); 99744961713Sgirish } 99844961713Sgirish 99944961713Sgirish 100044961713Sgirish 100144961713Sgirish /* 100244961713Sgirish * grabbed this sort implementation from common/syscall/avl.c 100344961713Sgirish * 100444961713Sgirish */ 100544961713Sgirish /* 
100644961713Sgirish * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 100744961713Sgirish * v = Ptr to array/vector of objs 100844961713Sgirish * n = # objs in the array 100944961713Sgirish * s = size of each obj (must be multiples of a word size) 101044961713Sgirish * f = ptr to function to compare two objs 101144961713Sgirish * returns (-1 = less than, 0 = equal, 1 = greater than 101244961713Sgirish */ 101344961713Sgirish void 101444961713Sgirish nxge_ksort(caddr_t v, int n, int s, int (*f)()) 101544961713Sgirish { 101644961713Sgirish int g, i, j, ii; 101744961713Sgirish unsigned int *p1, *p2; 101844961713Sgirish unsigned int tmp; 101944961713Sgirish 102044961713Sgirish /* No work to do */ 102144961713Sgirish if (v == NULL || n <= 1) 102244961713Sgirish return; 102344961713Sgirish /* Sanity check on arguments */ 102444961713Sgirish ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 102544961713Sgirish ASSERT(s > 0); 102644961713Sgirish 102744961713Sgirish for (g = n / 2; g > 0; g /= 2) { 102844961713Sgirish for (i = g; i < n; i++) { 102944961713Sgirish for (j = i - g; j >= 0 && 103052ccf843Smisaki (*f)(v + j * s, v + (j + g) * s) == 1; 103152ccf843Smisaki j -= g) { 103244961713Sgirish p1 = (unsigned *)(v + j * s); 103344961713Sgirish p2 = (unsigned *)(v + (j + g) * s); 103444961713Sgirish for (ii = 0; ii < s / 4; ii++) { 103544961713Sgirish tmp = *p1; 103644961713Sgirish *p1++ = *p2; 103744961713Sgirish *p2++ = tmp; 103844961713Sgirish } 103944961713Sgirish } 104044961713Sgirish } 104144961713Sgirish } 104244961713Sgirish } 104344961713Sgirish 104444961713Sgirish /* 104544961713Sgirish * Initialize data structures required for rxdma 104644961713Sgirish * buffer dvma->vmem address lookup 104744961713Sgirish */ 104844961713Sgirish /*ARGSUSED*/ 104944961713Sgirish static nxge_status_t 105044961713Sgirish nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 105144961713Sgirish { 105244961713Sgirish 105344961713Sgirish int index; 
105444961713Sgirish rxring_info_t *ring_info; 105544961713Sgirish int max_iteration = 0, max_index = 0; 105644961713Sgirish 105744961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 105844961713Sgirish 105944961713Sgirish ring_info = rbrp->ring_info; 106044961713Sgirish ring_info->hint[0] = NO_HINT; 106144961713Sgirish ring_info->hint[1] = NO_HINT; 106244961713Sgirish ring_info->hint[2] = NO_HINT; 106344961713Sgirish max_index = rbrp->num_blocks; 106444961713Sgirish 106544961713Sgirish /* read the DVMA address information and sort it */ 106644961713Sgirish /* do init of the information array */ 106744961713Sgirish 106844961713Sgirish 106944961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 107052ccf843Smisaki " nxge_rxbuf_index_info_init Sort ptrs")); 107144961713Sgirish 107244961713Sgirish /* sort the array */ 107344961713Sgirish nxge_ksort((void *)ring_info->buffer, max_index, 107452ccf843Smisaki sizeof (rxbuf_index_info_t), nxge_sort_compare); 107544961713Sgirish 107644961713Sgirish 107744961713Sgirish 107844961713Sgirish for (index = 0; index < max_index; index++) { 107944961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 108052ccf843Smisaki " nxge_rxbuf_index_info_init: sorted chunk %d " 108152ccf843Smisaki " ioaddr $%p kaddr $%p size %x", 108252ccf843Smisaki index, ring_info->buffer[index].dvma_addr, 108352ccf843Smisaki ring_info->buffer[index].kaddr, 108452ccf843Smisaki ring_info->buffer[index].buf_size)); 108544961713Sgirish } 108644961713Sgirish 108744961713Sgirish max_iteration = 0; 108844961713Sgirish while (max_index >= (1ULL << max_iteration)) 108944961713Sgirish max_iteration++; 109044961713Sgirish ring_info->max_iterations = max_iteration + 1; 109144961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 109252ccf843Smisaki " nxge_rxbuf_index_info_init Find max iter %d", 109352ccf843Smisaki ring_info->max_iterations)); 109444961713Sgirish 109544961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 
109644961713Sgirish return (NXGE_OK); 109744961713Sgirish } 109844961713Sgirish 10990a8e077aSspeer /* ARGSUSED */ 110044961713Sgirish void 110144961713Sgirish nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 110244961713Sgirish { 110344961713Sgirish #ifdef NXGE_DEBUG 110444961713Sgirish 110544961713Sgirish uint32_t bptr; 110644961713Sgirish uint64_t pp; 110744961713Sgirish 110844961713Sgirish bptr = entry_p->bits.hdw.pkt_buf_addr; 110944961713Sgirish 111044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 111152ccf843Smisaki "\trcr entry $%p " 111252ccf843Smisaki "\trcr entry 0x%0llx " 111352ccf843Smisaki "\trcr entry 0x%08x " 111452ccf843Smisaki "\trcr entry 0x%08x " 111552ccf843Smisaki "\tvalue 0x%0llx\n" 111652ccf843Smisaki "\tmulti = %d\n" 111752ccf843Smisaki "\tpkt_type = 0x%x\n" 111852ccf843Smisaki "\tzero_copy = %d\n" 111952ccf843Smisaki "\tnoport = %d\n" 112052ccf843Smisaki "\tpromis = %d\n" 112152ccf843Smisaki "\terror = 0x%04x\n" 112252ccf843Smisaki "\tdcf_err = 0x%01x\n" 112352ccf843Smisaki "\tl2_len = %d\n" 112452ccf843Smisaki "\tpktbufsize = %d\n" 112552ccf843Smisaki "\tpkt_buf_addr = $%p\n" 112652ccf843Smisaki "\tpkt_buf_addr (<< 6) = $%p\n", 112752ccf843Smisaki entry_p, 112852ccf843Smisaki *(int64_t *)entry_p, 112952ccf843Smisaki *(int32_t *)entry_p, 113052ccf843Smisaki *(int32_t *)((char *)entry_p + 32), 113152ccf843Smisaki entry_p->value, 113252ccf843Smisaki entry_p->bits.hdw.multi, 113352ccf843Smisaki entry_p->bits.hdw.pkt_type, 113452ccf843Smisaki entry_p->bits.hdw.zero_copy, 113552ccf843Smisaki entry_p->bits.hdw.noport, 113652ccf843Smisaki entry_p->bits.hdw.promis, 113752ccf843Smisaki entry_p->bits.hdw.error, 113852ccf843Smisaki entry_p->bits.hdw.dcf_err, 113952ccf843Smisaki entry_p->bits.hdw.l2_len, 114052ccf843Smisaki entry_p->bits.hdw.pktbufsz, 114152ccf843Smisaki bptr, 114252ccf843Smisaki entry_p->bits.ldw.pkt_buf_addr)); 114344961713Sgirish 114444961713Sgirish pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 114552ccf843Smisaki 
RCR_PKT_BUF_ADDR_SHIFT; 114644961713Sgirish 114744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 114852ccf843Smisaki pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 114944961713Sgirish #endif 115044961713Sgirish } 115144961713Sgirish 115244961713Sgirish void 115344961713Sgirish nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 115444961713Sgirish { 115544961713Sgirish npi_handle_t handle; 115644961713Sgirish rbr_stat_t rbr_stat; 115744961713Sgirish addr44_t hd_addr; 115844961713Sgirish addr44_t tail_addr; 115944961713Sgirish uint16_t qlen; 116044961713Sgirish 116144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 116252ccf843Smisaki "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 116344961713Sgirish 116444961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 116544961713Sgirish 116644961713Sgirish /* RBR head */ 116744961713Sgirish hd_addr.addr = 0; 116844961713Sgirish (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1169adfcba55Sjoycey #if defined(__i386) 117053f3d8ecSyc printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 117152ccf843Smisaki (void *)(uint32_t)hd_addr.addr); 1172adfcba55Sjoycey #else 117353f3d8ecSyc printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 117452ccf843Smisaki (void *)hd_addr.addr); 1175adfcba55Sjoycey #endif 117644961713Sgirish 117744961713Sgirish /* RBR stats */ 117844961713Sgirish (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 117944961713Sgirish printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 118044961713Sgirish 118144961713Sgirish /* RCR tail */ 118244961713Sgirish tail_addr.addr = 0; 118344961713Sgirish (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1184adfcba55Sjoycey #if defined(__i386) 118553f3d8ecSyc printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 118652ccf843Smisaki (void *)(uint32_t)tail_addr.addr); 1187adfcba55Sjoycey #else 118853f3d8ecSyc printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 118952ccf843Smisaki (void *)tail_addr.addr); 1190adfcba55Sjoycey 
#endif 119144961713Sgirish 119244961713Sgirish /* RCR qlen */ 119344961713Sgirish (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 119444961713Sgirish printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 119544961713Sgirish 119644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 119752ccf843Smisaki "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 119844961713Sgirish } 119944961713Sgirish 120044961713Sgirish nxge_status_t 120144961713Sgirish nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 120244961713Sgirish { 1203678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 1204678453a8Sspeer nxge_status_t status; 1205678453a8Sspeer npi_status_t rs; 1206678453a8Sspeer int rdc; 120744961713Sgirish 120844961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 120952ccf843Smisaki "==> nxge_rxdma_hw_mode: mode %d", enable)); 121044961713Sgirish 121144961713Sgirish if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 121244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1213678453a8Sspeer "<== nxge_rxdma_mode: not initialized")); 121444961713Sgirish return (NXGE_ERROR); 121544961713Sgirish } 121644961713Sgirish 1217678453a8Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1218678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1219678453a8Sspeer "<== nxge_tx_port_fatal_err_recover: " 1220678453a8Sspeer "NULL ring pointer(s)")); 122144961713Sgirish return (NXGE_ERROR); 122244961713Sgirish } 122344961713Sgirish 1224678453a8Sspeer if (set->owned.map == 0) { 122544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1226678453a8Sspeer "nxge_rxdma_regs_dump_channels: no channels")); 1227678453a8Sspeer return (NULL); 122844961713Sgirish } 122944961713Sgirish 1230678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1231678453a8Sspeer if ((1 << rdc) & set->owned.map) { 1232678453a8Sspeer rx_rbr_ring_t *ring = 1233678453a8Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 1234678453a8Sspeer npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 1235678453a8Sspeer if (ring) { 
1236678453a8Sspeer if (enable) { 1237678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1238678453a8Sspeer "==> nxge_rxdma_hw_mode: " 1239678453a8Sspeer "channel %d (enable)", rdc)); 1240678453a8Sspeer rs = npi_rxdma_cfg_rdc_enable 1241678453a8Sspeer (handle, rdc); 1242678453a8Sspeer } else { 1243678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1244678453a8Sspeer "==> nxge_rxdma_hw_mode: " 1245678453a8Sspeer "channel %d disable)", rdc)); 1246678453a8Sspeer rs = npi_rxdma_cfg_rdc_disable 1247678453a8Sspeer (handle, rdc); 1248678453a8Sspeer } 1249678453a8Sspeer } 125044961713Sgirish } 125144961713Sgirish } 125244961713Sgirish 125344961713Sgirish status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 125444961713Sgirish 125544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 125652ccf843Smisaki "<== nxge_rxdma_hw_mode: status 0x%x", status)); 125744961713Sgirish 125844961713Sgirish return (status); 125944961713Sgirish } 126044961713Sgirish 126144961713Sgirish void 126244961713Sgirish nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 126344961713Sgirish { 126444961713Sgirish npi_handle_t handle; 126544961713Sgirish 126644961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, 126752ccf843Smisaki "==> nxge_rxdma_enable_channel: channel %d", channel)); 126844961713Sgirish 126944961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 127044961713Sgirish (void) npi_rxdma_cfg_rdc_enable(handle, channel); 127144961713Sgirish 127244961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 127344961713Sgirish } 127444961713Sgirish 127544961713Sgirish void 127644961713Sgirish nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 127744961713Sgirish { 127844961713Sgirish npi_handle_t handle; 127944961713Sgirish 128044961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, 128152ccf843Smisaki "==> nxge_rxdma_disable_channel: channel %d", channel)); 128244961713Sgirish 128344961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 128444961713Sgirish (void) 
npi_rxdma_cfg_rdc_disable(handle, channel); 128544961713Sgirish 128644961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 128744961713Sgirish } 128844961713Sgirish 128944961713Sgirish void 129044961713Sgirish nxge_hw_start_rx(p_nxge_t nxgep) 129144961713Sgirish { 129244961713Sgirish NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 129344961713Sgirish 129444961713Sgirish (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 129544961713Sgirish (void) nxge_rx_mac_enable(nxgep); 129644961713Sgirish 129744961713Sgirish NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 129844961713Sgirish } 129944961713Sgirish 130044961713Sgirish /*ARGSUSED*/ 130144961713Sgirish void 130244961713Sgirish nxge_fixup_rxdma_rings(p_nxge_t nxgep) 130344961713Sgirish { 1304678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 1305678453a8Sspeer int rdc; 130644961713Sgirish 130744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 130844961713Sgirish 1309678453a8Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1310678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1311678453a8Sspeer "<== nxge_tx_port_fatal_err_recover: " 1312678453a8Sspeer "NULL ring pointer(s)")); 131344961713Sgirish return; 131444961713Sgirish } 131544961713Sgirish 1316678453a8Sspeer if (set->owned.map == 0) { 131744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1318678453a8Sspeer "nxge_rxdma_regs_dump_channels: no channels")); 131944961713Sgirish return; 132044961713Sgirish } 132144961713Sgirish 1322678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1323678453a8Sspeer if ((1 << rdc) & set->owned.map) { 1324678453a8Sspeer rx_rbr_ring_t *ring = 1325678453a8Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 1326678453a8Sspeer if (ring) { 1327678453a8Sspeer nxge_rxdma_hw_stop(nxgep, rdc); 1328678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 132952ccf843Smisaki "==> nxge_fixup_rxdma_rings: " 133052ccf843Smisaki "channel %d ring $%px", 
133152ccf843Smisaki rdc, ring)); 13323587e8e2SMichael Speer (void) nxge_rxdma_fix_channel(nxgep, rdc); 1333678453a8Sspeer } 1334678453a8Sspeer } 133544961713Sgirish } 133644961713Sgirish 133744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 133844961713Sgirish } 133944961713Sgirish 134044961713Sgirish void 134144961713Sgirish nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 134244961713Sgirish { 134344961713Sgirish int ndmas; 134444961713Sgirish p_rx_rbr_rings_t rx_rbr_rings; 134544961713Sgirish p_rx_rbr_ring_t *rbr_rings; 134644961713Sgirish p_rx_rcr_rings_t rx_rcr_rings; 134744961713Sgirish p_rx_rcr_ring_t *rcr_rings; 134844961713Sgirish p_rx_mbox_areas_t rx_mbox_areas_p; 134944961713Sgirish p_rx_mbox_t *rx_mbox_p; 135044961713Sgirish p_nxge_dma_pool_t dma_buf_poolp; 135144961713Sgirish p_nxge_dma_pool_t dma_cntl_poolp; 135244961713Sgirish p_rx_rbr_ring_t rbrp; 135344961713Sgirish p_rx_rcr_ring_t rcrp; 135444961713Sgirish p_rx_mbox_t mboxp; 135544961713Sgirish p_nxge_dma_common_t dmap; 135644961713Sgirish nxge_status_t status = NXGE_OK; 135744961713Sgirish 13583587e8e2SMichael Speer NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 135944961713Sgirish 136044961713Sgirish (void) nxge_rxdma_stop_channel(nxgep, channel); 136144961713Sgirish 136244961713Sgirish dma_buf_poolp = nxgep->rx_buf_pool_p; 136344961713Sgirish dma_cntl_poolp = nxgep->rx_cntl_pool_p; 136444961713Sgirish 136544961713Sgirish if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 136644961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 13673587e8e2SMichael Speer "<== nxge_rxdma_fix_channel: buf not allocated")); 136844961713Sgirish return; 136944961713Sgirish } 137044961713Sgirish 137144961713Sgirish ndmas = dma_buf_poolp->ndmas; 137244961713Sgirish if (!ndmas) { 137344961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 13743587e8e2SMichael Speer "<== nxge_rxdma_fix_channel: no dma allocated")); 137544961713Sgirish return; 
137644961713Sgirish } 137744961713Sgirish 1378a3c5bd6dSspeer rx_rbr_rings = nxgep->rx_rbr_rings; 137944961713Sgirish rx_rcr_rings = nxgep->rx_rcr_rings; 138044961713Sgirish rbr_rings = rx_rbr_rings->rbr_rings; 138144961713Sgirish rcr_rings = rx_rcr_rings->rcr_rings; 138244961713Sgirish rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 138344961713Sgirish rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 138444961713Sgirish 138544961713Sgirish /* Reinitialize the receive block and completion rings */ 13863587e8e2SMichael Speer rbrp = (p_rx_rbr_ring_t)rbr_rings[channel], 13873587e8e2SMichael Speer rcrp = (p_rx_rcr_ring_t)rcr_rings[channel], 13883587e8e2SMichael Speer mboxp = (p_rx_mbox_t)rx_mbox_p[channel]; 138944961713Sgirish 139044961713Sgirish rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 139144961713Sgirish rbrp->rbr_rd_index = 0; 139244961713Sgirish rcrp->comp_rd_index = 0; 139344961713Sgirish rcrp->comp_wt_index = 0; 139444961713Sgirish 139544961713Sgirish dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 139644961713Sgirish bzero((caddr_t)dmap->kaddrp, dmap->alength); 139744961713Sgirish 139844961713Sgirish status = nxge_rxdma_start_channel(nxgep, channel, 139952ccf843Smisaki rbrp, rcrp, mboxp); 140044961713Sgirish if (status != NXGE_OK) { 14013587e8e2SMichael Speer goto nxge_rxdma_fix_channel_fail; 1402da14cebeSEric Cheng } 1403da14cebeSEric Cheng 1404da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14053587e8e2SMichael Speer "<== nxge_rxdma_fix_channel: success (0x%08x)", status)); 14063587e8e2SMichael Speer return; 1407da14cebeSEric Cheng 14083587e8e2SMichael Speer nxge_rxdma_fix_channel_fail: 1409da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14103587e8e2SMichael Speer "<== nxge_rxdma_fix_channel: failed (0x%08x)", status)); 141144961713Sgirish } 141244961713Sgirish 141344961713Sgirish p_rx_rbr_ring_t 141444961713Sgirish nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 141544961713Sgirish { 1416678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 1417678453a8Sspeer 
nxge_channel_t rdc; 141844961713Sgirish 141944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 142052ccf843Smisaki "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 142144961713Sgirish 1422678453a8Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1423678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1424678453a8Sspeer "<== nxge_rxdma_get_rbr_ring: " 1425678453a8Sspeer "NULL ring pointer(s)")); 142644961713Sgirish return (NULL); 142744961713Sgirish } 1428678453a8Sspeer 1429678453a8Sspeer if (set->owned.map == 0) { 143044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1431678453a8Sspeer "<== nxge_rxdma_get_rbr_ring: no channels")); 143244961713Sgirish return (NULL); 143344961713Sgirish } 143444961713Sgirish 1435678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1436678453a8Sspeer if ((1 << rdc) & set->owned.map) { 1437678453a8Sspeer rx_rbr_ring_t *ring = 1438678453a8Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 1439678453a8Sspeer if (ring) { 1440678453a8Sspeer if (channel == ring->rdc) { 1441678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 1442678453a8Sspeer "==> nxge_rxdma_get_rbr_ring: " 1443678453a8Sspeer "channel %d ring $%p", rdc, ring)); 1444678453a8Sspeer return (ring); 1445678453a8Sspeer } 1446678453a8Sspeer } 144744961713Sgirish } 144844961713Sgirish } 144944961713Sgirish 145044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 145152ccf843Smisaki "<== nxge_rxdma_get_rbr_ring: not found")); 145244961713Sgirish 145344961713Sgirish return (NULL); 145444961713Sgirish } 145544961713Sgirish 145644961713Sgirish p_rx_rcr_ring_t 145744961713Sgirish nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 145844961713Sgirish { 1459678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 1460678453a8Sspeer nxge_channel_t rdc; 146144961713Sgirish 146244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 146352ccf843Smisaki "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 146444961713Sgirish 1465678453a8Sspeer if (nxgep->rx_rcr_rings == 0 || 
nxgep->rx_rcr_rings->rcr_rings == 0) { 1466678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1467678453a8Sspeer "<== nxge_rxdma_get_rcr_ring: " 1468678453a8Sspeer "NULL ring pointer(s)")); 146944961713Sgirish return (NULL); 147044961713Sgirish } 1471678453a8Sspeer 1472678453a8Sspeer if (set->owned.map == 0) { 147344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 1474678453a8Sspeer "<== nxge_rxdma_get_rbr_ring: no channels")); 147544961713Sgirish return (NULL); 147644961713Sgirish } 147744961713Sgirish 1478678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1479678453a8Sspeer if ((1 << rdc) & set->owned.map) { 1480678453a8Sspeer rx_rcr_ring_t *ring = 1481678453a8Sspeer nxgep->rx_rcr_rings->rcr_rings[rdc]; 1482678453a8Sspeer if (ring) { 1483678453a8Sspeer if (channel == ring->rdc) { 1484678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 1485678453a8Sspeer "==> nxge_rxdma_get_rcr_ring: " 1486678453a8Sspeer "channel %d ring $%p", rdc, ring)); 1487678453a8Sspeer return (ring); 1488678453a8Sspeer } 1489678453a8Sspeer } 149044961713Sgirish } 149144961713Sgirish } 149244961713Sgirish 149344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 149452ccf843Smisaki "<== nxge_rxdma_get_rcr_ring: not found")); 149544961713Sgirish 149644961713Sgirish return (NULL); 149744961713Sgirish } 149844961713Sgirish 149944961713Sgirish /* 150044961713Sgirish * Static functions start here. 
150144961713Sgirish */ 150244961713Sgirish static p_rx_msg_t 150344961713Sgirish nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 150444961713Sgirish { 150544961713Sgirish p_rx_msg_t nxge_mp = NULL; 150644961713Sgirish p_nxge_dma_common_t dmamsg_p; 150744961713Sgirish uchar_t *buffer; 150844961713Sgirish 150944961713Sgirish nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 151044961713Sgirish if (nxge_mp == NULL) { 151156d930aeSspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 151252ccf843Smisaki "Allocation of a rx msg failed.")); 151344961713Sgirish goto nxge_allocb_exit; 151444961713Sgirish } 151544961713Sgirish 151644961713Sgirish nxge_mp->use_buf_pool = B_FALSE; 151744961713Sgirish if (dmabuf_p) { 151844961713Sgirish nxge_mp->use_buf_pool = B_TRUE; 151944961713Sgirish dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 152044961713Sgirish *dmamsg_p = *dmabuf_p; 152144961713Sgirish dmamsg_p->nblocks = 1; 152244961713Sgirish dmamsg_p->block_size = size; 152344961713Sgirish dmamsg_p->alength = size; 152444961713Sgirish buffer = (uchar_t *)dmabuf_p->kaddrp; 152544961713Sgirish 152644961713Sgirish dmabuf_p->kaddrp = (void *) 152752ccf843Smisaki ((char *)dmabuf_p->kaddrp + size); 152844961713Sgirish dmabuf_p->ioaddr_pp = (void *) 152952ccf843Smisaki ((char *)dmabuf_p->ioaddr_pp + size); 153044961713Sgirish dmabuf_p->alength -= size; 153144961713Sgirish dmabuf_p->offset += size; 153244961713Sgirish dmabuf_p->dma_cookie.dmac_laddress += size; 153344961713Sgirish dmabuf_p->dma_cookie.dmac_size -= size; 153444961713Sgirish 153544961713Sgirish } else { 153644961713Sgirish buffer = KMEM_ALLOC(size, KM_NOSLEEP); 153744961713Sgirish if (buffer == NULL) { 153856d930aeSspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 153952ccf843Smisaki "Allocation of a receive page failed.")); 154044961713Sgirish goto nxge_allocb_fail1; 154144961713Sgirish } 154244961713Sgirish } 154344961713Sgirish 154444961713Sgirish nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, 
&nxge_mp->freeb); 154544961713Sgirish if (nxge_mp->rx_mblk_p == NULL) { 154656d930aeSspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 154744961713Sgirish goto nxge_allocb_fail2; 154844961713Sgirish } 154944961713Sgirish 155044961713Sgirish nxge_mp->buffer = buffer; 155144961713Sgirish nxge_mp->block_size = size; 155244961713Sgirish nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 155344961713Sgirish nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 155444961713Sgirish nxge_mp->ref_cnt = 1; 155544961713Sgirish nxge_mp->free = B_TRUE; 155644961713Sgirish nxge_mp->rx_use_bcopy = B_FALSE; 155744961713Sgirish 155814ea4bb7Ssd atomic_inc_32(&nxge_mblks_pending); 155944961713Sgirish 156044961713Sgirish goto nxge_allocb_exit; 156144961713Sgirish 156244961713Sgirish nxge_allocb_fail2: 156344961713Sgirish if (!nxge_mp->use_buf_pool) { 156444961713Sgirish KMEM_FREE(buffer, size); 156544961713Sgirish } 156644961713Sgirish 156744961713Sgirish nxge_allocb_fail1: 156844961713Sgirish KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 156944961713Sgirish nxge_mp = NULL; 157044961713Sgirish 157144961713Sgirish nxge_allocb_exit: 157244961713Sgirish return (nxge_mp); 157344961713Sgirish } 157444961713Sgirish 157544961713Sgirish p_mblk_t 157644961713Sgirish nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 157744961713Sgirish { 157844961713Sgirish p_mblk_t mp; 157944961713Sgirish 158044961713Sgirish NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 158144961713Sgirish NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 158252ccf843Smisaki "offset = 0x%08X " 158352ccf843Smisaki "size = 0x%08X", 158452ccf843Smisaki nxge_mp, offset, size)); 158544961713Sgirish 158644961713Sgirish mp = desballoc(&nxge_mp->buffer[offset], size, 158752ccf843Smisaki 0, &nxge_mp->freeb); 158844961713Sgirish if (mp == NULL) { 158944961713Sgirish NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 159044961713Sgirish goto nxge_dupb_exit; 159144961713Sgirish } 159244961713Sgirish 
atomic_inc_32(&nxge_mp->ref_cnt); 159344961713Sgirish 159444961713Sgirish 159544961713Sgirish nxge_dupb_exit: 159644961713Sgirish NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 159752ccf843Smisaki nxge_mp)); 159844961713Sgirish return (mp); 159944961713Sgirish } 160044961713Sgirish 160144961713Sgirish p_mblk_t 160244961713Sgirish nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 160344961713Sgirish { 160444961713Sgirish p_mblk_t mp; 160544961713Sgirish uchar_t *dp; 160644961713Sgirish 160744961713Sgirish mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 160844961713Sgirish if (mp == NULL) { 160944961713Sgirish NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 161044961713Sgirish goto nxge_dupb_bcopy_exit; 161144961713Sgirish } 161244961713Sgirish dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 161344961713Sgirish bcopy((void *)&nxge_mp->buffer[offset], dp, size); 161444961713Sgirish mp->b_wptr = dp + size; 161544961713Sgirish 161644961713Sgirish nxge_dupb_bcopy_exit: 161744961713Sgirish NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 161852ccf843Smisaki nxge_mp)); 161944961713Sgirish return (mp); 162044961713Sgirish } 162144961713Sgirish 162244961713Sgirish void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 162344961713Sgirish p_rx_msg_t rx_msg_p); 162444961713Sgirish 162544961713Sgirish void 162644961713Sgirish nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 162744961713Sgirish { 162844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page")); 162944961713Sgirish 163044961713Sgirish /* Reuse this buffer */ 163144961713Sgirish rx_msg_p->free = B_FALSE; 163244961713Sgirish rx_msg_p->cur_usage_cnt = 0; 163344961713Sgirish rx_msg_p->max_usage_cnt = 0; 163444961713Sgirish rx_msg_p->pkt_buf_size = 0; 163544961713Sgirish 163644961713Sgirish if (rx_rbr_p->rbr_use_bcopy) { 163744961713Sgirish rx_msg_p->rx_use_bcopy = B_FALSE; 163844961713Sgirish atomic_dec_32(&rx_rbr_p->rbr_consumed); 
	}

	/*
	 * Get the rbr header pointer and its offset index.
	 * The post_lock serializes writers of rbr_wr_index and the
	 * descriptor slot; the hardware kick happens after the lock
	 * is dropped.
	 */
	MUTEX_ENTER(&rx_rbr_p->post_lock);
	rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
	    rx_rbr_p->rbr_wrap_mask);
	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
	MUTEX_EXIT(&rx_rbr_p->post_lock);
	/* Tell the RDC one more buffer is available. */
	npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep),
	    rx_rbr_p->rdc, 1);

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "<== nxge_post_page (channel %d post_next_index %d)",
	    rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
}

/*
 * nxge_freeb
 *
 *	Free callback installed by nxge_allocb() (via desballoc).
 *	Drops one reference on the rx_msg_t.  When the last reference
 *	goes away the buffer and wrapper are destroyed, and -- if this
 *	was the last buffer of an UNMAPPED ring -- the ring itself is
 *	torn down.  Otherwise, if the buffer was marked free and this
 *	was the loaned reference (ref_cnt back to 1), the buffer is
 *	reposted to the hardware.
 */
void
nxge_freeb(p_rx_msg_t rx_msg_p)
{
	size_t size;
	uchar_t *buffer = NULL;
	int ref_cnt;
	boolean_t free_state = B_FALSE;

	rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p;

	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
	NXGE_DEBUG_MSG((NULL, MEM2_CTL,
	    "nxge_freeb:rx_msg_p = $%p (block pending %d)",
	    rx_msg_p, nxge_mblks_pending));

	/*
	 * First we need to get the free state, then
	 * atomic decrement the reference count to prevent
	 * the race condition with the interrupt thread that
	 * is processing a loaned up buffer block.
	 */
	free_state = rx_msg_p->free;
	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
	if (!ref_cnt) {
		/* Last reference: destroy the buffer and its wrapper. */
		atomic_dec_32(&nxge_mblks_pending);
		buffer = rx_msg_p->buffer;
		size = rx_msg_p->block_size;
		NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
		    "will free: rx_msg_p = $%p (block pending %d)",
		    rx_msg_p, nxge_mblks_pending));

		/* Pool-carved buffers are owned by the DMA pool. */
		if (!rx_msg_p->use_buf_pool) {
			KMEM_FREE(buffer, size);
		}

		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));

		if (ring) {
			/*
			 * Decrement the receive buffer ring's reference
			 * count, too.
			 */
			atomic_dec_32(&ring->rbr_ref_cnt);

			/*
			 * Free the receive buffer ring, if
			 * 1. all the receive buffers have been freed
			 * 2. and we are in the proper state (that is,
			 *    we are not UNMAPPING).
			 */
			if (ring->rbr_ref_cnt == 0 &&
			    ring->rbr_state == RBR_UNMAPPED) {
				/*
				 * Free receive data buffers,
				 * buffer index information
				 * (rxring_info) and
				 * the message block ring.
				 */
				NXGE_DEBUG_MSG((NULL, RX_CTL,
				    "nxge_freeb:rx_msg_p = $%p "
				    "(block pending %d) free buffers",
				    rx_msg_p, nxge_mblks_pending));
				nxge_rxdma_databuf_free(ring);
				if (ring->ring_info) {
					KMEM_FREE(ring->ring_info,
					    sizeof (rxring_info_t));
				}

				if (ring->rx_msg_ring) {
					KMEM_FREE(ring->rx_msg_ring,
					    ring->tnblocks *
					    sizeof (p_rx_msg_t));
				}
				KMEM_FREE(ring, sizeof (*ring));
			}
		}
		return;
	}

	/*
	 * Repost buffer.
	 * ref_cnt == 1 means only the ring's own reference remains;
	 * only repost while the ring is actively POSTING.
	 */
	if (free_state && (ref_cnt == 1) && ring) {
		NXGE_DEBUG_MSG((NULL, RX_CTL,
		    "nxge_freeb: post page $%p:", rx_msg_p));
		if (ring->rbr_state == RBR_POSTING)
			nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p);
	}

	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
}

/*
 * nxge_rx_intr
 *
 *	Receive interrupt service routine.  arg1 is the logical device
 *	(p_nxge_ldv_t), arg2 the driver instance.  Continues below:
 *	processes completions (unless polling), acks status bits, and
 *	re-arms or disarms the logical group.
 */
uint_t
nxge_rx_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t nxgep = (p_nxge_t)arg2;
	p_nxge_ldg_t ldgp;
	uint8_t channel;
	npi_handle_t handle;
	rx_dma_ctl_stat_t cs;
	p_rx_rcr_ring_t rcrp;
	mblk_t *mp = NULL;

	if (ldvp == NULL) {
		NXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== nxge_rx_intr: arg2 $%p arg1 $%p",
		    nxgep, ldvp));
		return (DDI_INTR_CLAIMED);
	}

	/* Trust the ldv's back-pointer if arg2 is absent or stale. */
	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
		nxgep = ldvp->nxgep;
	}

	/* Ignore interrupts that arrive before the interface is up. */
	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_rx_intr: interface not started or intialized"));
		return (DDI_INTR_CLAIMED);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_intr: arg2 $%p arg1 $%p",
	    nxgep, ldvp));

	/*
	 * Get the PIO handle.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	/*
	 * Get the ring to enable us to process packets.
	 */
	rcrp = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index];

	/*
	 * The RCR ring lock must be held when packets
	 * are being processed and the hardware registers are
	 * being read or written to prevent race condition
	 * among the interrupt thread, the polling thread
	 * (will cause fatal errors such as rcrincon bit set)
	 * and the setting of the poll_flag.
	 */
	MUTEX_ENTER(&rcrp->lock);

	/*
	 * Get the control and status for this channel.
	 */
	channel = ldvp->channel;
	ldgp = ldvp->ldgp;

	if (!isLDOMguest(nxgep) && (!nxgep->rx_channel_started[channel])) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_rx_intr: channel is not started"));

		/*
		 * We received an interrupt before the ring is started.
		 * Ack the write-1-to-clear status bits and leave the
		 * mailbox-update (mex) bit armed.
		 */
		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
		    &cs.value);
		cs.value &= RX_DMA_CTL_STAT_WR1C;
		cs.bits.hdw.mex = 1;
		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
		    cs.value);

		/*
		 * Rearm this logical group if this is a single device
		 * group.
		 */
		if (ldgp->nldvs == 1) {
			if (isLDOMguest(nxgep)) {
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				ldgimgm_t mgm;

				mgm.value = 0;
				mgm.bits.ldw.arm = 1;
				mgm.bits.ldw.timer = ldgp->ldg_timer;

				NXGE_REG_WR64(handle,
				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
				    mgm.value);
			}
		}
		MUTEX_EXIT(&rcrp->lock);
		return (DDI_INTR_CLAIMED);
	}

	ASSERT(rcrp->ldgp == ldgp);
	ASSERT(rcrp->ldvp == ldvp);

	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
	    "cs 0x%016llx rcrto 0x%x rcrthres %x",
	    channel,
	    cs.value,
	    cs.bits.hdw.rcrto,
	    cs.bits.hdw.rcrthres));

	/* In polling mode the poll thread drains the ring, not the ISR. */
	if (!rcrp->poll_flag) {
		mp = nxge_rx_pkts(nxgep, rcrp, cs, -1);
	}

	/* error events. */
	if (cs.value & RX_DMA_CTL_STAT_ERROR) {
		(void) nxge_rx_err_evnts(nxgep, channel, cs);
	}

	/*
	 * Enable the mailbox update interrupt if we want
	 * to use mailbox. We probably don't need to use
	 * mailbox as it only saves us one pio read.
	 * Also write 1 to rcrthres and rcrto to clear
	 * these two edge triggered bits.
	 */
	cs.value &= RX_DMA_CTL_STAT_WR1C;
	cs.bits.hdw.mex = rcrp->poll_flag ? 0 : 1;
	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
	    cs.value);

	/*
	 * If the polling mode is enabled, disable the interrupt.
	 */
	if (rcrp->poll_flag) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p "
		    "(disabling interrupts)", channel, ldgp, ldvp));

		/*
		 * Disarm this logical group if this is a single device
		 * group.
		 */
		if (ldgp->nldvs == 1) {
			if (isLDOMguest(nxgep)) {
				ldgp->arm = B_FALSE;
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				ldgimgm_t mgm;
				mgm.value = 0;
				mgm.bits.ldw.arm = 0;
				NXGE_REG_WR64(handle,
				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
				    mgm.value);
			}
		}
	} else {
		/*
		 * Rearm this logical group if this is a single device
		 * group.
		 */
		if (ldgp->nldvs == 1) {
			if (isLDOMguest(nxgep)) {
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				ldgimgm_t mgm;

				mgm.value = 0;
				mgm.bits.ldw.arm = 1;
				mgm.bits.ldw.timer = ldgp->ldg_timer;

				NXGE_REG_WR64(handle,
				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
				    mgm.value);
			}
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_intr: rdc %d ldgp $%p "
		    "exiting ISR (and call mac_rx_ring)", channel, ldgp));
	}
	MUTEX_EXIT(&rcrp->lock);

	/* Deliver the chain to GLDv3 after dropping the ring lock. */
	if (mp != NULL) {
		mac_rx_ring(nxgep->mach, rcrp->rcr_mac_handle, mp,
		    rcrp->rcr_gen_num);
	}
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED"));
	return (DDI_INTR_CLAIMED);
}

/*
 * This routine is the main packet receive processing function.
 * It gets the packet type, error code, and buffer related
 * information from the receive completion entry.
 * How many completion entries to process is based on the number of packets
 * queued by the hardware, a hardware maintained tail pointer
 * and a configurable receive packet count.
 *
 * A chain of message blocks will be created as result of processing
 * the completion entries. This chain of message blocks will be returned and
 * a hardware control status register will be updated with the number of
 * packets were removed from the hardware queue.
 *
 * The RCR ring lock is held when entering this function.
 */
static mblk_t *
nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs,
    int bytes_to_pickup)
{
	npi_handle_t handle;
	uint8_t channel;
	uint32_t comp_rd_index;
	p_rcr_entry_t rcr_desc_rd_head_p;
	p_rcr_entry_t rcr_desc_rd_head_pp;
	p_mblk_t nmp, mp_cont, head_mp, *tail_mp;
	uint16_t qlen, nrcr_read, npkt_read;
	uint32_t qlen_hw;
	boolean_t multi;
	rcrcfig_b_t rcr_cfg_b;
	int totallen = 0;
#if defined(_BIG_ENDIAN)
	npi_status_t rs = NPI_SUCCESS;
#endif

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: "
	    "channel %d", rcr_p->rdc));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		return (NULL);
	}
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	channel = rcr_p->rdc;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts: START: rcr channel %d "
	    "head_p $%p head_pp $%p index %d ",
	    channel, rcr_p->rcr_desc_rd_head_p,
	    rcr_p->rcr_desc_rd_head_pp,
	    rcr_p->comp_rd_index));


	/* Read the hardware's count of queued completion entries. */
#if !defined(_BIG_ENDIAN)
	qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
#else
	rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
	if (rs != NPI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: "
		    "channel %d, get qlen failed 0x%08x",
		    channel, rs));
		return (NULL);
	}
#endif
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
	    "qlen %d", channel, qlen));



	if (!qlen) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_pkts:rcr channel %d "
		    "qlen %d (no pkts)", channel, qlen));

		return (NULL);
	}

	comp_rd_index = rcr_p->comp_rd_index;

	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
	nrcr_read = npkt_read = 0;

	/*
	 * Number of packets queued
	 * (The jumbo or multi packet will be counted as only one
	 *  packets and it may take up more than one completion entry).
	 */
	qlen_hw = (qlen < nxge_max_rx_pkts) ?
	    qlen : nxge_max_rx_pkts;
	head_mp = NULL;
	tail_mp = &head_mp;
	nmp = mp_cont = NULL;
	multi = B_FALSE;

	while (qlen_hw) {

#ifdef NXGE_DEBUG
		nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
#endif
		/*
		 * Process one completion ring entry.
		 */
		nxge_receive_packet(nxgep,
		    rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);

		/*
		 * message chaining modes:
		 * single-entry frames link via b_next; the segments of
		 * a multi-entry frame link via b_cont.
		 */
		if (nmp) {
			nmp->b_next = NULL;
			if (!multi && !mp_cont) { /* frame fits a partition */
				*tail_mp = nmp;
				tail_mp = &nmp->b_next;
				totallen += MBLKL(nmp);
				nmp = NULL;
			} else if (multi && !mp_cont) { /* first segment */
				*tail_mp = nmp;
				tail_mp = &nmp->b_cont;
				totallen += MBLKL(nmp);
			} else if (multi && mp_cont) { /* mid of multi segs */
				*tail_mp = mp_cont;
				tail_mp = &mp_cont->b_cont;
				totallen += MBLKL(mp_cont);
			} else if (!multi && mp_cont) { /* last segment */
				*tail_mp = mp_cont;
				tail_mp = &nmp->b_next;
				totallen += MBLKL(mp_cont);
				nmp = NULL;
			}
		}
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts: loop: rcr channel %d "
		    "before updating: multi %d "
		    "nrcr_read %d "
		    "npk read %d "
		    "head_pp $%p index %d ",
		    channel,
		    multi,
		    nrcr_read, npkt_read, rcr_desc_rd_head_pp,
		    comp_rd_index));

		/* A packet counts once, however many entries it spans. */
		if (!multi) {
			qlen_hw--;
			npkt_read++;
		}

		/*
		 * Update the next read entry.
		 */
		comp_rd_index = NEXT_ENTRY(comp_rd_index,
		    rcr_p->comp_wrap_mask);

		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
		    rcr_p->rcr_desc_first_p,
		    rcr_p->rcr_desc_last_p);

		nrcr_read++;

		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rx_pkts: (SAM, process one packet) "
		    "nrcr_read %d",
		    nrcr_read));
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts: loop: rcr channel %d "
		    "multi %d "
		    "nrcr_read %d "
		    "npk read %d "
		    "head_pp $%p index %d ",
		    channel,
		    multi,
		    nrcr_read, npkt_read, rcr_desc_rd_head_pp,
		    comp_rd_index));

		/* Honor the poll-mode byte budget (-1 means unlimited). */
		if ((bytes_to_pickup != -1) &&
		    (totallen >= bytes_to_pickup)) {
			break;
		}
	}

	/* Publish the updated software ring state. */
	rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
	rcr_p->comp_rd_index = comp_rd_index;
	rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
	/* Propagate tunable changes, clamped to their hardware minimums. */
	if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
	    (nxgep->intr_threshold != rcr_p->intr_threshold)) {

		rcr_p->intr_timeout = (nxgep->intr_timeout <
		    NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
		    nxgep->intr_timeout;

		rcr_p->intr_threshold = (nxgep->intr_threshold <
		    NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
		    nxgep->intr_threshold;

		rcr_cfg_b.value = 0x0ULL;
		rcr_cfg_b.bits.ldw.entout = 1;
		rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout;
		rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold;

		RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG,
		    channel, rcr_cfg_b.value);
	}

	cs.bits.ldw.pktread = npkt_read;
	cs.bits.ldw.ptrread = nrcr_read;
	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
	    channel, cs.value);
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts: EXIT: rcr channel %d "
	    "head_pp $%p index %016llx ",
	    channel,
	    rcr_p->rcr_desc_rd_head_pp,
	    rcr_p->comp_rd_index));
	/*
	 * Update RCR buffer pointer read and number of packets
	 * read.
	 */

	/* NOTE(review): "return" and "channel" concatenate in this trace. */
	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return"
	    "channel %d", rcr_p->rdc));

	return (head_mp);
}

/*
 * nxge_receive_packet
 *
 *	Decode one RCR completion entry (continues past this view).
 */
void
nxge_receive_packet(p_nxge_t nxgep,
    p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
    boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
{
	p_mblk_t nmp = NULL;
	uint64_t multi;
	uint64_t dcf_err;
	uint8_t channel;

	boolean_t first_entry = B_TRUE;
	boolean_t is_tcp_udp = B_FALSE;
	boolean_t buffer_free = B_FALSE;
	boolean_t error_send_up = B_FALSE;
	uint8_t error_type;
	uint16_t l2_len;
	uint16_t skip_len;
	uint8_t pktbufsz_type;
	uint64_t rcr_entry;
	uint64_t *pkt_buf_addr_pp;
	uint64_t *pkt_buf_addr_p;
	uint32_t buf_offset;
	uint32_t bsize;
	uint32_t error_disp_cnt;
	uint32_t msg_index;
	p_rx_rbr_ring_t rx_rbr_p;
	p_rx_msg_t *rx_msg_ring_p;
	p_rx_msg_t rx_msg_p;
	uint16_t sw_offset_bytes = 0, hdr_size = 0;
	nxge_status_t status = NXGE_OK;
	boolean_t is_valid = B_FALSE;
	p_nxge_rx_ring_stats_t rdc_stats;
	uint32_t bytes_read;
	uint64_t pkt_type;
	uint64_t frag;
	boolean_t pkt_too_long_err = B_FALSE;
#ifdef NXGE_DEBUG
	int dump_len;
#endif
220244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 220344961713Sgirish first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 220444961713Sgirish 220544961713Sgirish rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 220644961713Sgirish 220744961713Sgirish multi = (rcr_entry & RCR_MULTI_MASK); 220844961713Sgirish dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 220944961713Sgirish pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 221044961713Sgirish 221144961713Sgirish error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 221244961713Sgirish frag = (rcr_entry & RCR_FRAG_MASK); 221344961713Sgirish 221444961713Sgirish l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 221544961713Sgirish 221644961713Sgirish pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 221752ccf843Smisaki RCR_PKTBUFSZ_SHIFT); 2218adfcba55Sjoycey #if defined(__i386) 2219adfcba55Sjoycey pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 222052ccf843Smisaki RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2221adfcba55Sjoycey #else 222244961713Sgirish pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 222352ccf843Smisaki RCR_PKT_BUF_ADDR_SHIFT); 2224adfcba55Sjoycey #endif 222544961713Sgirish 222644961713Sgirish channel = rcr_p->rdc; 222744961713Sgirish 222844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 222952ccf843Smisaki "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 223052ccf843Smisaki "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 223152ccf843Smisaki "error_type 0x%x pkt_type 0x%x " 223252ccf843Smisaki "pktbufsz_type %d ", 223352ccf843Smisaki rcr_desc_rd_head_p, 223452ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 223552ccf843Smisaki multi, 223652ccf843Smisaki error_type, 223752ccf843Smisaki pkt_type, 223852ccf843Smisaki pktbufsz_type)); 223944961713Sgirish 224044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 224152ccf843Smisaki "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 224252ccf843Smisaki "pkt_buf_addr_pp $%p l2_len %d 
multi 0x%llx " 224352ccf843Smisaki "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 224452ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 224552ccf843Smisaki multi, 224652ccf843Smisaki error_type, 224752ccf843Smisaki pkt_type)); 224844961713Sgirish 224944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 225052ccf843Smisaki "==> (rbr) nxge_receive_packet: entry 0x%0llx " 225152ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 225252ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 225344961713Sgirish 225444961713Sgirish /* get the stats ptr */ 225544961713Sgirish rdc_stats = rcr_p->rdc_stats; 225644961713Sgirish 225744961713Sgirish if (!l2_len) { 225844961713Sgirish 225944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 226052ccf843Smisaki "<== nxge_receive_packet: failed: l2 length is 0.")); 226144961713Sgirish return; 226244961713Sgirish } 226344961713Sgirish 22644202ea4bSsbehera /* 2265da14cebeSEric Cheng * Software workaround for BMAC hardware limitation that allows 22664202ea4bSsbehera * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 22674202ea4bSsbehera * instead of 0x2400 for jumbo. 22684202ea4bSsbehera */ 22694202ea4bSsbehera if (l2_len > nxgep->mac.maxframesize) { 22704202ea4bSsbehera pkt_too_long_err = B_TRUE; 22714202ea4bSsbehera } 22724202ea4bSsbehera 227356d930aeSspeer /* Hardware sends us 4 bytes of CRC as no stripping is done. 
*/ 227456d930aeSspeer l2_len -= ETHERFCSL; 227556d930aeSspeer 227644961713Sgirish /* shift 6 bits to get the full io address */ 2277adfcba55Sjoycey #if defined(__i386) 2278adfcba55Sjoycey pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 227952ccf843Smisaki RCR_PKT_BUF_ADDR_SHIFT_FULL); 2280adfcba55Sjoycey #else 228144961713Sgirish pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 228252ccf843Smisaki RCR_PKT_BUF_ADDR_SHIFT_FULL); 2283adfcba55Sjoycey #endif 228444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 228552ccf843Smisaki "==> (rbr) nxge_receive_packet: entry 0x%0llx " 228652ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 228752ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 228844961713Sgirish 228944961713Sgirish rx_rbr_p = rcr_p->rx_rbr_p; 229044961713Sgirish rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 229144961713Sgirish 229244961713Sgirish if (first_entry) { 229344961713Sgirish hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 229452ccf843Smisaki RXDMA_HDR_SIZE_DEFAULT); 229544961713Sgirish 229644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 229752ccf843Smisaki "==> nxge_receive_packet: first entry 0x%016llx " 229852ccf843Smisaki "pkt_buf_addr_pp $%p l2_len %d hdr %d", 229952ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 230052ccf843Smisaki hdr_size)); 230144961713Sgirish } 230244961713Sgirish 230344961713Sgirish MUTEX_ENTER(&rx_rbr_p->lock); 230444961713Sgirish 230544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 230652ccf843Smisaki "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 230752ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 230852ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 230944961713Sgirish 231044961713Sgirish /* 231144961713Sgirish * Packet buffer address in the completion entry points 231244961713Sgirish * to the starting buffer address (offset 0). 231344961713Sgirish * Use the starting buffer address to locate the corresponding 231444961713Sgirish * kernel address. 
231544961713Sgirish */ 231644961713Sgirish status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 231752ccf843Smisaki pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 231852ccf843Smisaki &buf_offset, 231952ccf843Smisaki &msg_index); 232044961713Sgirish 232144961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 232252ccf843Smisaki "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 232352ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 232452ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 232544961713Sgirish 232644961713Sgirish if (status != NXGE_OK) { 232744961713Sgirish MUTEX_EXIT(&rx_rbr_p->lock); 232844961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 232952ccf843Smisaki "<== nxge_receive_packet: found vaddr failed %d", 233052ccf843Smisaki status)); 233144961713Sgirish return; 233244961713Sgirish } 233344961713Sgirish 233444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 233552ccf843Smisaki "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 233652ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 233752ccf843Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 233844961713Sgirish 233944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 234052ccf843Smisaki "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 234152ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 234252ccf843Smisaki msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 234344961713Sgirish 234444961713Sgirish rx_msg_p = rx_msg_ring_p[msg_index]; 234544961713Sgirish 234644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 234752ccf843Smisaki "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 234852ccf843Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 234952ccf843Smisaki msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 235044961713Sgirish 235144961713Sgirish switch (pktbufsz_type) { 235244961713Sgirish case RCR_PKTBUFSZ_0: 235344961713Sgirish bsize = rx_rbr_p->pkt_buf_size0_bytes; 235444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 235552ccf843Smisaki "==> nxge_receive_packet: 0 buf %d", bsize)); 235644961713Sgirish 
break; 235744961713Sgirish case RCR_PKTBUFSZ_1: 235844961713Sgirish bsize = rx_rbr_p->pkt_buf_size1_bytes; 235944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 236052ccf843Smisaki "==> nxge_receive_packet: 1 buf %d", bsize)); 236144961713Sgirish break; 236244961713Sgirish case RCR_PKTBUFSZ_2: 236344961713Sgirish bsize = rx_rbr_p->pkt_buf_size2_bytes; 236444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 236552ccf843Smisaki "==> nxge_receive_packet: 2 buf %d", bsize)); 236644961713Sgirish break; 236744961713Sgirish case RCR_SINGLE_BLOCK: 236844961713Sgirish bsize = rx_msg_p->block_size; 236944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 237052ccf843Smisaki "==> nxge_receive_packet: single %d", bsize)); 237144961713Sgirish 237244961713Sgirish break; 237344961713Sgirish default: 237444961713Sgirish MUTEX_EXIT(&rx_rbr_p->lock); 237544961713Sgirish return; 237644961713Sgirish } 237744961713Sgirish 237844961713Sgirish DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 237952ccf843Smisaki (buf_offset + sw_offset_bytes), 238052ccf843Smisaki (hdr_size + l2_len), 238152ccf843Smisaki DDI_DMA_SYNC_FORCPU); 238244961713Sgirish 238344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 238452ccf843Smisaki "==> nxge_receive_packet: after first dump:usage count")); 238544961713Sgirish 238644961713Sgirish if (rx_msg_p->cur_usage_cnt == 0) { 238744961713Sgirish if (rx_rbr_p->rbr_use_bcopy) { 238844961713Sgirish atomic_inc_32(&rx_rbr_p->rbr_consumed); 238944961713Sgirish if (rx_rbr_p->rbr_consumed < 239052ccf843Smisaki rx_rbr_p->rbr_threshold_hi) { 239144961713Sgirish if (rx_rbr_p->rbr_threshold_lo == 0 || 239252ccf843Smisaki ((rx_rbr_p->rbr_consumed >= 239352ccf843Smisaki rx_rbr_p->rbr_threshold_lo) && 239452ccf843Smisaki (rx_rbr_p->rbr_bufsize_type >= 239552ccf843Smisaki pktbufsz_type))) { 239644961713Sgirish rx_msg_p->rx_use_bcopy = B_TRUE; 239744961713Sgirish } 239844961713Sgirish } else { 239944961713Sgirish rx_msg_p->rx_use_bcopy = B_TRUE; 240044961713Sgirish } 240144961713Sgirish } 
240244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 240352ccf843Smisaki "==> nxge_receive_packet: buf %d (new block) ", 240452ccf843Smisaki bsize)); 240544961713Sgirish 240644961713Sgirish rx_msg_p->pkt_buf_size_code = pktbufsz_type; 240744961713Sgirish rx_msg_p->pkt_buf_size = bsize; 240844961713Sgirish rx_msg_p->cur_usage_cnt = 1; 240944961713Sgirish if (pktbufsz_type == RCR_SINGLE_BLOCK) { 241044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 241152ccf843Smisaki "==> nxge_receive_packet: buf %d " 241252ccf843Smisaki "(single block) ", 241352ccf843Smisaki bsize)); 241444961713Sgirish /* 241544961713Sgirish * Buffer can be reused once the free function 241644961713Sgirish * is called. 241744961713Sgirish */ 241844961713Sgirish rx_msg_p->max_usage_cnt = 1; 241944961713Sgirish buffer_free = B_TRUE; 242044961713Sgirish } else { 242144961713Sgirish rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 242244961713Sgirish if (rx_msg_p->max_usage_cnt == 1) { 242344961713Sgirish buffer_free = B_TRUE; 242444961713Sgirish } 242544961713Sgirish } 242644961713Sgirish } else { 242744961713Sgirish rx_msg_p->cur_usage_cnt++; 242844961713Sgirish if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 242944961713Sgirish buffer_free = B_TRUE; 243044961713Sgirish } 243144961713Sgirish } 243244961713Sgirish 243344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 243444961713Sgirish "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 243552ccf843Smisaki msg_index, l2_len, 243652ccf843Smisaki rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 243744961713Sgirish 24384202ea4bSsbehera if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 243944961713Sgirish rdc_stats->ierrors++; 244044961713Sgirish if (dcf_err) { 244144961713Sgirish rdc_stats->dcf_err++; 244244961713Sgirish #ifdef NXGE_DEBUG 244344961713Sgirish if (!rdc_stats->dcf_err) { 244444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 244544961713Sgirish "nxge_receive_packet: channel %d dcf_err rcr" 244644961713Sgirish " 
0x%llx", channel, rcr_entry)); 244744961713Sgirish } 244844961713Sgirish #endif 244944961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 245052ccf843Smisaki NXGE_FM_EREPORT_RDMC_DCF_ERR); 24514202ea4bSsbehera } else if (pkt_too_long_err) { 24524202ea4bSsbehera rdc_stats->pkt_too_long_err++; 24534202ea4bSsbehera NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 24544202ea4bSsbehera " channel %d packet length [%d] > " 24554202ea4bSsbehera "maxframesize [%d]", channel, l2_len + ETHERFCSL, 24564202ea4bSsbehera nxgep->mac.maxframesize)); 245744961713Sgirish } else { 245844961713Sgirish /* Update error stats */ 245944961713Sgirish error_disp_cnt = NXGE_ERROR_SHOW_MAX; 246044961713Sgirish rdc_stats->errlog.compl_err_type = error_type; 246144961713Sgirish 246244961713Sgirish switch (error_type) { 2463f6485eecSyc /* 2464f6485eecSyc * Do not send FMA ereport for RCR_L2_ERROR and 2465f6485eecSyc * RCR_L4_CSUM_ERROR because most likely they indicate 2466f6485eecSyc * back pressure rather than HW failures. 
2467f6485eecSyc */ 246853f3d8ecSyc case RCR_L2_ERROR: 246953f3d8ecSyc rdc_stats->l2_err++; 247053f3d8ecSyc if (rdc_stats->l2_err < 247153f3d8ecSyc error_disp_cnt) { 247244961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 247353f3d8ecSyc " nxge_receive_packet:" 247453f3d8ecSyc " channel %d RCR L2_ERROR", 247553f3d8ecSyc channel)); 247653f3d8ecSyc } 247753f3d8ecSyc break; 247853f3d8ecSyc case RCR_L4_CSUM_ERROR: 247953f3d8ecSyc error_send_up = B_TRUE; 248053f3d8ecSyc rdc_stats->l4_cksum_err++; 248153f3d8ecSyc if (rdc_stats->l4_cksum_err < 248253f3d8ecSyc error_disp_cnt) { 248353f3d8ecSyc NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 248453f3d8ecSyc " nxge_receive_packet:" 248553f3d8ecSyc " channel %d" 248653f3d8ecSyc " RCR L4_CSUM_ERROR", channel)); 248753f3d8ecSyc } 248853f3d8ecSyc break; 2489f6485eecSyc /* 2490f6485eecSyc * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 2491f6485eecSyc * RCR_ZCP_SOFT_ERROR because they reflect the same 2492f6485eecSyc * FFLP and ZCP errors that have been reported by 2493f6485eecSyc * nxge_fflp.c and nxge_zcp.c. 
2494f6485eecSyc */ 249553f3d8ecSyc case RCR_FFLP_SOFT_ERROR: 249653f3d8ecSyc error_send_up = B_TRUE; 249753f3d8ecSyc rdc_stats->fflp_soft_err++; 249853f3d8ecSyc if (rdc_stats->fflp_soft_err < 249953f3d8ecSyc error_disp_cnt) { 250053f3d8ecSyc NXGE_ERROR_MSG((nxgep, 250153f3d8ecSyc NXGE_ERR_CTL, 250253f3d8ecSyc " nxge_receive_packet:" 250353f3d8ecSyc " channel %d" 250453f3d8ecSyc " RCR FFLP_SOFT_ERROR", channel)); 250553f3d8ecSyc } 250653f3d8ecSyc break; 250753f3d8ecSyc case RCR_ZCP_SOFT_ERROR: 250853f3d8ecSyc error_send_up = B_TRUE; 250953f3d8ecSyc rdc_stats->fflp_soft_err++; 251053f3d8ecSyc if (rdc_stats->zcp_soft_err < 251153f3d8ecSyc error_disp_cnt) 251253f3d8ecSyc NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 251353f3d8ecSyc " nxge_receive_packet: Channel %d" 251453f3d8ecSyc " RCR ZCP_SOFT_ERROR", channel)); 251553f3d8ecSyc break; 251653f3d8ecSyc default: 251753f3d8ecSyc rdc_stats->rcr_unknown_err++; 251853f3d8ecSyc if (rdc_stats->rcr_unknown_err 251953f3d8ecSyc < error_disp_cnt) { 252053f3d8ecSyc NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 252153f3d8ecSyc " nxge_receive_packet: Channel %d" 252253f3d8ecSyc " RCR entry 0x%llx error 0x%x", 252353f3d8ecSyc rcr_entry, channel, error_type)); 252453f3d8ecSyc } 252553f3d8ecSyc break; 252644961713Sgirish } 252744961713Sgirish } 252844961713Sgirish 252944961713Sgirish /* 253044961713Sgirish * Update and repost buffer block if max usage 253144961713Sgirish * count is reached. 
253244961713Sgirish */ 253344961713Sgirish if (error_send_up == B_FALSE) { 2534958cea9eSml atomic_inc_32(&rx_msg_p->ref_cnt); 253544961713Sgirish if (buffer_free == B_TRUE) { 253644961713Sgirish rx_msg_p->free = B_TRUE; 253744961713Sgirish } 253844961713Sgirish 253944961713Sgirish MUTEX_EXIT(&rx_rbr_p->lock); 254044961713Sgirish nxge_freeb(rx_msg_p); 254144961713Sgirish return; 254244961713Sgirish } 254344961713Sgirish } 254444961713Sgirish 254544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 254652ccf843Smisaki "==> nxge_receive_packet: DMA sync second ")); 254744961713Sgirish 254853f3d8ecSyc bytes_read = rcr_p->rcvd_pkt_bytes; 254944961713Sgirish skip_len = sw_offset_bytes + hdr_size; 255044961713Sgirish if (!rx_msg_p->rx_use_bcopy) { 2551958cea9eSml /* 2552958cea9eSml * For loaned up buffers, the driver reference count 2553958cea9eSml * will be incremented first and then the free state. 2554958cea9eSml */ 255553f3d8ecSyc if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 255614ea4bb7Ssd if (first_entry) { 255714ea4bb7Ssd nmp->b_rptr = &nmp->b_rptr[skip_len]; 255853f3d8ecSyc if (l2_len < bsize - skip_len) { 255914ea4bb7Ssd nmp->b_wptr = &nmp->b_rptr[l2_len]; 256053f3d8ecSyc } else { 256153f3d8ecSyc nmp->b_wptr = &nmp->b_rptr[bsize 256253f3d8ecSyc - skip_len]; 256353f3d8ecSyc } 256414ea4bb7Ssd } else { 256553f3d8ecSyc if (l2_len - bytes_read < bsize) { 256614ea4bb7Ssd nmp->b_wptr = 256714ea4bb7Ssd &nmp->b_rptr[l2_len - bytes_read]; 256853f3d8ecSyc } else { 256953f3d8ecSyc nmp->b_wptr = &nmp->b_rptr[bsize]; 257053f3d8ecSyc } 257114ea4bb7Ssd } 257244961713Sgirish } 257353f3d8ecSyc } else { 257453f3d8ecSyc if (first_entry) { 257553f3d8ecSyc nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 257653f3d8ecSyc l2_len < bsize - skip_len ? 257753f3d8ecSyc l2_len : bsize - skip_len); 257853f3d8ecSyc } else { 257953f3d8ecSyc nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 258053f3d8ecSyc l2_len - bytes_read < bsize ? 
258153f3d8ecSyc l2_len - bytes_read : bsize); 258253f3d8ecSyc } 258353f3d8ecSyc } 258453f3d8ecSyc if (nmp != NULL) { 2585f720bc57Syc if (first_entry) { 2586f720bc57Syc /* 2587f720bc57Syc * Jumbo packets may be received with more than one 2588f720bc57Syc * buffer, increment ipackets for the first entry only. 2589f720bc57Syc */ 2590f720bc57Syc rdc_stats->ipackets++; 2591f720bc57Syc 2592f720bc57Syc /* Update ibytes for kstat. */ 2593f720bc57Syc rdc_stats->ibytes += skip_len 2594f720bc57Syc + l2_len < bsize ? l2_len : bsize; 2595f720bc57Syc /* 2596f720bc57Syc * Update the number of bytes read so far for the 2597f720bc57Syc * current frame. 2598f720bc57Syc */ 259953f3d8ecSyc bytes_read = nmp->b_wptr - nmp->b_rptr; 2600f720bc57Syc } else { 2601f720bc57Syc rdc_stats->ibytes += l2_len - bytes_read < bsize ? 2602f720bc57Syc l2_len - bytes_read : bsize; 260353f3d8ecSyc bytes_read += nmp->b_wptr - nmp->b_rptr; 2604f720bc57Syc } 260553f3d8ecSyc 260653f3d8ecSyc NXGE_DEBUG_MSG((nxgep, RX_CTL, 260753f3d8ecSyc "==> nxge_receive_packet after dupb: " 260853f3d8ecSyc "rbr consumed %d " 260953f3d8ecSyc "pktbufsz_type %d " 261053f3d8ecSyc "nmp $%p rptr $%p wptr $%p " 261153f3d8ecSyc "buf_offset %d bzise %d l2_len %d skip_len %d", 261253f3d8ecSyc rx_rbr_p->rbr_consumed, 261353f3d8ecSyc pktbufsz_type, 261453f3d8ecSyc nmp, nmp->b_rptr, nmp->b_wptr, 261553f3d8ecSyc buf_offset, bsize, l2_len, skip_len)); 261644961713Sgirish } else { 261744961713Sgirish cmn_err(CE_WARN, "!nxge_receive_packet: " 261852ccf843Smisaki "update stats (error)"); 26192e59129aSraghus atomic_inc_32(&rx_msg_p->ref_cnt); 26202e59129aSraghus if (buffer_free == B_TRUE) { 26212e59129aSraghus rx_msg_p->free = B_TRUE; 26222e59129aSraghus } 26232e59129aSraghus MUTEX_EXIT(&rx_rbr_p->lock); 26242e59129aSraghus nxge_freeb(rx_msg_p); 26252e59129aSraghus return; 262644961713Sgirish } 2627ee5416c9Syc 262844961713Sgirish if (buffer_free == B_TRUE) { 262944961713Sgirish rx_msg_p->free = B_TRUE; 263044961713Sgirish } 2631f720bc57Syc 
263244961713Sgirish is_valid = (nmp != NULL); 263353f3d8ecSyc 263453f3d8ecSyc rcr_p->rcvd_pkt_bytes = bytes_read; 263553f3d8ecSyc 263644961713Sgirish MUTEX_EXIT(&rx_rbr_p->lock); 263744961713Sgirish 263844961713Sgirish if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 263944961713Sgirish atomic_inc_32(&rx_msg_p->ref_cnt); 264044961713Sgirish nxge_freeb(rx_msg_p); 264144961713Sgirish } 264244961713Sgirish 264344961713Sgirish if (is_valid) { 2644a3c5bd6dSspeer nmp->b_cont = NULL; 264544961713Sgirish if (first_entry) { 264644961713Sgirish *mp = nmp; 264744961713Sgirish *mp_cont = NULL; 264853f3d8ecSyc } else { 264944961713Sgirish *mp_cont = nmp; 265053f3d8ecSyc } 265144961713Sgirish } 265244961713Sgirish 265344961713Sgirish /* 2654f720bc57Syc * ERROR, FRAG and PKT_TYPE are only reported in the first entry. 2655f720bc57Syc * If a packet is not fragmented and no error bit is set, then 2656f720bc57Syc * L4 checksum is OK. 265744961713Sgirish */ 2658f720bc57Syc 265944961713Sgirish if (is_valid && !multi) { 2660678453a8Sspeer /* 2661b4d05839Sml * If the checksum flag nxge_chksum_offload 2662b4d05839Sml * is 1, TCP and UDP packets can be sent 2663678453a8Sspeer * up with good checksum. If the checksum flag 2664b4d05839Sml * is set to 0, checksum reporting will apply to 2665678453a8Sspeer * TCP packets only (workaround for a hardware bug). 2666b4d05839Sml * If the checksum flag nxge_cksum_offload is 2667b4d05839Sml * greater than 1, both TCP and UDP packets 2668b4d05839Sml * will not be reported its hardware checksum results. 2669678453a8Sspeer */ 2670b4d05839Sml if (nxge_cksum_offload == 1) { 2671678453a8Sspeer is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 267252ccf843Smisaki pkt_type == RCR_PKT_IS_UDP) ? 267352ccf843Smisaki B_TRUE: B_FALSE); 2674b4d05839Sml } else if (!nxge_cksum_offload) { 2675678453a8Sspeer /* TCP checksum only. */ 2676678453a8Sspeer is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ? 
267752ccf843Smisaki B_TRUE: B_FALSE); 2678678453a8Sspeer } 267944961713Sgirish 268044961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 268152ccf843Smisaki "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 268252ccf843Smisaki is_valid, multi, is_tcp_udp, frag, error_type)); 268344961713Sgirish 268444961713Sgirish if (is_tcp_udp && !frag && !error_type) { 268544961713Sgirish (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 268652ccf843Smisaki HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 268744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 268852ccf843Smisaki "==> nxge_receive_packet: Full tcp/udp cksum " 268952ccf843Smisaki "is_valid 0x%x multi 0x%llx pkt %d frag %d " 269052ccf843Smisaki "error %d", 269152ccf843Smisaki is_valid, multi, is_tcp_udp, frag, error_type)); 269244961713Sgirish } 269344961713Sgirish } 269444961713Sgirish 269544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, 269652ccf843Smisaki "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 269744961713Sgirish 269844961713Sgirish *multi_p = (multi == RCR_MULTI_MASK); 269944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 270052ccf843Smisaki "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 270152ccf843Smisaki *multi_p, nmp, *mp, *mp_cont)); 270244961713Sgirish } 270344961713Sgirish 2704da14cebeSEric Cheng /* 2705da14cebeSEric Cheng * Enable polling for a ring. Interrupt for the ring is disabled when 2706da14cebeSEric Cheng * the nxge interrupt comes (see nxge_rx_intr). 
2707da14cebeSEric Cheng */ 2708da14cebeSEric Cheng int 2709da14cebeSEric Cheng nxge_enable_poll(void *arg) 2710da14cebeSEric Cheng { 2711da14cebeSEric Cheng p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2712da14cebeSEric Cheng p_rx_rcr_ring_t ringp; 2713da14cebeSEric Cheng p_nxge_t nxgep; 2714da14cebeSEric Cheng p_nxge_ldg_t ldgp; 2715da14cebeSEric Cheng uint32_t channel; 2716da14cebeSEric Cheng 2717da14cebeSEric Cheng if (ring_handle == NULL) { 271863f531d1SSriharsha Basavapatna ASSERT(ring_handle != NULL); 2719da14cebeSEric Cheng return (0); 2720da14cebeSEric Cheng } 2721da14cebeSEric Cheng 2722da14cebeSEric Cheng nxgep = ring_handle->nxgep; 2723da14cebeSEric Cheng channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2724da14cebeSEric Cheng ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2725da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2726da14cebeSEric Cheng "==> nxge_enable_poll: rdc %d ", ringp->rdc)); 2727da14cebeSEric Cheng ldgp = ringp->ldgp; 2728da14cebeSEric Cheng if (ldgp == NULL) { 2729da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2730da14cebeSEric Cheng "==> nxge_enable_poll: rdc %d NULL ldgp: no change", 2731da14cebeSEric Cheng ringp->rdc)); 2732da14cebeSEric Cheng return (0); 2733da14cebeSEric Cheng } 2734da14cebeSEric Cheng 2735da14cebeSEric Cheng MUTEX_ENTER(&ringp->lock); 2736da14cebeSEric Cheng /* enable polling */ 2737da14cebeSEric Cheng if (ringp->poll_flag == 0) { 2738da14cebeSEric Cheng ringp->poll_flag = 1; 2739da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2740da14cebeSEric Cheng "==> nxge_enable_poll: rdc %d set poll flag to 1", 2741da14cebeSEric Cheng ringp->rdc)); 2742da14cebeSEric Cheng } 2743da14cebeSEric Cheng 2744da14cebeSEric Cheng MUTEX_EXIT(&ringp->lock); 2745da14cebeSEric Cheng return (0); 2746da14cebeSEric Cheng } 2747da14cebeSEric Cheng /* 2748da14cebeSEric Cheng * Disable polling for a ring and enable its interrupt. 
2749da14cebeSEric Cheng */ 2750da14cebeSEric Cheng int 2751da14cebeSEric Cheng nxge_disable_poll(void *arg) 2752da14cebeSEric Cheng { 2753da14cebeSEric Cheng p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2754da14cebeSEric Cheng p_rx_rcr_ring_t ringp; 2755da14cebeSEric Cheng p_nxge_t nxgep; 2756da14cebeSEric Cheng uint32_t channel; 2757da14cebeSEric Cheng 2758da14cebeSEric Cheng if (ring_handle == NULL) { 275963f531d1SSriharsha Basavapatna ASSERT(ring_handle != NULL); 2760da14cebeSEric Cheng return (0); 2761da14cebeSEric Cheng } 2762da14cebeSEric Cheng 2763da14cebeSEric Cheng nxgep = ring_handle->nxgep; 2764da14cebeSEric Cheng channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2765da14cebeSEric Cheng ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2766da14cebeSEric Cheng 2767da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2768da14cebeSEric Cheng "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc)); 2769da14cebeSEric Cheng 2770da14cebeSEric Cheng MUTEX_ENTER(&ringp->lock); 2771da14cebeSEric Cheng 2772da14cebeSEric Cheng /* disable polling: enable interrupt */ 2773da14cebeSEric Cheng if (ringp->poll_flag) { 2774da14cebeSEric Cheng npi_handle_t handle; 2775da14cebeSEric Cheng rx_dma_ctl_stat_t cs; 2776da14cebeSEric Cheng uint8_t channel; 2777da14cebeSEric Cheng p_nxge_ldg_t ldgp; 2778da14cebeSEric Cheng 2779da14cebeSEric Cheng /* 2780da14cebeSEric Cheng * Get the control and status for this channel. 
2781da14cebeSEric Cheng */ 2782da14cebeSEric Cheng handle = NXGE_DEV_NPI_HANDLE(nxgep); 2783da14cebeSEric Cheng channel = ringp->rdc; 2784da14cebeSEric Cheng RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, 2785da14cebeSEric Cheng channel, &cs.value); 2786da14cebeSEric Cheng 2787da14cebeSEric Cheng /* 2788da14cebeSEric Cheng * Enable mailbox update 2789da14cebeSEric Cheng * Since packets were not read and the hardware uses 2790da14cebeSEric Cheng * bits pktread and ptrread to update the queue 2791da14cebeSEric Cheng * length, we need to set both bits to 0. 2792da14cebeSEric Cheng */ 2793da14cebeSEric Cheng cs.bits.ldw.pktread = 0; 2794da14cebeSEric Cheng cs.bits.ldw.ptrread = 0; 2795da14cebeSEric Cheng cs.bits.hdw.mex = 1; 2796da14cebeSEric Cheng RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 2797da14cebeSEric Cheng cs.value); 2798da14cebeSEric Cheng 2799da14cebeSEric Cheng /* 2800da14cebeSEric Cheng * Rearm this logical group if this is a single device 2801da14cebeSEric Cheng * group. 
2802da14cebeSEric Cheng */ 2803da14cebeSEric Cheng ldgp = ringp->ldgp; 2804da14cebeSEric Cheng if (ldgp == NULL) { 2805da14cebeSEric Cheng ringp->poll_flag = 0; 2806da14cebeSEric Cheng MUTEX_EXIT(&ringp->lock); 2807da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2808da14cebeSEric Cheng "==> nxge_disable_poll: no ldgp rdc %d " 2809da14cebeSEric Cheng "(still set poll to 0", ringp->rdc)); 2810da14cebeSEric Cheng return (0); 2811da14cebeSEric Cheng } 2812da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2813da14cebeSEric Cheng "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)", 2814da14cebeSEric Cheng ringp->rdc, ldgp)); 2815da14cebeSEric Cheng if (ldgp->nldvs == 1) { 281663f531d1SSriharsha Basavapatna if (isLDOMguest(nxgep)) { 281763f531d1SSriharsha Basavapatna ldgp->arm = B_TRUE; 281863f531d1SSriharsha Basavapatna nxge_hio_ldgimgn(nxgep, ldgp); 281963f531d1SSriharsha Basavapatna } else { 282063f531d1SSriharsha Basavapatna ldgimgm_t mgm; 282163f531d1SSriharsha Basavapatna mgm.value = 0; 282263f531d1SSriharsha Basavapatna mgm.bits.ldw.arm = 1; 282363f531d1SSriharsha Basavapatna mgm.bits.ldw.timer = ldgp->ldg_timer; 282463f531d1SSriharsha Basavapatna NXGE_REG_WR64(handle, 282563f531d1SSriharsha Basavapatna LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 282663f531d1SSriharsha Basavapatna mgm.value); 282763f531d1SSriharsha Basavapatna } 2828da14cebeSEric Cheng } 2829da14cebeSEric Cheng ringp->poll_flag = 0; 2830da14cebeSEric Cheng } 2831da14cebeSEric Cheng 2832da14cebeSEric Cheng MUTEX_EXIT(&ringp->lock); 2833da14cebeSEric Cheng return (0); 2834da14cebeSEric Cheng } 2835da14cebeSEric Cheng 2836da14cebeSEric Cheng /* 2837da14cebeSEric Cheng * Poll 'bytes_to_pickup' bytes of message from the rx ring. 
2838da14cebeSEric Cheng */ 2839da14cebeSEric Cheng mblk_t * 2840da14cebeSEric Cheng nxge_rx_poll(void *arg, int bytes_to_pickup) 2841da14cebeSEric Cheng { 2842da14cebeSEric Cheng p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2843da14cebeSEric Cheng p_rx_rcr_ring_t rcr_p; 2844da14cebeSEric Cheng p_nxge_t nxgep; 2845da14cebeSEric Cheng npi_handle_t handle; 2846da14cebeSEric Cheng rx_dma_ctl_stat_t cs; 2847da14cebeSEric Cheng mblk_t *mblk; 2848da14cebeSEric Cheng p_nxge_ldv_t ldvp; 2849da14cebeSEric Cheng uint32_t channel; 2850da14cebeSEric Cheng 2851da14cebeSEric Cheng nxgep = ring_handle->nxgep; 2852da14cebeSEric Cheng 2853da14cebeSEric Cheng /* 2854da14cebeSEric Cheng * Get the control and status for this channel. 2855da14cebeSEric Cheng */ 2856da14cebeSEric Cheng handle = NXGE_DEV_NPI_HANDLE(nxgep); 2857da14cebeSEric Cheng channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2858da14cebeSEric Cheng rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel]; 2859da14cebeSEric Cheng MUTEX_ENTER(&rcr_p->lock); 2860da14cebeSEric Cheng ASSERT(rcr_p->poll_flag == 1); 2861da14cebeSEric Cheng 2862da14cebeSEric Cheng RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value); 2863da14cebeSEric Cheng 2864da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2865da14cebeSEric Cheng "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d", 2866da14cebeSEric Cheng rcr_p->rdc, rcr_p->poll_flag)); 2867da14cebeSEric Cheng mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup); 2868da14cebeSEric Cheng 2869da14cebeSEric Cheng ldvp = rcr_p->ldvp; 2870da14cebeSEric Cheng /* error events. 
*/ 2871da14cebeSEric Cheng if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) { 2872da14cebeSEric Cheng (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs); 2873da14cebeSEric Cheng } 2874da14cebeSEric Cheng 2875da14cebeSEric Cheng MUTEX_EXIT(&rcr_p->lock); 2876da14cebeSEric Cheng 2877da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2878da14cebeSEric Cheng "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk)); 2879da14cebeSEric Cheng return (mblk); 2880da14cebeSEric Cheng } 2881da14cebeSEric Cheng 2882da14cebeSEric Cheng 288344961713Sgirish /*ARGSUSED*/ 288444961713Sgirish static nxge_status_t 2885678453a8Sspeer nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs) 288644961713Sgirish { 288744961713Sgirish p_nxge_rx_ring_stats_t rdc_stats; 288844961713Sgirish npi_handle_t handle; 288944961713Sgirish npi_status_t rs; 289044961713Sgirish boolean_t rxchan_fatal = B_FALSE; 289144961713Sgirish boolean_t rxport_fatal = B_FALSE; 289244961713Sgirish uint8_t portn; 289344961713Sgirish nxge_status_t status = NXGE_OK; 289444961713Sgirish uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 289544961713Sgirish NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 289644961713Sgirish 289744961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 289844961713Sgirish portn = nxgep->mac.portnum; 2899678453a8Sspeer rdc_stats = &nxgep->statsp->rdc_stats[channel]; 290044961713Sgirish 290144961713Sgirish if (cs.bits.hdw.rbr_tmout) { 290244961713Sgirish rdc_stats->rx_rbr_tmout++; 290344961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 290452ccf843Smisaki NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 290544961713Sgirish rxchan_fatal = B_TRUE; 290644961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 290752ccf843Smisaki "==> nxge_rx_err_evnts: rx_rbr_timeout")); 290844961713Sgirish } 290944961713Sgirish if (cs.bits.hdw.rsp_cnt_err) { 291044961713Sgirish rdc_stats->rsp_cnt_err++; 291144961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 291252ccf843Smisaki 
NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 291344961713Sgirish rxchan_fatal = B_TRUE; 291444961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 291552ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 291652ccf843Smisaki "rsp_cnt_err", channel)); 291744961713Sgirish } 291844961713Sgirish if (cs.bits.hdw.byte_en_bus) { 291944961713Sgirish rdc_stats->byte_en_bus++; 292044961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 292152ccf843Smisaki NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 292244961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 292352ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 292452ccf843Smisaki "fatal error: byte_en_bus", channel)); 292544961713Sgirish rxchan_fatal = B_TRUE; 292644961713Sgirish } 292744961713Sgirish if (cs.bits.hdw.rsp_dat_err) { 292844961713Sgirish rdc_stats->rsp_dat_err++; 292944961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 293052ccf843Smisaki NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 293144961713Sgirish rxchan_fatal = B_TRUE; 293244961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 293352ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 293452ccf843Smisaki "fatal error: rsp_dat_err", channel)); 293544961713Sgirish } 293644961713Sgirish if (cs.bits.hdw.rcr_ack_err) { 293744961713Sgirish rdc_stats->rcr_ack_err++; 293844961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 293952ccf843Smisaki NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 294044961713Sgirish rxchan_fatal = B_TRUE; 294144961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 294252ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 294352ccf843Smisaki "fatal error: rcr_ack_err", channel)); 294444961713Sgirish } 294544961713Sgirish if (cs.bits.hdw.dc_fifo_err) { 294644961713Sgirish rdc_stats->dc_fifo_err++; 294744961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 294852ccf843Smisaki NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 294944961713Sgirish /* This is not a fatal error! 
*/ 295044961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 295152ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 295252ccf843Smisaki "dc_fifo_err", channel)); 295344961713Sgirish rxport_fatal = B_TRUE; 295444961713Sgirish } 295544961713Sgirish if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 295644961713Sgirish if ((rs = npi_rxdma_ring_perr_stat_get(handle, 295752ccf843Smisaki &rdc_stats->errlog.pre_par, 295852ccf843Smisaki &rdc_stats->errlog.sha_par)) 295952ccf843Smisaki != NPI_SUCCESS) { 296044961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 296152ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 296252ccf843Smisaki "rcr_sha_par: get perr", channel)); 296344961713Sgirish return (NXGE_ERROR | rs); 296444961713Sgirish } 296544961713Sgirish if (cs.bits.hdw.rcr_sha_par) { 296644961713Sgirish rdc_stats->rcr_sha_par++; 296744961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 296852ccf843Smisaki NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 296944961713Sgirish rxchan_fatal = B_TRUE; 297044961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 297152ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 297252ccf843Smisaki "fatal error: rcr_sha_par", channel)); 297344961713Sgirish } 297444961713Sgirish if (cs.bits.hdw.rbr_pre_par) { 297544961713Sgirish rdc_stats->rbr_pre_par++; 297644961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 297752ccf843Smisaki NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 297844961713Sgirish rxchan_fatal = B_TRUE; 297944961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 298052ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 298152ccf843Smisaki "fatal error: rbr_pre_par", channel)); 298244961713Sgirish } 298344961713Sgirish } 298463e23a19Syc /* 298563e23a19Syc * The Following 4 status bits are for information, the system 298663e23a19Syc * is running fine. There is no need to send FMA ereports or 298763e23a19Syc * log messages. 
298863e23a19Syc */ 298944961713Sgirish if (cs.bits.hdw.port_drop_pkt) { 299044961713Sgirish rdc_stats->port_drop_pkt++; 299144961713Sgirish } 299244961713Sgirish if (cs.bits.hdw.wred_drop) { 299344961713Sgirish rdc_stats->wred_drop++; 299444961713Sgirish } 299544961713Sgirish if (cs.bits.hdw.rbr_pre_empty) { 299644961713Sgirish rdc_stats->rbr_pre_empty++; 299744961713Sgirish } 299844961713Sgirish if (cs.bits.hdw.rcr_shadow_full) { 299944961713Sgirish rdc_stats->rcr_shadow_full++; 300044961713Sgirish } 300144961713Sgirish if (cs.bits.hdw.config_err) { 300244961713Sgirish rdc_stats->config_err++; 300344961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 300452ccf843Smisaki NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 300544961713Sgirish rxchan_fatal = B_TRUE; 300644961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 300752ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 300852ccf843Smisaki "config error", channel)); 300944961713Sgirish } 301044961713Sgirish if (cs.bits.hdw.rcrincon) { 301144961713Sgirish rdc_stats->rcrincon++; 301244961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 301352ccf843Smisaki NXGE_FM_EREPORT_RDMC_RCRINCON); 301444961713Sgirish rxchan_fatal = B_TRUE; 301544961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 301652ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 301752ccf843Smisaki "fatal error: rcrincon error", channel)); 301844961713Sgirish } 301944961713Sgirish if (cs.bits.hdw.rcrfull) { 302044961713Sgirish rdc_stats->rcrfull++; 302144961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 302252ccf843Smisaki NXGE_FM_EREPORT_RDMC_RCRFULL); 302344961713Sgirish rxchan_fatal = B_TRUE; 302444961713Sgirish if (rdc_stats->rcrfull < error_disp_cnt) 302544961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 302652ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 302752ccf843Smisaki "fatal error: rcrfull error", channel)); 302844961713Sgirish } 302944961713Sgirish if (cs.bits.hdw.rbr_empty) { 303063e23a19Syc /* 303163e23a19Syc * This bit is 
for information, there is no need 303263e23a19Syc * send FMA ereport or log a message. 303363e23a19Syc */ 303444961713Sgirish rdc_stats->rbr_empty++; 303544961713Sgirish } 303644961713Sgirish if (cs.bits.hdw.rbrfull) { 303744961713Sgirish rdc_stats->rbrfull++; 303844961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 303952ccf843Smisaki NXGE_FM_EREPORT_RDMC_RBRFULL); 304044961713Sgirish rxchan_fatal = B_TRUE; 304144961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 304252ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 304352ccf843Smisaki "fatal error: rbr_full error", channel)); 304444961713Sgirish } 304544961713Sgirish if (cs.bits.hdw.rbrlogpage) { 304644961713Sgirish rdc_stats->rbrlogpage++; 304744961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 304852ccf843Smisaki NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 304944961713Sgirish rxchan_fatal = B_TRUE; 305044961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 305152ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 305252ccf843Smisaki "fatal error: rbr logical page error", channel)); 305344961713Sgirish } 305444961713Sgirish if (cs.bits.hdw.cfiglogpage) { 305544961713Sgirish rdc_stats->cfiglogpage++; 305644961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 305752ccf843Smisaki NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 305844961713Sgirish rxchan_fatal = B_TRUE; 305944961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 306052ccf843Smisaki "==> nxge_rx_err_evnts(channel %d): " 306152ccf843Smisaki "fatal error: cfig logical page error", channel)); 306244961713Sgirish } 306344961713Sgirish 306444961713Sgirish if (rxport_fatal) { 306544961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3066678453a8Sspeer " nxge_rx_err_evnts: fatal error on Port #%d\n", 3067678453a8Sspeer portn)); 3068678453a8Sspeer if (isLDOMguest(nxgep)) { 3069678453a8Sspeer status = NXGE_ERROR; 3070678453a8Sspeer } else { 3071678453a8Sspeer status = nxge_ipp_fatal_err_recover(nxgep); 3072678453a8Sspeer if (status == NXGE_OK) { 3073678453a8Sspeer 
FM_SERVICE_RESTORED(nxgep); 3074678453a8Sspeer } 307544961713Sgirish } 307644961713Sgirish } 307744961713Sgirish 307844961713Sgirish if (rxchan_fatal) { 307944961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3080678453a8Sspeer " nxge_rx_err_evnts: fatal error on Channel #%d\n", 3081678453a8Sspeer channel)); 3082678453a8Sspeer if (isLDOMguest(nxgep)) { 3083678453a8Sspeer status = NXGE_ERROR; 3084678453a8Sspeer } else { 3085678453a8Sspeer status = nxge_rxdma_fatal_err_recover(nxgep, channel); 3086678453a8Sspeer if (status == NXGE_OK) { 3087678453a8Sspeer FM_SERVICE_RESTORED(nxgep); 3088678453a8Sspeer } 308944961713Sgirish } 309044961713Sgirish } 309144961713Sgirish 309244961713Sgirish NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 309344961713Sgirish 309444961713Sgirish return (status); 309544961713Sgirish } 309644961713Sgirish 3097678453a8Sspeer /* 3098678453a8Sspeer * nxge_rdc_hvio_setup 3099678453a8Sspeer * 3100678453a8Sspeer * This code appears to setup some Hypervisor variables. 3101678453a8Sspeer * 3102678453a8Sspeer * Arguments: 3103678453a8Sspeer * nxgep 3104678453a8Sspeer * channel 3105678453a8Sspeer * 3106678453a8Sspeer * Notes: 3107678453a8Sspeer * What does NIU_LP_WORKAROUND mean? 
3108678453a8Sspeer * 3109678453a8Sspeer * NPI/NXGE function calls: 3110678453a8Sspeer * na 3111678453a8Sspeer * 3112678453a8Sspeer * Context: 3113678453a8Sspeer * Any domain 3114678453a8Sspeer */ 3115678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3116678453a8Sspeer static void 3117678453a8Sspeer nxge_rdc_hvio_setup( 3118678453a8Sspeer nxge_t *nxgep, int channel) 311944961713Sgirish { 3120678453a8Sspeer nxge_dma_common_t *dma_common; 3121678453a8Sspeer nxge_dma_common_t *dma_control; 3122678453a8Sspeer rx_rbr_ring_t *ring; 3123678453a8Sspeer 3124678453a8Sspeer ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3125678453a8Sspeer dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3126678453a8Sspeer 3127678453a8Sspeer ring->hv_set = B_FALSE; 3128678453a8Sspeer 3129678453a8Sspeer ring->hv_rx_buf_base_ioaddr_pp = (uint64_t) 3130678453a8Sspeer dma_common->orig_ioaddr_pp; 3131678453a8Sspeer ring->hv_rx_buf_ioaddr_size = (uint64_t) 3132678453a8Sspeer dma_common->orig_alength; 3133678453a8Sspeer 3134678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3135678453a8Sspeer "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)", 3136678453a8Sspeer channel, ring->hv_rx_buf_base_ioaddr_pp, 3137678453a8Sspeer dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size, 3138678453a8Sspeer dma_common->orig_alength, dma_common->orig_alength)); 3139678453a8Sspeer 3140678453a8Sspeer dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3141678453a8Sspeer 3142678453a8Sspeer ring->hv_rx_cntl_base_ioaddr_pp = 3143678453a8Sspeer (uint64_t)dma_control->orig_ioaddr_pp; 3144678453a8Sspeer ring->hv_rx_cntl_ioaddr_size = 3145678453a8Sspeer (uint64_t)dma_control->orig_alength; 3146678453a8Sspeer 3147678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3148678453a8Sspeer "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)", 3149678453a8Sspeer channel, ring->hv_rx_cntl_base_ioaddr_pp, 3150678453a8Sspeer 
dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size, 3151678453a8Sspeer dma_control->orig_alength, dma_control->orig_alength)); 3152678453a8Sspeer } 315344961713Sgirish #endif 315444961713Sgirish 3155678453a8Sspeer /* 3156678453a8Sspeer * nxge_map_rxdma 3157678453a8Sspeer * 3158678453a8Sspeer * Map an RDC into our kernel space. 3159678453a8Sspeer * 3160678453a8Sspeer * Arguments: 3161678453a8Sspeer * nxgep 3162678453a8Sspeer * channel The channel to map. 3163678453a8Sspeer * 3164678453a8Sspeer * Notes: 3165678453a8Sspeer * 1. Allocate & initialise a memory pool, if necessary. 3166678453a8Sspeer * 2. Allocate however many receive buffers are required. 3167678453a8Sspeer * 3. Setup buffers, descriptors, and mailbox. 3168678453a8Sspeer * 3169678453a8Sspeer * NPI/NXGE function calls: 3170678453a8Sspeer * nxge_alloc_rx_mem_pool() 3171678453a8Sspeer * nxge_alloc_rbb() 3172678453a8Sspeer * nxge_map_rxdma_channel() 3173678453a8Sspeer * 3174678453a8Sspeer * Registers accessed: 3175678453a8Sspeer * 3176678453a8Sspeer * Context: 3177678453a8Sspeer * Any domain 3178678453a8Sspeer */ 3179678453a8Sspeer static nxge_status_t 3180678453a8Sspeer nxge_map_rxdma(p_nxge_t nxgep, int channel) 3181678453a8Sspeer { 3182678453a8Sspeer nxge_dma_common_t **data; 3183678453a8Sspeer nxge_dma_common_t **control; 3184678453a8Sspeer rx_rbr_ring_t **rbr_ring; 3185678453a8Sspeer rx_rcr_ring_t **rcr_ring; 3186678453a8Sspeer rx_mbox_t **mailbox; 3187678453a8Sspeer uint32_t chunks; 318844961713Sgirish 3189678453a8Sspeer nxge_status_t status; 319044961713Sgirish 3191678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 319244961713Sgirish 3193678453a8Sspeer if (!nxgep->rx_buf_pool_p) { 3194678453a8Sspeer if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) { 3195678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3196678453a8Sspeer "<== nxge_map_rxdma: buf not allocated")); 3197678453a8Sspeer return (NXGE_ERROR); 3198678453a8Sspeer } 319944961713Sgirish } 320044961713Sgirish 
3201678453a8Sspeer if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK) 3202678453a8Sspeer return (NXGE_ERROR); 320314ea4bb7Ssd 320444961713Sgirish /* 3205678453a8Sspeer * Map descriptors from the buffer polls for each dma channel. 320644961713Sgirish */ 320744961713Sgirish 3208678453a8Sspeer /* 3209678453a8Sspeer * Set up and prepare buffer blocks, descriptors 3210678453a8Sspeer * and mailbox. 3211678453a8Sspeer */ 3212678453a8Sspeer data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3213678453a8Sspeer rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel]; 3214678453a8Sspeer chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 321544961713Sgirish 3216678453a8Sspeer control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3217678453a8Sspeer rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel]; 321844961713Sgirish 3219678453a8Sspeer mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 322044961713Sgirish 3221678453a8Sspeer status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring, 3222678453a8Sspeer chunks, control, rcr_ring, mailbox); 3223678453a8Sspeer if (status != NXGE_OK) { 3224678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 322552ccf843Smisaki "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) " 322652ccf843Smisaki "returned 0x%x", 322752ccf843Smisaki channel, status)); 3228678453a8Sspeer return (status); 3229678453a8Sspeer } 3230678453a8Sspeer nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel; 3231678453a8Sspeer nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel; 3232678453a8Sspeer nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 3233678453a8Sspeer &nxgep->statsp->rdc_stats[channel]; 323444961713Sgirish 3235678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3236678453a8Sspeer if (!isLDOMguest(nxgep)) 3237678453a8Sspeer nxge_rdc_hvio_setup(nxgep, channel); 3238678453a8Sspeer #endif 323944961713Sgirish 324044961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3241678453a8Sspeer "<== nxge_map_rxdma: (status 
0x%x channel %d)", status, channel)); 324244961713Sgirish 324344961713Sgirish return (status); 324444961713Sgirish } 324544961713Sgirish 324644961713Sgirish static void 3247678453a8Sspeer nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 324844961713Sgirish { 3249678453a8Sspeer rx_rbr_ring_t *rbr_ring; 3250678453a8Sspeer rx_rcr_ring_t *rcr_ring; 3251678453a8Sspeer rx_mbox_t *mailbox; 325244961713Sgirish 3253678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 325444961713Sgirish 3255678453a8Sspeer if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings || 3256678453a8Sspeer !nxgep->rx_mbox_areas_p) 325744961713Sgirish return; 325844961713Sgirish 3259678453a8Sspeer rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3260678453a8Sspeer rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 3261678453a8Sspeer mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 326244961713Sgirish 3263678453a8Sspeer if (!rbr_ring || !rcr_ring || !mailbox) 3264678453a8Sspeer return; 326544961713Sgirish 3266678453a8Sspeer (void) nxge_unmap_rxdma_channel( 326752ccf843Smisaki nxgep, channel, rbr_ring, rcr_ring, mailbox); 326844961713Sgirish 3269678453a8Sspeer nxge_free_rxb(nxgep, channel); 327044961713Sgirish 3271678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 327244961713Sgirish } 327344961713Sgirish 327444961713Sgirish nxge_status_t 327544961713Sgirish nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 327644961713Sgirish p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 327744961713Sgirish uint32_t num_chunks, 327844961713Sgirish p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 327944961713Sgirish p_rx_mbox_t *rx_mbox_p) 328044961713Sgirish { 328144961713Sgirish int status = NXGE_OK; 328244961713Sgirish 328344961713Sgirish /* 328444961713Sgirish * Set up and prepare buffer blocks, descriptors 328544961713Sgirish * and mailbox. 
328644961713Sgirish */ 328744961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 328852ccf843Smisaki "==> nxge_map_rxdma_channel (channel %d)", channel)); 328944961713Sgirish /* 329044961713Sgirish * Receive buffer blocks 329144961713Sgirish */ 329244961713Sgirish status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 329352ccf843Smisaki dma_buf_p, rbr_p, num_chunks); 329444961713Sgirish if (status != NXGE_OK) { 329544961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 329652ccf843Smisaki "==> nxge_map_rxdma_channel (channel %d): " 329752ccf843Smisaki "map buffer failed 0x%x", channel, status)); 329844961713Sgirish goto nxge_map_rxdma_channel_exit; 329944961713Sgirish } 330044961713Sgirish 330144961713Sgirish /* 330244961713Sgirish * Receive block ring, completion ring and mailbox. 330344961713Sgirish */ 330444961713Sgirish status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 330552ccf843Smisaki dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 330644961713Sgirish if (status != NXGE_OK) { 330744961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 330852ccf843Smisaki "==> nxge_map_rxdma_channel (channel %d): " 330952ccf843Smisaki "map config failed 0x%x", channel, status)); 331044961713Sgirish goto nxge_map_rxdma_channel_fail2; 331144961713Sgirish } 331244961713Sgirish 331344961713Sgirish goto nxge_map_rxdma_channel_exit; 331444961713Sgirish 331544961713Sgirish nxge_map_rxdma_channel_fail3: 331644961713Sgirish /* Free rbr, rcr */ 331744961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 331852ccf843Smisaki "==> nxge_map_rxdma_channel: free rbr/rcr " 331952ccf843Smisaki "(status 0x%x channel %d)", 332052ccf843Smisaki status, channel)); 332144961713Sgirish nxge_unmap_rxdma_channel_cfg_ring(nxgep, 332252ccf843Smisaki *rcr_p, *rx_mbox_p); 332344961713Sgirish 332444961713Sgirish nxge_map_rxdma_channel_fail2: 332544961713Sgirish /* Free buffer blocks */ 332644961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 332752ccf843Smisaki "==> nxge_map_rxdma_channel: free rx buffers" 
332852ccf843Smisaki "(nxgep 0x%x status 0x%x channel %d)", 332952ccf843Smisaki nxgep, status, channel)); 333044961713Sgirish nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 333144961713Sgirish 333256d930aeSspeer status = NXGE_ERROR; 333356d930aeSspeer 333444961713Sgirish nxge_map_rxdma_channel_exit: 333544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 333652ccf843Smisaki "<== nxge_map_rxdma_channel: " 333752ccf843Smisaki "(nxgep 0x%x status 0x%x channel %d)", 333852ccf843Smisaki nxgep, status, channel)); 333944961713Sgirish 334044961713Sgirish return (status); 334144961713Sgirish } 334244961713Sgirish 334344961713Sgirish /*ARGSUSED*/ 334444961713Sgirish static void 334544961713Sgirish nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 334644961713Sgirish p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 334744961713Sgirish { 334844961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 334952ccf843Smisaki "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 335044961713Sgirish 335144961713Sgirish /* 335244961713Sgirish * unmap receive block ring, completion ring and mailbox. 
335344961713Sgirish */ 335444961713Sgirish (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 335552ccf843Smisaki rcr_p, rx_mbox_p); 335644961713Sgirish 335744961713Sgirish /* unmap buffer blocks */ 335844961713Sgirish (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 335944961713Sgirish 336044961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 336144961713Sgirish } 336244961713Sgirish 336344961713Sgirish /*ARGSUSED*/ 336444961713Sgirish static nxge_status_t 336544961713Sgirish nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 336644961713Sgirish p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 336744961713Sgirish p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 336844961713Sgirish { 336944961713Sgirish p_rx_rbr_ring_t rbrp; 337044961713Sgirish p_rx_rcr_ring_t rcrp; 337144961713Sgirish p_rx_mbox_t mboxp; 337244961713Sgirish p_nxge_dma_common_t cntl_dmap; 337344961713Sgirish p_nxge_dma_common_t dmap; 337444961713Sgirish p_rx_msg_t *rx_msg_ring; 337544961713Sgirish p_rx_msg_t rx_msg_p; 337644961713Sgirish p_rbr_cfig_a_t rcfga_p; 337744961713Sgirish p_rbr_cfig_b_t rcfgb_p; 337844961713Sgirish p_rcrcfig_a_t cfga_p; 337944961713Sgirish p_rcrcfig_b_t cfgb_p; 338044961713Sgirish p_rxdma_cfig1_t cfig1_p; 338144961713Sgirish p_rxdma_cfig2_t cfig2_p; 338244961713Sgirish p_rbr_kick_t kick_p; 338344961713Sgirish uint32_t dmaaddrp; 338444961713Sgirish uint32_t *rbr_vaddrp; 338544961713Sgirish uint32_t bkaddr; 338644961713Sgirish nxge_status_t status = NXGE_OK; 338744961713Sgirish int i; 338844961713Sgirish uint32_t nxge_port_rcr_size; 338944961713Sgirish 339044961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 339152ccf843Smisaki "==> nxge_map_rxdma_channel_cfg_ring")); 339244961713Sgirish 339344961713Sgirish cntl_dmap = *dma_cntl_p; 339444961713Sgirish 339544961713Sgirish /* Map in the receive block ring */ 339644961713Sgirish rbrp = *rbr_p; 339744961713Sgirish dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 
339844961713Sgirish nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 339944961713Sgirish /* 340044961713Sgirish * Zero out buffer block ring descriptors. 340144961713Sgirish */ 340244961713Sgirish bzero((caddr_t)dmap->kaddrp, dmap->alength); 340344961713Sgirish 340444961713Sgirish rcfga_p = &(rbrp->rbr_cfga); 340544961713Sgirish rcfgb_p = &(rbrp->rbr_cfgb); 340644961713Sgirish kick_p = &(rbrp->rbr_kick); 340744961713Sgirish rcfga_p->value = 0; 340844961713Sgirish rcfgb_p->value = 0; 340944961713Sgirish kick_p->value = 0; 341044961713Sgirish rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 341144961713Sgirish rcfga_p->value = (rbrp->rbr_addr & 341252ccf843Smisaki (RBR_CFIG_A_STDADDR_MASK | 341352ccf843Smisaki RBR_CFIG_A_STDADDR_BASE_MASK)); 341444961713Sgirish rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 341544961713Sgirish 341644961713Sgirish rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 341744961713Sgirish rcfgb_p->bits.ldw.vld0 = 1; 341844961713Sgirish rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 341944961713Sgirish rcfgb_p->bits.ldw.vld1 = 1; 342044961713Sgirish rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 342144961713Sgirish rcfgb_p->bits.ldw.vld2 = 1; 342244961713Sgirish rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 342344961713Sgirish 342444961713Sgirish /* 342544961713Sgirish * For each buffer block, enter receive block address to the ring. 
342644961713Sgirish */ 342744961713Sgirish rbr_vaddrp = (uint32_t *)dmap->kaddrp; 342844961713Sgirish rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 342944961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 343052ccf843Smisaki "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 343152ccf843Smisaki "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 343244961713Sgirish 343344961713Sgirish rx_msg_ring = rbrp->rx_msg_ring; 343444961713Sgirish for (i = 0; i < rbrp->tnblocks; i++) { 343544961713Sgirish rx_msg_p = rx_msg_ring[i]; 343644961713Sgirish rx_msg_p->nxgep = nxgep; 343744961713Sgirish rx_msg_p->rx_rbr_p = rbrp; 343844961713Sgirish bkaddr = (uint32_t) 343952ccf843Smisaki ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 344052ccf843Smisaki >> RBR_BKADDR_SHIFT)); 344144961713Sgirish rx_msg_p->free = B_FALSE; 344244961713Sgirish rx_msg_p->max_usage_cnt = 0xbaddcafe; 344344961713Sgirish 344444961713Sgirish *rbr_vaddrp++ = bkaddr; 344544961713Sgirish } 344644961713Sgirish 344744961713Sgirish kick_p->bits.ldw.bkadd = rbrp->rbb_max; 344844961713Sgirish rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 344944961713Sgirish 345044961713Sgirish rbrp->rbr_rd_index = 0; 345144961713Sgirish 345244961713Sgirish rbrp->rbr_consumed = 0; 345344961713Sgirish rbrp->rbr_use_bcopy = B_TRUE; 345444961713Sgirish rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 345544961713Sgirish /* 345644961713Sgirish * Do bcopy on packets greater than bcopy size once 345744961713Sgirish * the lo threshold is reached. 345844961713Sgirish * This lo threshold should be less than the hi threshold. 345944961713Sgirish * 346044961713Sgirish * Do bcopy on every packet once the hi threshold is reached. 
346144961713Sgirish */ 346244961713Sgirish if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 346344961713Sgirish /* default it to use hi */ 346444961713Sgirish nxge_rx_threshold_lo = nxge_rx_threshold_hi; 346544961713Sgirish } 346644961713Sgirish 346744961713Sgirish if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 346844961713Sgirish nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 346944961713Sgirish } 347044961713Sgirish rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 347144961713Sgirish 347244961713Sgirish switch (nxge_rx_threshold_hi) { 347344961713Sgirish default: 347444961713Sgirish case NXGE_RX_COPY_NONE: 347544961713Sgirish /* Do not do bcopy at all */ 347644961713Sgirish rbrp->rbr_use_bcopy = B_FALSE; 347744961713Sgirish rbrp->rbr_threshold_hi = rbrp->rbb_max; 347844961713Sgirish break; 347944961713Sgirish 348044961713Sgirish case NXGE_RX_COPY_1: 348144961713Sgirish case NXGE_RX_COPY_2: 348244961713Sgirish case NXGE_RX_COPY_3: 348344961713Sgirish case NXGE_RX_COPY_4: 348444961713Sgirish case NXGE_RX_COPY_5: 348544961713Sgirish case NXGE_RX_COPY_6: 348644961713Sgirish case NXGE_RX_COPY_7: 348744961713Sgirish rbrp->rbr_threshold_hi = 348852ccf843Smisaki rbrp->rbb_max * 348952ccf843Smisaki (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 349044961713Sgirish break; 349144961713Sgirish 349244961713Sgirish case NXGE_RX_COPY_ALL: 349344961713Sgirish rbrp->rbr_threshold_hi = 0; 349444961713Sgirish break; 349544961713Sgirish } 349644961713Sgirish 349744961713Sgirish switch (nxge_rx_threshold_lo) { 349844961713Sgirish default: 349944961713Sgirish case NXGE_RX_COPY_NONE: 350044961713Sgirish /* Do not do bcopy at all */ 350144961713Sgirish if (rbrp->rbr_use_bcopy) { 350244961713Sgirish rbrp->rbr_use_bcopy = B_FALSE; 350344961713Sgirish } 350444961713Sgirish rbrp->rbr_threshold_lo = rbrp->rbb_max; 350544961713Sgirish break; 350644961713Sgirish 350744961713Sgirish case NXGE_RX_COPY_1: 350844961713Sgirish case NXGE_RX_COPY_2: 350944961713Sgirish case NXGE_RX_COPY_3: 351044961713Sgirish 
case NXGE_RX_COPY_4: 351144961713Sgirish case NXGE_RX_COPY_5: 351244961713Sgirish case NXGE_RX_COPY_6: 351344961713Sgirish case NXGE_RX_COPY_7: 351444961713Sgirish rbrp->rbr_threshold_lo = 351552ccf843Smisaki rbrp->rbb_max * 351652ccf843Smisaki (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 351744961713Sgirish break; 351844961713Sgirish 351944961713Sgirish case NXGE_RX_COPY_ALL: 352044961713Sgirish rbrp->rbr_threshold_lo = 0; 352144961713Sgirish break; 352244961713Sgirish } 352344961713Sgirish 352444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, 352552ccf843Smisaki "nxge_map_rxdma_channel_cfg_ring: channel %d " 352652ccf843Smisaki "rbb_max %d " 352752ccf843Smisaki "rbrp->rbr_bufsize_type %d " 352852ccf843Smisaki "rbb_threshold_hi %d " 352952ccf843Smisaki "rbb_threshold_lo %d", 353052ccf843Smisaki dma_channel, 353152ccf843Smisaki rbrp->rbb_max, 353252ccf843Smisaki rbrp->rbr_bufsize_type, 353352ccf843Smisaki rbrp->rbr_threshold_hi, 353452ccf843Smisaki rbrp->rbr_threshold_lo)); 353544961713Sgirish 353644961713Sgirish rbrp->page_valid.value = 0; 353744961713Sgirish rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 353844961713Sgirish rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 353944961713Sgirish rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 354044961713Sgirish rbrp->page_hdl.value = 0; 354144961713Sgirish 354244961713Sgirish rbrp->page_valid.bits.ldw.page0 = 1; 354344961713Sgirish rbrp->page_valid.bits.ldw.page1 = 1; 354444961713Sgirish 354544961713Sgirish /* Map in the receive completion ring */ 354644961713Sgirish rcrp = (p_rx_rcr_ring_t) 354752ccf843Smisaki KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 354844961713Sgirish rcrp->rdc = dma_channel; 354944961713Sgirish 355044961713Sgirish nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 355144961713Sgirish rcrp->comp_size = nxge_port_rcr_size; 355244961713Sgirish rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 355344961713Sgirish 355444961713Sgirish rcrp->max_receive_pkts = nxge_max_rx_pkts; 
	/* Carve the RCR descriptor ring out of the control DMA area. */
	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
	nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
	    sizeof (rcr_entry_t));
	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;
	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
#if defined(__i386)
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#else
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#endif

	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
	    (nxge_port_rcr_size - 1);
	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
	    (nxge_port_rcr_size - 1);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_cfg_ring: "
	    "channel %d "
	    "rbr_vaddrp $%p "
	    "rcr_desc_rd_head_p $%p "
	    "rcr_desc_rd_head_pp $%p "
	    "rcr_desc_rd_last_p $%p "
	    "rcr_desc_rd_last_pp $%p ",
	    dma_channel,
	    rbr_vaddrp,
	    rcrp->rcr_desc_rd_head_p,
	    rcrp->rcr_desc_rd_head_pp,
	    rcrp->rcr_desc_last_p,
	    rcrp->rcr_desc_last_pp));

	/*
	 * Zero out buffer block ring descriptors.
	 */
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	/* Clamp interrupt timeout/threshold to their documented minimums. */
	rcrp->intr_timeout = (nxgep->intr_timeout <
	    NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
	    nxgep->intr_timeout;

	rcrp->intr_threshold = (nxgep->intr_threshold <
	    NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
	    nxgep->intr_threshold;

	rcrp->full_hdr_flag = B_FALSE;
	rcrp->sw_priv_hdr_len = 0;

	/* Program the shadow RCR configuration registers (A and B). */
	cfga_p = &(rcrp->rcr_cfga);
	cfgb_p = &(rcrp->rcr_cfgb);
	cfga_p->value = 0;
	cfgb_p->value = 0;
	rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
	cfga_p->value = (rcrp->rcr_addr &
	    (RCRCFIG_A_STADDR_MASK |
	    RCRCFIG_A_STADDR_BASE_MASK));

	/*
	 * NOTE(review): 'rcfga_p' here vs. 'cfga_p' above — looks like it
	 * should be the same register pointer; confirm against the local
	 * declarations earlier in this function (outside this view).
	 */
	rcfga_p->value |= ((uint64_t)rcrp->comp_size <<
	    RCRCFIG_A_LEN_SHIF);

	/*
	 * Timeout should be set based on the system clock divider.
	 * A timeout value of 1 assumes that the
	 * granularity (1000) is 3 microseconds running at 300MHz.
	 */
	cfgb_p->bits.ldw.pthres = rcrp->intr_threshold;
	cfgb_p->bits.ldw.timeout = rcrp->intr_timeout;
	cfgb_p->bits.ldw.entout = 1;

	/* Map in the mailbox */
	mboxp = (p_rx_mbox_t)
	    KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
	dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox;
	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
	cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1;
	cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2;
	cfig1_p->value = cfig2_p->value = 0;

	mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_cfg_ring: "
	    "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
	    dma_channel, cfig1_p->value, cfig2_p->value,
	    mboxp->mbox_addr));

	/* High 12 bits of the mailbox DMA address. */
	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32
	    & 0xfff);
	cfig1_p->bits.ldw.mbaddr_h = dmaaddrp;


	/*
	 * NOTE(review): the first assignment below is immediately
	 * overwritten by the masked one — a dead store; harmless but
	 * worth cleaning up.
	 */
	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff);
	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
	    RXDMA_CFIG2_MBADDR_L_MASK);

	cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_cfg_ring: "
	    "channel %d damaddrp $%p "
	    "cfg1 0x%016llx cfig2 0x%016llx",
	    dma_channel, dmaaddrp,
	    cfig1_p->value, cfig2_p->value));

	cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag;
	cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len;

	/* Cross-link the RBR and RCR rings and hand results to the caller. */
	rbrp->rx_rcr_p = rcrp;
	rcrp->rx_rbr_p = rbrp;
	*rcr_p = rcrp;
	*rx_mbox_p = mboxp;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status));

	return (status);
}

/*
 * nxge_unmap_rxdma_channel_cfg_ring
 *
 *	Free the RCR ring and mailbox control structures allocated by
 *	nxge_map_rxdma_channel_cfg_ring().  The DMA memory itself is
 *	managed elsewhere; only the bookkeeping structs are freed here.
 */
/*ARGSUSED*/
static void
nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep,
    p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
{
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d",
	    rcr_p->rdc));

	KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
	KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_unmap_rxdma_channel_cfg_ring"));
}

/*
 * nxge_map_rxdma_channel_buf_ring
 *
 *	Build the receive buffer ring (RBR) bookkeeping for one RX DMA
 *	channel: allocate the ring structure, the rx_msg pointer array
 *	and ring_info, then carve each pre-allocated DMA chunk into
 *	message blocks via nxge_allocb().
 */
static nxge_status_t
nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
    p_nxge_dma_common_t *dma_buf_p,
    p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
{
	p_rx_rbr_ring_t		rbrp;
	p_nxge_dma_common_t	dma_bufp, tmp_bufp;
	p_rx_msg_t		*rx_msg_ring;
	p_rx_msg_t		rx_msg_p;
	p_mblk_t		mblk_p;

	rxring_info_t		*ring_info;
	nxge_status_t		status = NXGE_OK;
	int			i, j, index;
	uint32_t		size, bsize, nblocks, nmsgs;
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_buf_ring: channel %d",
	    channel));

	dma_bufp = tmp_bufp = *dma_buf_p;
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    " nxge_map_rxdma_channel_buf_ring: channel %d to map %d "
	    "chunks bufp 0x%016llx",
	    channel, num_chunks, dma_bufp));

	/* Count the total message blocks across all DMA chunks. */
	nmsgs = 0;
	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_map_rxdma_channel_buf_ring: channel %d "
		    "bufp 0x%016llx nblocks %d nmsgs %d",
		    channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
		nmsgs += tmp_bufp->nblocks;
	}
	if (!nmsgs) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_map_rxdma_channel_buf_ring: channel %d "
		    "no msg blocks",
		    channel));
		status = NXGE_ERROR;
		goto nxge_map_rxdma_channel_buf_ring_exit;
	}

	rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP);

	size = nmsgs * sizeof (p_rx_msg_t);
	rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
	ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
	    KM_SLEEP);

	MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
	    (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
	    (void *)nxgep->interrupt_cookie);
	rbrp->rdc = channel;
	rbrp->num_blocks = num_chunks;
	rbrp->tnblocks = nmsgs;
	rbrp->rbb_max = nmsgs;
	rbrp->rbr_max_size = nmsgs;
	rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);

	/*
	 * Buffer sizes suggested by NIU architect.
	 * 256, 512 and 2K.
	 */

	rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
	rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
	rbrp->npi_pkt_buf_size0 = SIZE_256B;

	rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
	rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
	rbrp->npi_pkt_buf_size1 = SIZE_1KB;

	rbrp->block_size = nxgep->rx_default_block_size;

	/* The third buffer size scales with jumbo support and block size. */
	if (!nxgep->mac.is_jumbo) {
		rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
		rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
		rbrp->npi_pkt_buf_size2 = SIZE_2KB;
	} else {
		if (rbrp->block_size >= 0x2000) {
			rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K;
			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES;
			rbrp->npi_pkt_buf_size2 = SIZE_8KB;
		} else {
			rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
			rbrp->npi_pkt_buf_size2 = SIZE_4KB;
		}
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_buf_ring: channel %d "
	    "actual rbr max %d rbb_max %d nmsgs %d "
	    "rbrp->block_size %d default_block_size %d "
	    "(config nxge_rbr_size %d nxge_rbr_spare_size %d)",
	    channel,
	    rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
	    rbrp->block_size, nxgep->rx_default_block_size,
	    nxge_rbr_size, nxge_rbr_spare_size));

	/* Map in buffers from the buffer pool. */
	index = 0;
	for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
		bsize = dma_bufp->block_size;
		nblocks = dma_bufp->nblocks;
#if defined(__i386)
		ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp;
#else
		ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
#endif
		ring_info->buffer[i].buf_index = i;
		ring_info->buffer[i].buf_size = dma_bufp->alength;
		ring_info->buffer[i].start_index = index;
#if defined(__i386)
		ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp;
#else
		ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
#endif

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    " nxge_map_rxdma_channel_buf_ring: map channel %d "
		    "chunk %d"
		    " nblocks %d chunk_size %x block_size 0x%x "
		    "dma_bufp $%p", channel, i,
		    dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
		    dma_bufp));

		/* One rx_msg (mblk) per block within this chunk. */
		for (j = 0; j < nblocks; j++) {
			if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO,
			    dma_bufp)) == NULL) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "allocb failed (index %d i %d j %d)",
				    index, i, j));
				goto nxge_map_rxdma_channel_buf_ring_fail1;
			}
			rx_msg_ring[index] = rx_msg_p;
			rx_msg_p->block_index = index;
			rx_msg_p->shifted_addr = (uint32_t)
			    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
			    RBR_BKADDR_SHIFT));

			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
			    "index %d j %d rx_msg_p $%p mblk %p",
			    index, j, rx_msg_p, rx_msg_p->rx_mblk_p));

			mblk_p = rx_msg_p->rx_mblk_p;
			mblk_p->b_wptr = mblk_p->b_rptr + bsize;

			/* Each live buffer holds a reference on the ring. */
			rbrp->rbr_ref_cnt++;
			index++;
			rx_msg_p->buf_dma.dma_channel = channel;
		}

		rbrp->rbr_alloc_type = DDI_MEM_ALLOC;
		if (dma_bufp->contig_alloc_type) {
			rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC;
		}

		if (dma_bufp->kmem_alloc_type) {
			rbrp->rbr_alloc_type = KMEM_ALLOC;
		}

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    " nxge_map_rxdma_channel_buf_ring: map channel %d "
		    "chunk %d"
		    " nblocks %d chunk_size %x block_size 0x%x "
		    "dma_bufp $%p",
		    channel, i,
		    dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
		    dma_bufp));
	}
	if (i < rbrp->num_blocks) {
		goto nxge_map_rxdma_channel_buf_ring_fail1;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "nxge_map_rxdma_channel_buf_ring: done buf init "
	    "channel %d msg block entries %d",
	    channel, index));
	ring_info->block_size_mask = bsize - 1;
	rbrp->rx_msg_ring = rx_msg_ring;
	rbrp->dma_bufp = dma_buf_p;
	rbrp->ring_info = ring_info;

	status = nxge_rxbuf_index_info_init(nxgep, rbrp);
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    " nxge_map_rxdma_channel_buf_ring: "
	    "channel %d done buf info init", channel));

	/*
	 * Finally, permit nxge_freeb() to call nxge_post_page().
	 */
	rbrp->rbr_state = RBR_POSTING;

	*rbr_p = rbrp;
	goto nxge_map_rxdma_channel_buf_ring_exit;

	/*
	 * Unwind path: free every mblk allocated so far, then fall
	 * through to release the ring bookkeeping.
	 * NOTE(review): the debug format below has one %x but two
	 * arguments (channel, status) — extra arg is ignored; confirm
	 * intended format.
	 */
nxge_map_rxdma_channel_buf_ring_fail1:
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)",
	    channel, status));

	index--;
	for (; index >= 0; index--) {
		rx_msg_p = rx_msg_ring[index];
		if (rx_msg_p != NULL) {
			freeb(rx_msg_p->rx_mblk_p);
			rx_msg_ring[index] = NULL;
		}
	}
nxge_map_rxdma_channel_buf_ring_fail:
	MUTEX_DESTROY(&rbrp->post_lock);
	MUTEX_DESTROY(&rbrp->lock);
	KMEM_FREE(ring_info, sizeof (rxring_info_t));
	KMEM_FREE(rx_msg_ring, size);
	KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));

	status = NXGE_ERROR;

nxge_map_rxdma_channel_buf_ring_exit:
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status));

	return (status);
}

/*ARGSUSED*/
static
void
nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep,
    p_rx_rbr_ring_t rbr_p)
{
	p_rx_msg_t		*rx_msg_ring;
	p_rx_msg_t		rx_msg_p;
	rxring_info_t		*ring_info;
	int			i;
	uint32_t		size;
#ifdef NXGE_DEBUG
	int			num_chunks;
#endif

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_unmap_rxdma_channel_buf_ring"));
	if (rbr_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
		return;
	}
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_unmap_rxdma_channel_buf_ring: channel %d",
	    rbr_p->rdc));

	rx_msg_ring = rbr_p->rx_msg_ring;
	ring_info = rbr_p->ring_info;

	if (rx_msg_ring == NULL || ring_info == NULL) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_unmap_rxdma_channel_buf_ring: "
		    "rx_msg_ring $%p ring_info $%p",
		    rx_msg_p, ring_info));
		return;
	}

#ifdef NXGE_DEBUG
	num_chunks = rbr_p->num_blocks;
#endif
	size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
	    "tnblocks %d (max %d) size ptrs %d ",
	    rbr_p->rdc, num_chunks,
	    rbr_p->tnblocks, rbr_p->rbr_max_size, size));

	/* Release every message block we still hold a pointer to. */
	for (i = 0; i < rbr_p->tnblocks; i++) {
		rx_msg_p = rx_msg_ring[i];
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    " nxge_unmap_rxdma_channel_buf_ring: "
		    "rx_msg_p $%p",
		    rx_msg_p));
		if (rx_msg_p != NULL) {
			freeb(rx_msg_p->rx_mblk_p);
			rx_msg_ring[i] = NULL;
		}
	}

	/*
	 * We no longer may use the mutex <post_lock>. By setting
	 * <rbr_state> to anything but POSTING, we prevent
	 * nxge_post_page() from accessing a dead mutex.
	 */
	rbr_p->rbr_state = RBR_UNMAPPING;
	MUTEX_DESTROY(&rbr_p->post_lock);

	MUTEX_DESTROY(&rbr_p->lock);

	if (rbr_p->rbr_ref_cnt == 0) {
		/*
		 * This is the normal state of affairs.
		 * Need to free the following buffers:
		 *  - data buffers
		 *  - rx_msg ring
		 *  - ring_info
		 *  - rbr ring
		 */
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "unmap_rxdma_buf_ring: No outstanding - freeing "));
		nxge_rxdma_databuf_free(rbr_p);
		KMEM_FREE(ring_info, sizeof (rxring_info_t));
		KMEM_FREE(rx_msg_ring, size);
		KMEM_FREE(rbr_p, sizeof (*rbr_p));
	} else {
		/*
		 * Some of our buffers are still being used.
		 * Therefore, tell nxge_freeb() this ring is
		 * unmapped, so it may free <rbr_p> for us.
		 */
		rbr_p->rbr_state = RBR_UNMAPPED;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "unmap_rxdma_buf_ring: %d %s outstanding.",
		    rbr_p->rbr_ref_cnt,
		    rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_unmap_rxdma_channel_buf_ring"));
}

/*
 * nxge_rxdma_hw_start_common
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	nxge_init_fzc_rx_common();
 *	nxge_init_fzc_rxdma_port();
 *
 * Registers accessed:
 *
 * Context:
 *	Service domain
 */
static nxge_status_t
nxge_rxdma_hw_start_common(p_nxge_t nxgep)
{
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));

	/*
	 * Load the sharable parameters by writing to the
	 * function zero control registers. These FZC registers
	 * should be initialized only once for the entire chip.
	 */
	(void) nxge_init_fzc_rx_common(nxgep);

	/*
	 * Initialize the RXDMA port specific FZC control configurations.
	 * These FZC registers are pertaining to each port.
	 */
	(void) nxge_init_fzc_rxdma_port(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));

	return (status);
}

/*
 * nxge_rxdma_hw_start
 *
 *	Look up the RBR/RCR/mailbox structures for <channel> and start
 *	that RX DMA channel via nxge_rxdma_start_channel().
 */
static nxge_status_t
nxge_rxdma_hw_start(p_nxge_t nxgep, int channel)
{
	int			i, ndmas;
	p_rx_rbr_rings_t	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;
	p_rx_rcr_rings_t	rx_rcr_rings;
	p_rx_rcr_ring_t		*rcr_rings;
	p_rx_mbox_areas_t	rx_mbox_areas_p;
	p_rx_mbox_t		*rx_mbox_p;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start"));

	rx_rbr_rings = nxgep->rx_rbr_rings;
	rx_rcr_rings = nxgep->rx_rcr_rings;
	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_hw_start: NULL ring pointers"));
		return (NXGE_ERROR);
	}
	ndmas = rx_rbr_rings->ndmas;
	if (ndmas == 0) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_hw_start: no dma channel allocated"));
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_hw_start (ndmas %d)", ndmas));

	rbr_rings = rx_rbr_rings->rbr_rings;
	rcr_rings = rx_rcr_rings->rcr_rings;
	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
	if (rx_mbox_areas_p) {
		rx_mbox_p =
		    rx_mbox_areas_p->rxmbox_areas;
	}

	/* The ring arrays are indexed directly by channel number. */
	i = channel;
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_hw_start (ndmas %d) channel %d",
	    ndmas, channel));
	status = nxge_rxdma_start_channel(nxgep, channel,
	    (p_rx_rbr_ring_t)rbr_rings[i],
	    (p_rx_rcr_ring_t)rcr_rings[i],
	    (p_rx_mbox_t)rx_mbox_p[i]);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_hw_start: disable "
		    "(status 0x%x channel %d)", status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: "
	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
	    rx_rbr_rings, rx_rcr_rings));

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_hw_start: (status 0x%x)", status));

	return (status);
}

/*
 * nxge_rxdma_hw_stop
 *
 *	Stop one RX DMA channel after validating that the ring
 *	structures exist.
 */
static void
nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel)
{
	p_rx_rbr_rings_t	rx_rbr_rings;
	p_rx_rcr_rings_t	rx_rcr_rings;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));

	rx_rbr_rings = nxgep->rx_rbr_rings;
	rx_rcr_rings = nxgep->rx_rcr_rings;
	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_hw_stop: NULL ring pointers"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_hw_stop(channel %d)",
	    channel));
	(void) nxge_rxdma_stop_channel(nxgep, channel);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: "
	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
	    rx_rbr_rings, rx_rcr_rings));

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop"));
}


/*
 * nxge_rxdma_start_channel
 *
 *	Bring one RX DMA channel up: reset it (service domain only),
 *	program the FZC configuration, set interrupt event masks and
 *	the control/status register, then enable the channel.
 */
static nxge_status_t
nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)

{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	rx_dma_ctl_stat_t	cs;
	rx_dma_ent_msk_t	ent_mask;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
	    "npi handle addr $%p acc $%p",
	    nxgep->npi_handle.regp, nxgep->npi_handle.regh));

	/* Reset RXDMA channel, but not if you're a guest.
	 */
	if (!isLDOMguest(nxgep)) {
		rs = npi_rxdma_cfg_rdc_reset(handle, channel);
		if (rs != NPI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_init_fzc_rdc: "
			    "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x",
			    channel, rs));
			return (NXGE_ERROR | rs);
		}

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_rxdma_start_channel: reset done: channel %d",
		    channel));
	}

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (isLDOMguest(nxgep))
		(void) nxge_rdc_lp_conf(nxgep, channel);
#endif

	/*
	 * Initialize the RXDMA channel specific FZC control
	 * configurations. These FZC registers are pertaining
	 * to each RX channel (logical pages).
	 */
	if (!isLDOMguest(nxgep)) {
		status = nxge_init_fzc_rxdma_channel(nxgep, channel);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rxdma_start_channel: "
			    "init fzc rxdma failed (0x%08x channel %d)",
			    status, channel));
			return (status);
		}

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_rxdma_start_channel: fzc done"));
	}

	/* Set up the interrupt event masks. */
	ent_mask.value = 0;
	ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		/*
		 * NOTE(review): this message prints 'status' but the
		 * failing code is in 'rs'; confirm intended argument.
		 */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_start_channel: "
		    "init rxdma event masks failed "
		    "(0x%08x channel %d)",
		    status, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_start_channel: "
	    "event done: channel %d (mask 0x%016llx)",
	    channel, ent_mask.value));

	/* Initialize the receive DMA control and status register */
	cs.value = 0;
	cs.bits.hdw.mex = 1;
	cs.bits.hdw.rcrthres = 1;
	cs.bits.hdw.rcrto = 1;
	cs.bits.hdw.rbr_empty = 1;
	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
	    "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_start_channel: "
		    "init rxdma control register failed (0x%08x channel %d",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
	    "control done - channel %d cs 0x%016llx", channel, cs.value));

	/*
	 * Load RXDMA descriptors, buffers, mailbox,
	 * initialise the receive DMA channels and
	 * enable each DMA channel.
	 */
	status = nxge_enable_rxdma_channel(nxgep,
	    channel, rbr_p, rcr_p, mbox_p);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_start_channel: "
		    " enable rxdma failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_start_channel: enabled channel %d"));

	if (isLDOMguest(nxgep)) {
		/* Add interrupt handler for this channel. */
		status = nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_rxdma_start_channel: "
			    " nxge_hio_intr_add failed (0x%08x channel %d)",
			    status, channel));
			return (status);
		}
	}

	/* Re-arm the final event mask now that the channel is enabled. */
	ent_mask.value = 0;
	ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
	    RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_rxdma_start_channel: "
		    "init rxdma event masks failed (0x%08x channel %d)",
		    status, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
	    "control done - channel %d cs 0x%016llx", channel, cs.value));

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));

	return (NXGE_OK);
}

/*
 * nxge_rxdma_stop_channel
 *
 *	Quiesce one RX DMA channel: stop the RxMAC and drain the IPP
 *	(service domain only), reset the channel, mask all events,
 *	clear the control/status register, disable the channel, and
 *	finally re-enable the RxMAC.
 */
static nxge_status_t
nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	rx_dma_ctl_stat_t	cs;
	rx_dma_ent_msk_t	ent_mask;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
	    "npi handle addr $%p acc $%p",
	    nxgep->npi_handle.regp, nxgep->npi_handle.regh));

	if (!isLDOMguest(nxgep)) {
		/*
		 * Stop RxMAC = A.9.2.6
		 */
		if (nxge_rx_mac_disable(nxgep) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_rxdma_stop_channel: "
			    "Failed to disable RxMAC"));
		}

		/*
		 * Drain IPP Port = A.9.3.6
		 */
		(void) nxge_ipp_drain(nxgep);
	}

	/* Reset RXDMA channel */
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: "
		    " reset rxdma failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: reset done"));

	/* Set up the interrupt event masks. */
	ent_mask.value = RX_DMA_ENT_MSK_ALL;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_stop_channel: "
		    "set rxdma event masks failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: event done"));

	/*
	 * Initialize the receive DMA control and status register
	 */
	cs.value = 0;
	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
	    " to default (all 0s) 0x%08x", cs.value));
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: init rxdma"
		    " control register failed (0x%08x channel %d",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: control done"));

	/*
	 * Make sure channel is disabled.
	 */
	status = nxge_disable_rxdma_channel(nxgep, channel);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: "
		    " init enable rxdma failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Enable RxMAC = A.9.2.10
		 */
		if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_rxdma_stop_channel: Rx MAC still disabled"));
		}
	}

	NXGE_DEBUG_MSG((nxgep,
	    RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));

	return (NXGE_OK);
}

nxge_status_t
nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
{
	npi_handle_t		handle;
	p_nxge_rdc_sys_stats_t	statsp;
	rx_ctl_dat_fifo_stat_t	stat;
	uint32_t		zcp_err_status;
	uint32_t		ipp_err_status;
	nxge_status_t		status = NXGE_OK;
	npi_status_t		rs = NPI_SUCCESS;
	boolean_t		my_err = B_FALSE;

	handle = nxgep->npi_handle;
	statsp =
(p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 442744961713Sgirish 442844961713Sgirish rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 442944961713Sgirish 443044961713Sgirish if (rs != NPI_SUCCESS) 443144961713Sgirish return (NXGE_ERROR | rs); 443244961713Sgirish 443344961713Sgirish if (stat.bits.ldw.id_mismatch) { 443444961713Sgirish statsp->id_mismatch++; 443544961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 443652ccf843Smisaki NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 443744961713Sgirish /* Global fatal error encountered */ 443844961713Sgirish } 443944961713Sgirish 444044961713Sgirish if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 444144961713Sgirish switch (nxgep->mac.portnum) { 444244961713Sgirish case 0: 444344961713Sgirish if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 444452ccf843Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 444544961713Sgirish my_err = B_TRUE; 444644961713Sgirish zcp_err_status = stat.bits.ldw.zcp_eop_err; 444744961713Sgirish ipp_err_status = stat.bits.ldw.ipp_eop_err; 444844961713Sgirish } 444944961713Sgirish break; 445044961713Sgirish case 1: 445144961713Sgirish if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 445252ccf843Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 445344961713Sgirish my_err = B_TRUE; 445444961713Sgirish zcp_err_status = stat.bits.ldw.zcp_eop_err; 445544961713Sgirish ipp_err_status = stat.bits.ldw.ipp_eop_err; 445644961713Sgirish } 445744961713Sgirish break; 445844961713Sgirish case 2: 445944961713Sgirish if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 446052ccf843Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 446144961713Sgirish my_err = B_TRUE; 446244961713Sgirish zcp_err_status = stat.bits.ldw.zcp_eop_err; 446344961713Sgirish ipp_err_status = stat.bits.ldw.ipp_eop_err; 446444961713Sgirish } 446544961713Sgirish break; 446644961713Sgirish case 3: 446744961713Sgirish if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 
446852ccf843Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 446944961713Sgirish my_err = B_TRUE; 447044961713Sgirish zcp_err_status = stat.bits.ldw.zcp_eop_err; 447144961713Sgirish ipp_err_status = stat.bits.ldw.ipp_eop_err; 447244961713Sgirish } 447344961713Sgirish break; 447444961713Sgirish default: 447544961713Sgirish return (NXGE_ERROR); 447644961713Sgirish } 447744961713Sgirish } 447844961713Sgirish 447944961713Sgirish if (my_err) { 448044961713Sgirish status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 448152ccf843Smisaki zcp_err_status); 448244961713Sgirish if (status != NXGE_OK) 448344961713Sgirish return (status); 448444961713Sgirish } 448544961713Sgirish 448644961713Sgirish return (NXGE_OK); 448744961713Sgirish } 448844961713Sgirish 448944961713Sgirish static nxge_status_t 449044961713Sgirish nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 449144961713Sgirish uint32_t zcp_status) 449244961713Sgirish { 449344961713Sgirish boolean_t rxport_fatal = B_FALSE; 449444961713Sgirish p_nxge_rdc_sys_stats_t statsp; 449544961713Sgirish nxge_status_t status = NXGE_OK; 449644961713Sgirish uint8_t portn; 449744961713Sgirish 449844961713Sgirish portn = nxgep->mac.portnum; 449944961713Sgirish statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 450044961713Sgirish 450144961713Sgirish if (ipp_status & (0x1 << portn)) { 450244961713Sgirish statsp->ipp_eop_err++; 450344961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 450452ccf843Smisaki NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 450544961713Sgirish rxport_fatal = B_TRUE; 450644961713Sgirish } 450744961713Sgirish 450844961713Sgirish if (zcp_status & (0x1 << portn)) { 450944961713Sgirish statsp->zcp_eop_err++; 451044961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 451152ccf843Smisaki NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 451244961713Sgirish rxport_fatal = B_TRUE; 451344961713Sgirish } 451444961713Sgirish 451544961713Sgirish if (rxport_fatal) { 451644961713Sgirish 
NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 451752ccf843Smisaki " nxge_rxdma_handle_port_error: " 451852ccf843Smisaki " fatal error on Port #%d\n", 451952ccf843Smisaki portn)); 452044961713Sgirish status = nxge_rx_port_fatal_err_recover(nxgep); 452144961713Sgirish if (status == NXGE_OK) { 452244961713Sgirish FM_SERVICE_RESTORED(nxgep); 452344961713Sgirish } 452444961713Sgirish } 452544961713Sgirish 452644961713Sgirish return (status); 452744961713Sgirish } 452844961713Sgirish 452944961713Sgirish static nxge_status_t 453044961713Sgirish nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel) 453144961713Sgirish { 453244961713Sgirish npi_handle_t handle; 453344961713Sgirish npi_status_t rs = NPI_SUCCESS; 453444961713Sgirish nxge_status_t status = NXGE_OK; 453544961713Sgirish p_rx_rbr_ring_t rbrp; 453644961713Sgirish p_rx_rcr_ring_t rcrp; 453744961713Sgirish p_rx_mbox_t mboxp; 453844961713Sgirish rx_dma_ent_msk_t ent_mask; 453944961713Sgirish p_nxge_dma_common_t dmap; 454044961713Sgirish uint32_t ref_cnt; 454144961713Sgirish p_rx_msg_t rx_msg_p; 454244961713Sgirish int i; 454344961713Sgirish uint32_t nxge_port_rcr_size; 454444961713Sgirish 454544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover")); 454644961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 454752ccf843Smisaki "Recovering from RxDMAChannel#%d error...", channel)); 454844961713Sgirish 454944961713Sgirish /* 455044961713Sgirish * Stop the dma channel waits for the stop done. 455144961713Sgirish * If the stop done bit is not set, then create 455244961713Sgirish * an error. 
455344961713Sgirish */ 455444961713Sgirish 455544961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 455644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 455744961713Sgirish 45583587e8e2SMichael Speer rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[channel]; 45593587e8e2SMichael Speer rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[channel]; 456044961713Sgirish 456144961713Sgirish MUTEX_ENTER(&rbrp->lock); 456244961713Sgirish MUTEX_ENTER(&rbrp->post_lock); 456344961713Sgirish 456444961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 456544961713Sgirish 456644961713Sgirish rs = npi_rxdma_cfg_rdc_disable(handle, channel); 456744961713Sgirish if (rs != NPI_SUCCESS) { 456844961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 456952ccf843Smisaki "nxge_disable_rxdma_channel:failed")); 457044961713Sgirish goto fail; 457144961713Sgirish } 457244961713Sgirish 457344961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 457444961713Sgirish 457544961713Sgirish /* Disable interrupt */ 457644961713Sgirish ent_mask.value = RX_DMA_ENT_MSK_ALL; 457744961713Sgirish rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 457844961713Sgirish if (rs != NPI_SUCCESS) { 457944961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 458052ccf843Smisaki "nxge_rxdma_stop_channel: " 458152ccf843Smisaki "set rxdma event masks failed (channel %d)", 458252ccf843Smisaki channel)); 458344961713Sgirish } 458444961713Sgirish 458544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 458644961713Sgirish 458744961713Sgirish /* Reset RXDMA channel */ 458844961713Sgirish rs = npi_rxdma_cfg_rdc_reset(handle, channel); 458944961713Sgirish if (rs != NPI_SUCCESS) { 459044961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 459152ccf843Smisaki "nxge_rxdma_fatal_err_recover: " 459252ccf843Smisaki " reset rxdma failed (channel %d)", channel)); 459344961713Sgirish goto fail; 459444961713Sgirish } 459544961713Sgirish 
459644961713Sgirish nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 459744961713Sgirish 45983587e8e2SMichael Speer mboxp = (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 459944961713Sgirish 460044961713Sgirish rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 460144961713Sgirish rbrp->rbr_rd_index = 0; 460244961713Sgirish 460344961713Sgirish rcrp->comp_rd_index = 0; 460444961713Sgirish rcrp->comp_wt_index = 0; 460544961713Sgirish rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 460652ccf843Smisaki (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 4607adfcba55Sjoycey #if defined(__i386) 460852ccf843Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 460952ccf843Smisaki (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4610adfcba55Sjoycey #else 461152ccf843Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 461252ccf843Smisaki (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4613adfcba55Sjoycey #endif 461444961713Sgirish 461544961713Sgirish rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 461652ccf843Smisaki (nxge_port_rcr_size - 1); 461744961713Sgirish rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 461852ccf843Smisaki (nxge_port_rcr_size - 1); 461944961713Sgirish 462044961713Sgirish dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 462144961713Sgirish bzero((caddr_t)dmap->kaddrp, dmap->alength); 462244961713Sgirish 462344961713Sgirish cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 462444961713Sgirish 462544961713Sgirish for (i = 0; i < rbrp->rbr_max_size; i++) { 462644961713Sgirish rx_msg_p = rbrp->rx_msg_ring[i]; 462744961713Sgirish ref_cnt = rx_msg_p->ref_cnt; 462844961713Sgirish if (ref_cnt != 1) { 4629a3c5bd6dSspeer if (rx_msg_p->cur_usage_cnt != 463052ccf843Smisaki rx_msg_p->max_usage_cnt) { 463144961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 463252ccf843Smisaki "buf[%d]: cur_usage_cnt = %d " 463352ccf843Smisaki "max_usage_cnt = %d\n", i, 463452ccf843Smisaki rx_msg_p->cur_usage_cnt, 463552ccf843Smisaki 
rx_msg_p->max_usage_cnt)); 4636a3c5bd6dSspeer } else { 4637a3c5bd6dSspeer /* Buffer can be re-posted */ 4638a3c5bd6dSspeer rx_msg_p->free = B_TRUE; 4639a3c5bd6dSspeer rx_msg_p->cur_usage_cnt = 0; 4640a3c5bd6dSspeer rx_msg_p->max_usage_cnt = 0xbaddcafe; 4641a3c5bd6dSspeer rx_msg_p->pkt_buf_size = 0; 4642a3c5bd6dSspeer } 464344961713Sgirish } 464444961713Sgirish } 464544961713Sgirish 464644961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 464744961713Sgirish 464844961713Sgirish status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 464944961713Sgirish if (status != NXGE_OK) { 465044961713Sgirish goto fail; 465144961713Sgirish } 465244961713Sgirish 465344961713Sgirish MUTEX_EXIT(&rbrp->post_lock); 465444961713Sgirish MUTEX_EXIT(&rbrp->lock); 465544961713Sgirish 465644961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 465752ccf843Smisaki "Recovery Successful, RxDMAChannel#%d Restored", 465852ccf843Smisaki channel)); 465944961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover")); 466044961713Sgirish return (NXGE_OK); 4661*ef523517SMichael Speer 466244961713Sgirish fail: 466344961713Sgirish MUTEX_EXIT(&rbrp->post_lock); 466444961713Sgirish MUTEX_EXIT(&rbrp->lock); 466544961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 466644961713Sgirish return (NXGE_ERROR | rs); 466744961713Sgirish } 466844961713Sgirish 466944961713Sgirish nxge_status_t 467044961713Sgirish nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 467144961713Sgirish { 4672678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 4673678453a8Sspeer nxge_status_t status = NXGE_OK; 4674*ef523517SMichael Speer p_rx_rcr_ring_t rcrp; 4675678453a8Sspeer int rdc; 467644961713Sgirish 467744961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover")); 467844961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 467952ccf843Smisaki "Recovering from RxPort error...")); 4680678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 
"Disabling RxMAC...\n")); 468144961713Sgirish 468244961713Sgirish if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 468344961713Sgirish goto fail; 468444961713Sgirish 468544961713Sgirish NXGE_DELAY(1000); 468644961713Sgirish 4687678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels...")); 468844961713Sgirish 4689678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 4690678453a8Sspeer if ((1 << rdc) & set->owned.map) { 4691*ef523517SMichael Speer rcrp = nxgep->rx_rcr_rings->rcr_rings[rdc]; 4692*ef523517SMichael Speer if (rcrp != NULL) { 4693*ef523517SMichael Speer MUTEX_ENTER(&rcrp->lock); 4694*ef523517SMichael Speer if (nxge_rxdma_fatal_err_recover(nxgep, 4695*ef523517SMichael Speer rdc) != NXGE_OK) { 4696*ef523517SMichael Speer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4697*ef523517SMichael Speer "Could not recover " 4698*ef523517SMichael Speer "channel %d", rdc)); 4699*ef523517SMichael Speer } 4700*ef523517SMichael Speer MUTEX_EXIT(&rcrp->lock); 4701678453a8Sspeer } 470244961713Sgirish } 470344961713Sgirish } 470444961713Sgirish 4705678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP...")); 470644961713Sgirish 470744961713Sgirish /* Reset IPP */ 470844961713Sgirish if (nxge_ipp_reset(nxgep) != NXGE_OK) { 470944961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 471052ccf843Smisaki "nxge_rx_port_fatal_err_recover: " 471152ccf843Smisaki "Failed to reset IPP")); 471244961713Sgirish goto fail; 471344961713Sgirish } 471444961713Sgirish 471544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 471644961713Sgirish 471744961713Sgirish /* Reset RxMAC */ 471844961713Sgirish if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 471944961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 472052ccf843Smisaki "nxge_rx_port_fatal_err_recover: " 472152ccf843Smisaki "Failed to reset RxMAC")); 472244961713Sgirish goto fail; 472344961713Sgirish } 472444961713Sgirish 472544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 472644961713Sgirish 
472744961713Sgirish /* Re-Initialize IPP */ 472844961713Sgirish if (nxge_ipp_init(nxgep) != NXGE_OK) { 472944961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 473052ccf843Smisaki "nxge_rx_port_fatal_err_recover: " 473152ccf843Smisaki "Failed to init IPP")); 473244961713Sgirish goto fail; 473344961713Sgirish } 473444961713Sgirish 473544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 473644961713Sgirish 473744961713Sgirish /* Re-Initialize RxMAC */ 473844961713Sgirish if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 473944961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 474052ccf843Smisaki "nxge_rx_port_fatal_err_recover: " 474152ccf843Smisaki "Failed to reset RxMAC")); 474244961713Sgirish goto fail; 474344961713Sgirish } 474444961713Sgirish 474544961713Sgirish NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 474644961713Sgirish 474744961713Sgirish /* Re-enable RxMAC */ 474844961713Sgirish if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 474944961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 475052ccf843Smisaki "nxge_rx_port_fatal_err_recover: " 475152ccf843Smisaki "Failed to enable RxMAC")); 475244961713Sgirish goto fail; 475344961713Sgirish } 475444961713Sgirish 475544961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 475652ccf843Smisaki "Recovery Successful, RxPort Restored")); 475744961713Sgirish 475844961713Sgirish return (NXGE_OK); 475944961713Sgirish fail: 476044961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 476144961713Sgirish return (status); 476244961713Sgirish } 476344961713Sgirish 476444961713Sgirish void 476544961713Sgirish nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 476644961713Sgirish { 476744961713Sgirish rx_dma_ctl_stat_t cs; 476844961713Sgirish rx_ctl_dat_fifo_stat_t cdfs; 476944961713Sgirish 477044961713Sgirish switch (err_id) { 477144961713Sgirish case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 477244961713Sgirish case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 
477344961713Sgirish case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 477444961713Sgirish case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 477544961713Sgirish case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 477644961713Sgirish case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 477744961713Sgirish case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 477844961713Sgirish case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 477944961713Sgirish case NXGE_FM_EREPORT_RDMC_RCRINCON: 478044961713Sgirish case NXGE_FM_EREPORT_RDMC_RCRFULL: 478144961713Sgirish case NXGE_FM_EREPORT_RDMC_RBRFULL: 478244961713Sgirish case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 478344961713Sgirish case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 478444961713Sgirish case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 478544961713Sgirish RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 478652ccf843Smisaki chan, &cs.value); 478744961713Sgirish if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 478844961713Sgirish cs.bits.hdw.rcr_ack_err = 1; 478944961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 479044961713Sgirish cs.bits.hdw.dc_fifo_err = 1; 479144961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 479244961713Sgirish cs.bits.hdw.rcr_sha_par = 1; 479344961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 479444961713Sgirish cs.bits.hdw.rbr_pre_par = 1; 479544961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 479644961713Sgirish cs.bits.hdw.rbr_tmout = 1; 479744961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 479844961713Sgirish cs.bits.hdw.rsp_cnt_err = 1; 479944961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 480044961713Sgirish cs.bits.hdw.byte_en_bus = 1; 480144961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 480244961713Sgirish cs.bits.hdw.rsp_dat_err = 1; 480344961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 480444961713Sgirish cs.bits.hdw.config_err = 1; 480544961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 480644961713Sgirish 
cs.bits.hdw.rcrincon = 1; 480744961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 480844961713Sgirish cs.bits.hdw.rcrfull = 1; 480944961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 481044961713Sgirish cs.bits.hdw.rbrfull = 1; 481144961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 481244961713Sgirish cs.bits.hdw.rbrlogpage = 1; 481344961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 481444961713Sgirish cs.bits.hdw.cfiglogpage = 1; 4815adfcba55Sjoycey #if defined(__i386) 4816adfcba55Sjoycey cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 481752ccf843Smisaki cs.value); 4818adfcba55Sjoycey #else 481944961713Sgirish cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 482052ccf843Smisaki cs.value); 4821adfcba55Sjoycey #endif 482244961713Sgirish RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 482352ccf843Smisaki chan, cs.value); 482444961713Sgirish break; 482544961713Sgirish case NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 482644961713Sgirish case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 482744961713Sgirish case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 482844961713Sgirish cdfs.value = 0; 482944961713Sgirish if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 483044961713Sgirish cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 483144961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 483244961713Sgirish cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 483344961713Sgirish else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 483444961713Sgirish cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 4835adfcba55Sjoycey #if defined(__i386) 4836adfcba55Sjoycey cmn_err(CE_NOTE, 483752ccf843Smisaki "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 483852ccf843Smisaki cdfs.value); 4839adfcba55Sjoycey #else 484044961713Sgirish cmn_err(CE_NOTE, 484152ccf843Smisaki "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 484252ccf843Smisaki cdfs.value); 4843adfcba55Sjoycey #endif 
4844678453a8Sspeer NXGE_REG_WR64(nxgep->npi_handle, 4845678453a8Sspeer RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value); 484644961713Sgirish break; 484744961713Sgirish case NXGE_FM_EREPORT_RDMC_DCF_ERR: 484844961713Sgirish break; 484953f3d8ecSyc case NXGE_FM_EREPORT_RDMC_RCR_ERR: 485044961713Sgirish break; 485144961713Sgirish } 485244961713Sgirish } 4853678453a8Sspeer 4854678453a8Sspeer static void 4855678453a8Sspeer nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p) 4856678453a8Sspeer { 4857678453a8Sspeer rxring_info_t *ring_info; 4858678453a8Sspeer int index; 4859678453a8Sspeer uint32_t chunk_size; 4860678453a8Sspeer uint64_t kaddr; 4861678453a8Sspeer uint_t num_blocks; 4862678453a8Sspeer 4863678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free")); 4864678453a8Sspeer 4865678453a8Sspeer if (rbr_p == NULL) { 4866678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4867678453a8Sspeer "==> nxge_rxdma_databuf_free: NULL rbr pointer")); 4868678453a8Sspeer return; 4869678453a8Sspeer } 4870678453a8Sspeer 4871678453a8Sspeer if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) { 4872e759c33aSMichael Speer NXGE_DEBUG_MSG((NULL, DMA_CTL, 4873e759c33aSMichael Speer "<== nxge_rxdma_databuf_free: DDI")); 4874678453a8Sspeer return; 4875678453a8Sspeer } 4876678453a8Sspeer 4877678453a8Sspeer ring_info = rbr_p->ring_info; 4878678453a8Sspeer if (ring_info == NULL) { 4879678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4880678453a8Sspeer "==> nxge_rxdma_databuf_free: NULL ring info")); 4881678453a8Sspeer return; 4882678453a8Sspeer } 4883678453a8Sspeer num_blocks = rbr_p->num_blocks; 4884678453a8Sspeer for (index = 0; index < num_blocks; index++) { 4885678453a8Sspeer kaddr = ring_info->buffer[index].kaddr; 4886678453a8Sspeer chunk_size = ring_info->buffer[index].buf_size; 4887678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 4888678453a8Sspeer "==> nxge_rxdma_databuf_free: free chunk %d " 4889678453a8Sspeer "kaddrp $%p chunk size %d", 4890678453a8Sspeer index, kaddr, chunk_size)); 
4891678453a8Sspeer if (kaddr == NULL) continue; 4892678453a8Sspeer nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size); 4893678453a8Sspeer ring_info->buffer[index].kaddr = NULL; 4894678453a8Sspeer } 4895678453a8Sspeer 4896678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free")); 4897678453a8Sspeer } 4898678453a8Sspeer 4899678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4900678453a8Sspeer extern void contig_mem_free(void *, size_t); 4901678453a8Sspeer #endif 4902678453a8Sspeer 4903678453a8Sspeer void 4904678453a8Sspeer nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size) 4905678453a8Sspeer { 4906678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf")); 4907678453a8Sspeer 4908678453a8Sspeer if (kaddr == NULL || !buf_size) { 4909678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4910678453a8Sspeer "==> nxge_free_buf: invalid kaddr $%p size to free %d", 4911678453a8Sspeer kaddr, buf_size)); 4912678453a8Sspeer return; 4913678453a8Sspeer } 4914678453a8Sspeer 4915678453a8Sspeer switch (alloc_type) { 4916678453a8Sspeer case KMEM_ALLOC: 4917678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 4918678453a8Sspeer "==> nxge_free_buf: freeing kmem $%p size %d", 4919678453a8Sspeer kaddr, buf_size)); 4920678453a8Sspeer #if defined(__i386) 4921678453a8Sspeer KMEM_FREE((void *)(uint32_t)kaddr, buf_size); 4922678453a8Sspeer #else 4923678453a8Sspeer KMEM_FREE((void *)kaddr, buf_size); 4924678453a8Sspeer #endif 4925678453a8Sspeer break; 4926678453a8Sspeer 4927678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4928678453a8Sspeer case CONTIG_MEM_ALLOC: 4929678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 4930678453a8Sspeer "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d", 4931678453a8Sspeer kaddr, buf_size)); 4932678453a8Sspeer contig_mem_free((void *)kaddr, buf_size); 4933678453a8Sspeer break; 4934678453a8Sspeer #endif 4935678453a8Sspeer 4936678453a8Sspeer default: 4937678453a8Sspeer 
NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4938678453a8Sspeer "<== nxge_free_buf: unsupported alloc type %d", 4939678453a8Sspeer alloc_type)); 4940678453a8Sspeer return; 4941678453a8Sspeer } 4942678453a8Sspeer 4943678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf")); 4944678453a8Sspeer } 4945