/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E%	SMI"

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>
#include <npi_tx_rd64.h>
#include <npi_tx_wr64.h>
#include <sys/llc1.h>

/*
 * TDC tunables.  These are global (patchable) knobs, not per-instance
 * state; see nxge_txdma.h for the TXDMA_RECLAIM_PENDING_DEFAULT and
 * TX_MAX_GATHER_POINTERS values referenced here.
 */
uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
uint32_t nxge_tx_minfree = 32;
uint32_t nxge_tx_intr_thres = 0;
uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
uint32_t nxge_tx_tiny_pack = 1;
uint32_t nxge_tx_use_bcopy = 1;

/* Tunables defined in other translation units of the driver. */
extern uint32_t nxge_tx_ring_size;
extern uint32_t nxge_bcopy_thresh;
extern uint32_t nxge_dvma_thresh;
extern uint32_t nxge_dma_stream_thresh;
extern dma_method_t nxge_force_dma;
extern uint32_t nxge_cksum_offload;

/* Device register access attributes for PIO. */
extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
/* Device descriptor access attributes for DMA. */
extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
/* Device buffer access attributes for DMA. */
extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
extern ddi_dma_attr_t nxge_desc_dma_attr;
extern ddi_dma_attr_t nxge_tx_dma_attr;

extern int nxge_serial_tx(mblk_t *mp, void *arg);

/* Forward declarations for the file-local TDC map/start/stop helpers. */
static nxge_status_t nxge_map_txdma(p_nxge_t, int);

static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int);

static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t *,
	uint32_t, p_nxge_dma_common_t *,
	p_tx_mbox_t *);
static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t);

static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);

static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t,
	p_tx_mbox_t *);
static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
	p_tx_ring_t, p_tx_mbox_t);

static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
	p_tx_ring_t, p_tx_mbox_t);
static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t);

static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
	p_nxge_ldv_t, tx_cs_t);
static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
	uint16_t, p_tx_ring_t);

91678453a8Sspeer static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, 92678453a8Sspeer p_tx_ring_t ring_p, uint16_t channel); 93678453a8Sspeer 9444961713Sgirish nxge_status_t 9544961713Sgirish nxge_init_txdma_channels(p_nxge_t nxgep) 9644961713Sgirish { 97678453a8Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 98678453a8Sspeer int i, count; 99678453a8Sspeer 100678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels")); 101678453a8Sspeer 102678453a8Sspeer for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 103678453a8Sspeer if ((1 << i) & set->lg.map) { 104678453a8Sspeer int tdc; 105678453a8Sspeer nxge_grp_t *group = set->group[i]; 106678453a8Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 107678453a8Sspeer if ((1 << tdc) & group->map) { 108678453a8Sspeer if ((nxge_grp_dc_add(nxgep, 109678453a8Sspeer (vr_handle_t)group, 110678453a8Sspeer VP_BOUND_TX, tdc))) 111678453a8Sspeer return (NXGE_ERROR); 112678453a8Sspeer } 113678453a8Sspeer } 114678453a8Sspeer } 115678453a8Sspeer if (++count == set->lg.count) 116678453a8Sspeer break; 117678453a8Sspeer } 118678453a8Sspeer 119678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels")); 120678453a8Sspeer 121678453a8Sspeer return (NXGE_OK); 122678453a8Sspeer } 123678453a8Sspeer 124678453a8Sspeer nxge_status_t 125678453a8Sspeer nxge_init_txdma_channel( 126678453a8Sspeer p_nxge_t nxge, 127678453a8Sspeer int channel) 128678453a8Sspeer { 129678453a8Sspeer nxge_status_t status; 13044961713Sgirish 131678453a8Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel")); 13244961713Sgirish 133678453a8Sspeer status = nxge_map_txdma(nxge, channel); 13444961713Sgirish if (status != NXGE_OK) { 135678453a8Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 136678453a8Sspeer "<== nxge_init_txdma_channel: status 0x%x", status)); 137678453a8Sspeer (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel); 13844961713Sgirish return (status); 13944961713Sgirish } 14044961713Sgirish 
141678453a8Sspeer status = nxge_txdma_hw_start(nxge, channel); 14244961713Sgirish if (status != NXGE_OK) { 143678453a8Sspeer (void) nxge_unmap_txdma_channel(nxge, channel); 144678453a8Sspeer (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel); 14544961713Sgirish return (status); 14644961713Sgirish } 14744961713Sgirish 148678453a8Sspeer if (!nxge->statsp->tdc_ksp[channel]) 149678453a8Sspeer nxge_setup_tdc_kstats(nxge, channel); 15044961713Sgirish 151678453a8Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel")); 152678453a8Sspeer 153678453a8Sspeer return (status); 15444961713Sgirish } 15544961713Sgirish 15644961713Sgirish void 15744961713Sgirish nxge_uninit_txdma_channels(p_nxge_t nxgep) 15844961713Sgirish { 159678453a8Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 160678453a8Sspeer int tdc; 161678453a8Sspeer 162678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels")); 163678453a8Sspeer 164678453a8Sspeer if (set->owned.map == 0) { 165678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 166678453a8Sspeer "nxge_uninit_txdma_channels: no channels")); 167678453a8Sspeer return; 168678453a8Sspeer } 169678453a8Sspeer 170678453a8Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 171678453a8Sspeer if ((1 << tdc) & set->owned.map) { 172678453a8Sspeer nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc); 173678453a8Sspeer } 174678453a8Sspeer } 175678453a8Sspeer 176678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels")); 177678453a8Sspeer } 178678453a8Sspeer 179678453a8Sspeer void 180678453a8Sspeer nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel) 181678453a8Sspeer { 182678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel")); 183678453a8Sspeer 184678453a8Sspeer if (nxgep->statsp->tdc_ksp[channel]) { 185678453a8Sspeer kstat_delete(nxgep->statsp->tdc_ksp[channel]); 186678453a8Sspeer nxgep->statsp->tdc_ksp[channel] = 0; 187678453a8Sspeer } 18844961713Sgirish 189678453a8Sspeer (void) 
nxge_txdma_stop_channel(nxgep, channel); 190678453a8Sspeer nxge_unmap_txdma_channel(nxgep, channel); 19144961713Sgirish 19244961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 193678453a8Sspeer "<== nxge_uninit_txdma_channel")); 19444961713Sgirish } 19544961713Sgirish 19644961713Sgirish void 19744961713Sgirish nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p, 19844961713Sgirish uint32_t entries, uint32_t size) 19944961713Sgirish { 20044961713Sgirish size_t tsize; 20144961713Sgirish *dest_p = *src_p; 20244961713Sgirish tsize = size * entries; 20344961713Sgirish dest_p->alength = tsize; 20444961713Sgirish dest_p->nblocks = entries; 20544961713Sgirish dest_p->block_size = size; 20644961713Sgirish dest_p->offset += tsize; 20744961713Sgirish 20844961713Sgirish src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize; 20944961713Sgirish src_p->alength -= tsize; 21044961713Sgirish src_p->dma_cookie.dmac_laddress += tsize; 21144961713Sgirish src_p->dma_cookie.dmac_size -= tsize; 21244961713Sgirish } 21344961713Sgirish 214678453a8Sspeer /* 215678453a8Sspeer * nxge_reset_txdma_channel 216678453a8Sspeer * 217678453a8Sspeer * Reset a TDC. 218678453a8Sspeer * 219678453a8Sspeer * Arguments: 220678453a8Sspeer * nxgep 221678453a8Sspeer * channel The channel to reset. 222678453a8Sspeer * reg_data The current TX_CS. 
223678453a8Sspeer * 224678453a8Sspeer * Notes: 225678453a8Sspeer * 226678453a8Sspeer * NPI/NXGE function calls: 227678453a8Sspeer * npi_txdma_channel_reset() 228678453a8Sspeer * npi_txdma_channel_control() 229678453a8Sspeer * 230678453a8Sspeer * Registers accessed: 231678453a8Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 232678453a8Sspeer * TX_RING_KICK DMC+0x40018 Transmit Ring Kick 233678453a8Sspeer * 234678453a8Sspeer * Context: 235678453a8Sspeer * Any domain 236678453a8Sspeer */ 23744961713Sgirish nxge_status_t 23844961713Sgirish nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data) 23944961713Sgirish { 24044961713Sgirish npi_status_t rs = NPI_SUCCESS; 24144961713Sgirish nxge_status_t status = NXGE_OK; 24244961713Sgirish npi_handle_t handle; 24344961713Sgirish 24444961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel")); 24544961713Sgirish 24644961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 24744961713Sgirish if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) { 24844961713Sgirish rs = npi_txdma_channel_reset(handle, channel); 24944961713Sgirish } else { 25044961713Sgirish rs = npi_txdma_channel_control(handle, TXDMA_RESET, 25144961713Sgirish channel); 25244961713Sgirish } 25344961713Sgirish 25444961713Sgirish if (rs != NPI_SUCCESS) { 25544961713Sgirish status = NXGE_ERROR | rs; 25644961713Sgirish } 25744961713Sgirish 25844961713Sgirish /* 25944961713Sgirish * Reset the tail (kick) register to 0. 26044961713Sgirish * (Hardware will not reset it. Tx overflow fatal 26144961713Sgirish * error if tail is not set to 0 after reset! 
26244961713Sgirish */ 26344961713Sgirish TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 26444961713Sgirish 26544961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel")); 26644961713Sgirish return (status); 26744961713Sgirish } 26844961713Sgirish 269678453a8Sspeer /* 270678453a8Sspeer * nxge_init_txdma_channel_event_mask 271678453a8Sspeer * 272678453a8Sspeer * Enable interrupts for a set of events. 273678453a8Sspeer * 274678453a8Sspeer * Arguments: 275678453a8Sspeer * nxgep 276678453a8Sspeer * channel The channel to map. 277678453a8Sspeer * mask_p The events to enable. 278678453a8Sspeer * 279678453a8Sspeer * Notes: 280678453a8Sspeer * 281678453a8Sspeer * NPI/NXGE function calls: 282678453a8Sspeer * npi_txdma_event_mask() 283678453a8Sspeer * 284678453a8Sspeer * Registers accessed: 285678453a8Sspeer * TX_ENT_MSK DMC+0x40020 Transmit Event Mask 286678453a8Sspeer * 287678453a8Sspeer * Context: 288678453a8Sspeer * Any domain 289678453a8Sspeer */ 29044961713Sgirish nxge_status_t 29144961713Sgirish nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 29244961713Sgirish p_tx_dma_ent_msk_t mask_p) 29344961713Sgirish { 29444961713Sgirish npi_handle_t handle; 29544961713Sgirish npi_status_t rs = NPI_SUCCESS; 29644961713Sgirish nxge_status_t status = NXGE_OK; 29744961713Sgirish 29844961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 29944961713Sgirish "<== nxge_init_txdma_channel_event_mask")); 30044961713Sgirish 30144961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 30244961713Sgirish rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p); 30344961713Sgirish if (rs != NPI_SUCCESS) { 30444961713Sgirish status = NXGE_ERROR | rs; 30544961713Sgirish } 30644961713Sgirish 30744961713Sgirish return (status); 30844961713Sgirish } 30944961713Sgirish 310678453a8Sspeer /* 311678453a8Sspeer * nxge_init_txdma_channel_cntl_stat 312678453a8Sspeer * 313678453a8Sspeer * Stop a TDC. If at first we don't succeed, inject an error. 
314678453a8Sspeer * 315678453a8Sspeer * Arguments: 316678453a8Sspeer * nxgep 317678453a8Sspeer * channel The channel to stop. 318678453a8Sspeer * 319678453a8Sspeer * Notes: 320678453a8Sspeer * 321678453a8Sspeer * NPI/NXGE function calls: 322678453a8Sspeer * npi_txdma_control_status() 323678453a8Sspeer * 324678453a8Sspeer * Registers accessed: 325678453a8Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 326678453a8Sspeer * 327678453a8Sspeer * Context: 328678453a8Sspeer * Any domain 329678453a8Sspeer */ 33044961713Sgirish nxge_status_t 33144961713Sgirish nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 33244961713Sgirish uint64_t reg_data) 33344961713Sgirish { 33444961713Sgirish npi_handle_t handle; 33544961713Sgirish npi_status_t rs = NPI_SUCCESS; 33644961713Sgirish nxge_status_t status = NXGE_OK; 33744961713Sgirish 33844961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 33944961713Sgirish "<== nxge_init_txdma_channel_cntl_stat")); 34044961713Sgirish 34144961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 34244961713Sgirish rs = npi_txdma_control_status(handle, OP_SET, channel, 34344961713Sgirish (p_tx_cs_t)®_data); 34444961713Sgirish 34544961713Sgirish if (rs != NPI_SUCCESS) { 34644961713Sgirish status = NXGE_ERROR | rs; 34744961713Sgirish } 34844961713Sgirish 34944961713Sgirish return (status); 35044961713Sgirish } 35144961713Sgirish 352678453a8Sspeer /* 353678453a8Sspeer * nxge_enable_txdma_channel 354678453a8Sspeer * 355678453a8Sspeer * Enable a TDC. 356678453a8Sspeer * 357678453a8Sspeer * Arguments: 358678453a8Sspeer * nxgep 359678453a8Sspeer * channel The channel to enable. 360678453a8Sspeer * tx_desc_p channel's transmit descriptor ring. 
361678453a8Sspeer * mbox_p channel's mailbox, 362678453a8Sspeer * 363678453a8Sspeer * Notes: 364678453a8Sspeer * 365678453a8Sspeer * NPI/NXGE function calls: 366678453a8Sspeer * npi_txdma_ring_config() 367678453a8Sspeer * npi_txdma_mbox_config() 368678453a8Sspeer * npi_txdma_channel_init_enable() 369678453a8Sspeer * 370678453a8Sspeer * Registers accessed: 371678453a8Sspeer * TX_RNG_CFIG DMC+0x40000 Transmit Ring Configuration 372678453a8Sspeer * TXDMA_MBH DMC+0x40030 TXDMA Mailbox High 373678453a8Sspeer * TXDMA_MBL DMC+0x40038 TXDMA Mailbox Low 374678453a8Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 375678453a8Sspeer * 376678453a8Sspeer * Context: 377678453a8Sspeer * Any domain 378678453a8Sspeer */ 37944961713Sgirish nxge_status_t 38044961713Sgirish nxge_enable_txdma_channel(p_nxge_t nxgep, 38144961713Sgirish uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p) 38244961713Sgirish { 38344961713Sgirish npi_handle_t handle; 38444961713Sgirish npi_status_t rs = NPI_SUCCESS; 38544961713Sgirish nxge_status_t status = NXGE_OK; 38644961713Sgirish 38744961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel")); 38844961713Sgirish 38944961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 39044961713Sgirish /* 39144961713Sgirish * Use configuration data composed at init time. 39244961713Sgirish * Write to hardware the transmit ring configurations. 39344961713Sgirish */ 39444961713Sgirish rs = npi_txdma_ring_config(handle, OP_SET, channel, 395678453a8Sspeer (uint64_t *)&(tx_desc_p->tx_ring_cfig.value)); 39644961713Sgirish 39744961713Sgirish if (rs != NPI_SUCCESS) { 39844961713Sgirish return (NXGE_ERROR | rs); 39944961713Sgirish } 40044961713Sgirish 401678453a8Sspeer if (isLDOMguest(nxgep)) { 402678453a8Sspeer /* Add interrupt handler for this channel. 
/*
 * nxge_fill_tx_hdr
 *
 *	Fill in the Neptune internal transmit packet header that the
 *	hardware parses ahead of the frame data.
 *
 *	When fill_len is true, only the total transfer length field is
 *	written and the function returns early.  Otherwise the frame's
 *	L2/L3 headers are examined (copied into a local staging buffer
 *	when they straddle mblks) to program the pad count, LLC/VLAN
 *	flags, IP version, header offsets and, for TCP/UDP, the L4
 *	checksum start/stuff offsets.
 *
 * Arguments:
 * 	mp		The frame (does NOT include the Neptune header).
 * 	fill_len	If true, only fill in the total transfer length.
 * 	l4_cksum	Caller requested L4 checksum offload.
 * 	pkt_len		Total transfer length (used when fill_len is set).
 * 	npads		Number of pad bytes to record in the header.
 * 	pkthdrp		The header to fill in; caller must have zeroed it.
 * 	start_offset	L4 checksum start offset (bytes).
 * 	stuff_offset	L4 checksum stuff offset (bytes).
 *
 * Context:
 *	Any domain
 */
void
nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
	boolean_t l4_cksum, int pkt_len, uint8_t npads,
	p_tx_pkt_hdr_all_t pkthdrp,
	t_uscalar_t start_offset,
	t_uscalar_t stuff_offset)
{
	p_tx_pkt_header_t hdrp;
	p_mblk_t nmp;
	uint64_t tmp;
	size_t mblk_len;
	size_t iph_len;
	size_t hdrs_size;
	/* Staging buffer: Ethernet header + up to 64 bytes of L3/L4. */
	uint8_t hdrs_buf[sizeof (struct ether_header) +
	    64 + sizeof (uint32_t)];
	uint8_t *cursor;
	uint8_t *ip_buf;
	uint16_t eth_type;
	uint8_t ipproto;
	boolean_t is_vlan = B_FALSE;
	size_t eth_hdr_size;

	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));

	/*
	 * Caller should zero out the headers first.
	 */
	hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;

	if (fill_len) {
		/* Length-only mode: set TOT_XFER_LEN and return. */
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_fill_tx_hdr: pkt_len %d "
		    "npads %d", pkt_len, npads));
		tmp = (uint64_t)pkt_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
		goto fill_tx_header_done;
	}

	hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT);

	/*
	 * mp is the original data packet (does not include the
	 * Neptune transmit header).
	 */
	nmp = mp;
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
	    "mp $%p b_rptr $%p len %d",
	    mp, nmp->b_rptr, MBLKL(nmp)));
	/* copy ether_header from mblk to hdrs_buf */
	cursor = &hdrs_buf[0];
	tmp = sizeof (struct ether_vlan_header);
	while ((nmp != NULL) && (tmp > 0)) {
		size_t buflen;
		mblk_len = MBLKL(nmp);
		buflen = min((size_t)tmp, mblk_len);
		bcopy(nmp->b_rptr, cursor, buflen);
		cursor += buflen;
		tmp -= buflen;
		nmp = nmp->b_cont;
	}

	nmp = mp;
	mblk_len = MBLKL(nmp);
	ip_buf = NULL;
	eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) "
	    "ether type 0x%x", eth_type, hdrp->value));

	if (eth_type < ETHERMTU) {
		/* 802.3 length field, not an ethertype: LLC frame. */
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
		    "value 0x%llx", hdrp->value));
		if (*(hdrs_buf + sizeof (struct ether_header))
		    == LLC_SNAP_SAP) {
			/* SNAP: real ethertype is 6 bytes into the LLC hdr. */
			eth_type = ntohs(*((uint16_t *)(hdrs_buf +
			    sizeof (struct ether_header) + 6)));
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
			    eth_type));
		} else {
			goto fill_tx_header_done;
		}
	} else if (eth_type == VLAN_ETHERTYPE) {
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);

		eth_type = ntohs(((struct ether_vlan_header *)
		    hdrs_buf)->ether_type);
		is_vlan = B_TRUE;
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
		    "value 0x%llx", hdrp->value));
	}

	if (!is_vlan) {
		eth_hdr_size = sizeof (struct ether_header);
	} else {
		eth_hdr_size = sizeof (struct ether_vlan_header);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		/*
		 * Fast path: IP header contiguous in the first mblk.
		 * NOTE(review): iph_len is the IHL in 32-bit words, but
		 * the bound check below compares it against byte counts
		 * -- looks under-strict; confirm against the hardware's
		 * parsing requirements before changing.
		 */
		if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
			ip_buf = nmp->b_rptr + eth_hdr_size;
			mblk_len -= eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
			if (mblk_len > (iph_len + sizeof (uint32_t))) {
				ip_buf = nmp->b_rptr;
				ip_buf += eth_hdr_size;
			} else {
				ip_buf = NULL;
			}

		}
		if (ip_buf == NULL) {
			/* Slow path: gather the headers across mblks. */
			hdrs_size = 0;
			((p_ether_header_t)hdrs_buf)->ether_type = 0;
			while ((nmp) && (hdrs_size <
			    sizeof (hdrs_buf))) {
				mblk_len = (size_t)nmp->b_wptr -
				    (size_t)nmp->b_rptr;
				if (mblk_len >=
				    (sizeof (hdrs_buf) - hdrs_size))
					mblk_len = sizeof (hdrs_buf) -
					    hdrs_size;
				bcopy(nmp->b_rptr,
				    &hdrs_buf[hdrs_size], mblk_len);
				hdrs_size += mblk_len;
				nmp = nmp->b_cont;
			}
			ip_buf = hdrs_buf;
			ip_buf += eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
		}

		/* Byte 9 of an IPv4 header is the protocol field. */
		ipproto = ip_buf[9];

		tmp = (uint64_t)iph_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
		/* L3START is expressed in 16-bit units. */
		tmp = (uint64_t)(eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
		    "tmp 0x%x",
		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
		    ipproto, tmp));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
		    "value 0x%llx", hdrp->value));

		break;

	case ETHERTYPE_IPV6:
		/* Always gather: IPv6 parsing uses the staging buffer. */
		hdrs_size = 0;
		((p_ether_header_t)hdrs_buf)->ether_type = 0;
		while ((nmp) && (hdrs_size <
		    sizeof (hdrs_buf))) {
			mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
			if (mblk_len >=
			    (sizeof (hdrs_buf) - hdrs_size))
				mblk_len = sizeof (hdrs_buf) -
				    hdrs_size;
			bcopy(nmp->b_rptr,
			    &hdrs_buf[hdrs_size], mblk_len);
			hdrs_size += mblk_len;
			nmp = nmp->b_cont;
		}
		ip_buf = hdrs_buf;
		ip_buf += eth_hdr_size;

		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);

		tmp = (eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		/* byte 6 is the next header protocol */
		ipproto = ip_buf[6];

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
		    ipproto));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
		    "value 0x%llx", hdrp->value));

		break;

	default:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
		goto fill_tx_header_done;
	}

	switch (ipproto) {
	case IPPROTO_TCP:
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
		if (l4_cksum) {
			/* Offsets are programmed in 16-bit units. */
			hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP;
			hdrp->value |=
			    (((uint64_t)(start_offset >> 1)) <<
			    TX_PKT_HEADER_L4START_SHIFT);
			hdrp->value |=
			    (((uint64_t)(stuff_offset >> 1)) <<
			    TX_PKT_HEADER_L4STUFF_SHIFT);

			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_hdr_init: TCP CKSUM "
			    "value 0x%llx", hdrp->value));
		}

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
		    "value 0x%llx", hdrp->value));
		break;

	case IPPROTO_UDP:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
		if (l4_cksum) {
			if (!nxge_cksum_offload) {
				uint16_t *up;
				uint16_t cksum;
				t_uscalar_t stuff_len;

				/*
				 * The checksum field has the
				 * partial checksum.
				 * IP_CSUM() macro calls ip_cksum() which
				 * can add in the partial checksum.
				 */
				cksum = IP_CSUM(mp, start_offset, 0);
				/*
				 * Walk the mblk chain to find the mblk
				 * holding the checksum (stuff) field.
				 */
				stuff_len = stuff_offset;
				nmp = mp;
				mblk_len = MBLKL(nmp);
				while ((nmp != NULL) &&
				    (mblk_len < stuff_len)) {
					stuff_len -= mblk_len;
					nmp = nmp->b_cont;
				}
				ASSERT(nmp);
				up = (uint16_t *)(nmp->b_rptr + stuff_len);

				/* Patch in the software checksum and
				 * tell hardware not to checksum. */
				*up = cksum;
				hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP;
				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
				    "use sw cksum "
				    "write to $%p cksum 0x%x content up 0x%x",
				    stuff_len,
				    up,
				    cksum,
				    *up));
			} else {
				/* Hardware will compute the full checksum */
				hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP;
				hdrp->value |=
				    (((uint64_t)(start_offset >> 1)) <<
				    TX_PKT_HEADER_L4START_SHIFT);
				hdrp->value |=
				    (((uint64_t)(stuff_offset >> 1)) <<
				    TX_PKT_HEADER_L4STUFF_SHIFT);

				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
				    " use partial checksum "
				    "cksum 0x%x ",
				    "value 0x%llx",
				    stuff_offset,
				    IP_CSUM(mp, start_offset, 0),
				    hdrp->value));
			}
		}

		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_tx_pkt_hdr_init: UDP"
		    "value 0x%llx", hdrp->value));
		break;

	default:
		goto fill_tx_header_done;
	}

fill_tx_header_done:
	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_fill_tx_hdr: pkt_len %d "
	    "npads %d value 0x%llx", pkt_len, npads, hdrp->value));

	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
}

/*
 * nxge_tx_pkt_header_reserve
 *
 *	Allocate a fresh mblk for the Neptune transmit header and link
 *	the original frame behind it.  Returns the new head mblk, or
 *	NULL if allocb() fails.
 */
/*ARGSUSED*/
p_mblk_t
nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
{
	p_mblk_t newmp = NULL;

	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "<== nxge_tx_pkt_header_reserve: allocb failed"));
		return (NULL);
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_tx_pkt_header_reserve: get new mp"));
	DB_TYPE(newmp) = M_DATA;
	/* Reserve the header at the very end of the new buffer. */
	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
	linkb(newmp, mp);
	newmp->b_rptr -= TX_PKT_HEADER_SIZE;

73544961713Sgirish NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: " 73644961713Sgirish "b_rptr $%p b_wptr $%p", 73744961713Sgirish newmp->b_rptr, newmp->b_wptr)); 73844961713Sgirish 73944961713Sgirish NXGE_DEBUG_MSG((NULL, TX_CTL, 74044961713Sgirish "<== nxge_tx_pkt_header_reserve: use new mp")); 74144961713Sgirish 74244961713Sgirish return (newmp); 74344961713Sgirish } 74444961713Sgirish 74544961713Sgirish int 74644961713Sgirish nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p) 74744961713Sgirish { 74844961713Sgirish uint_t nmblks; 74944961713Sgirish ssize_t len; 75044961713Sgirish uint_t pkt_len; 75144961713Sgirish p_mblk_t nmp, bmp, tmp; 75244961713Sgirish uint8_t *b_wptr; 75344961713Sgirish 75444961713Sgirish NXGE_DEBUG_MSG((NULL, TX_CTL, 75544961713Sgirish "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p " 75644961713Sgirish "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp))); 75744961713Sgirish 75844961713Sgirish nmp = mp; 75944961713Sgirish bmp = mp; 76044961713Sgirish nmblks = 0; 76144961713Sgirish pkt_len = 0; 76244961713Sgirish *tot_xfer_len_p = 0; 76344961713Sgirish 76444961713Sgirish while (nmp) { 76544961713Sgirish len = MBLKL(nmp); 76644961713Sgirish NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 76744961713Sgirish "len %d pkt_len %d nmblks %d tot_xfer_len %d", 76844961713Sgirish len, pkt_len, nmblks, 76944961713Sgirish *tot_xfer_len_p)); 77044961713Sgirish 77144961713Sgirish if (len <= 0) { 77244961713Sgirish bmp = nmp; 77344961713Sgirish nmp = nmp->b_cont; 77444961713Sgirish NXGE_DEBUG_MSG((NULL, TX_CTL, 77544961713Sgirish "==> nxge_tx_pkt_nmblocks: " 77644961713Sgirish "len (0) pkt_len %d nmblks %d", 77744961713Sgirish pkt_len, nmblks)); 77844961713Sgirish continue; 77944961713Sgirish } 78044961713Sgirish 78144961713Sgirish *tot_xfer_len_p += len; 78244961713Sgirish NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 78344961713Sgirish "len %d pkt_len %d nmblks %d tot_xfer_len %d", 78444961713Sgirish len, 
pkt_len, nmblks, 78544961713Sgirish *tot_xfer_len_p)); 78644961713Sgirish 78744961713Sgirish if (len < nxge_bcopy_thresh) { 78844961713Sgirish NXGE_DEBUG_MSG((NULL, TX_CTL, 78944961713Sgirish "==> nxge_tx_pkt_nmblocks: " 79044961713Sgirish "len %d (< thresh) pkt_len %d nmblks %d", 79144961713Sgirish len, pkt_len, nmblks)); 79244961713Sgirish if (pkt_len == 0) 79344961713Sgirish nmblks++; 79444961713Sgirish pkt_len += len; 79544961713Sgirish if (pkt_len >= nxge_bcopy_thresh) { 79644961713Sgirish pkt_len = 0; 79744961713Sgirish len = 0; 79844961713Sgirish nmp = bmp; 79944961713Sgirish } 80044961713Sgirish } else { 80144961713Sgirish NXGE_DEBUG_MSG((NULL, TX_CTL, 80244961713Sgirish "==> nxge_tx_pkt_nmblocks: " 80344961713Sgirish "len %d (> thresh) pkt_len %d nmblks %d", 80444961713Sgirish len, pkt_len, nmblks)); 80544961713Sgirish pkt_len = 0; 80644961713Sgirish nmblks++; 80744961713Sgirish /* 80844961713Sgirish * Hardware limits the transfer length to 4K. 80944961713Sgirish * If len is more than 4K, we need to break 81044961713Sgirish * it up to at most 2 more blocks. 
81144961713Sgirish */ 81244961713Sgirish if (len > TX_MAX_TRANSFER_LENGTH) { 81344961713Sgirish uint32_t nsegs; 81444961713Sgirish 815678453a8Sspeer nsegs = 1; 81644961713Sgirish NXGE_DEBUG_MSG((NULL, TX_CTL, 81744961713Sgirish "==> nxge_tx_pkt_nmblocks: " 81844961713Sgirish "len %d pkt_len %d nmblks %d nsegs %d", 81944961713Sgirish len, pkt_len, nmblks, nsegs)); 82044961713Sgirish if (len % (TX_MAX_TRANSFER_LENGTH * 2)) { 82144961713Sgirish ++nsegs; 82244961713Sgirish } 82344961713Sgirish do { 82444961713Sgirish b_wptr = nmp->b_rptr + 82544961713Sgirish TX_MAX_TRANSFER_LENGTH; 82644961713Sgirish nmp->b_wptr = b_wptr; 82744961713Sgirish if ((tmp = dupb(nmp)) == NULL) { 82844961713Sgirish return (0); 82944961713Sgirish } 83044961713Sgirish tmp->b_rptr = b_wptr; 83144961713Sgirish tmp->b_wptr = nmp->b_wptr; 83244961713Sgirish tmp->b_cont = nmp->b_cont; 83344961713Sgirish nmp->b_cont = tmp; 83444961713Sgirish nmblks++; 83544961713Sgirish if (--nsegs) { 83644961713Sgirish nmp = tmp; 83744961713Sgirish } 83844961713Sgirish } while (nsegs); 83944961713Sgirish nmp = tmp; 84044961713Sgirish } 84144961713Sgirish } 84244961713Sgirish 84344961713Sgirish /* 84444961713Sgirish * Hardware limits the transmit gather pointers to 15. 
84544961713Sgirish */ 84644961713Sgirish if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) > 84744961713Sgirish TX_MAX_GATHER_POINTERS) { 84844961713Sgirish NXGE_DEBUG_MSG((NULL, TX_CTL, 84944961713Sgirish "==> nxge_tx_pkt_nmblocks: pull msg - " 85044961713Sgirish "len %d pkt_len %d nmblks %d", 85144961713Sgirish len, pkt_len, nmblks)); 85244961713Sgirish /* Pull all message blocks from b_cont */ 85344961713Sgirish if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) { 85444961713Sgirish return (0); 85544961713Sgirish } 85644961713Sgirish freemsg(nmp->b_cont); 85744961713Sgirish nmp->b_cont = tmp; 85844961713Sgirish pkt_len = 0; 85944961713Sgirish } 86044961713Sgirish bmp = nmp; 86144961713Sgirish nmp = nmp->b_cont; 86244961713Sgirish } 86344961713Sgirish 86444961713Sgirish NXGE_DEBUG_MSG((NULL, TX_CTL, 86544961713Sgirish "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p " 86644961713Sgirish "nmblks %d len %d tot_xfer_len %d", 86744961713Sgirish mp->b_rptr, mp->b_wptr, nmblks, 86844961713Sgirish MBLKL(mp), *tot_xfer_len_p)); 86944961713Sgirish 87044961713Sgirish return (nmblks); 87144961713Sgirish } 87244961713Sgirish 87344961713Sgirish boolean_t 87444961713Sgirish nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks) 87544961713Sgirish { 87644961713Sgirish boolean_t status = B_TRUE; 87744961713Sgirish p_nxge_dma_common_t tx_desc_dma_p; 87844961713Sgirish nxge_dma_common_t desc_area; 87944961713Sgirish p_tx_desc_t tx_desc_ring_vp; 88044961713Sgirish p_tx_desc_t tx_desc_p; 88144961713Sgirish p_tx_desc_t tx_desc_pp; 88244961713Sgirish tx_desc_t r_tx_desc; 88344961713Sgirish p_tx_msg_t tx_msg_ring; 88444961713Sgirish p_tx_msg_t tx_msg_p; 88544961713Sgirish npi_handle_t handle; 88644961713Sgirish tx_ring_hdl_t tx_head; 88744961713Sgirish uint32_t pkt_len; 88844961713Sgirish uint_t tx_rd_index; 88944961713Sgirish uint16_t head_index, tail_index; 89044961713Sgirish uint8_t tdc; 89144961713Sgirish boolean_t head_wrap, tail_wrap; 89244961713Sgirish 
	p_nxge_tx_ring_stats_t tdc_stats;
	int rc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));

	/*
	 * Skip the reclaim pass when few descriptors are pending, unless
	 * the caller passed nmblks == 0 (an explicit "reclaim now").
	 */
	status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
	    (nmblks != 0));
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d",
	    tx_ring_p->descs_pending, nxge_reclaim_pending,
	    nmblks));
	if (!status) {
		tx_desc_dma_p = &tx_ring_p->tdc_desc;
		desc_area = tx_ring_p->tdc_desc;
		handle = NXGE_DEV_NPI_HANDLE(nxgep);
		/*
		 * NOTE(review): this first assignment is immediately
		 * overwritten by the DMA_COMMON_VPTR() form below;
		 * presumably both resolve to the same kernel VA.
		 */
		tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
		tx_desc_ring_vp =
		    (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
		tx_rd_index = tx_ring_p->rd_index;
		tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
		tx_msg_ring = tx_ring_p->tx_msg_ring;
		tx_msg_p = &tx_msg_ring[tx_rd_index];
		tdc = tx_ring_p->tdc;
		tdc_stats = tx_ring_p->tdc_stats;
		/* Track the high-water mark of pending descriptors. */
		if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
			tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
		}

		tail_index = tx_ring_p->wr_index;
		tail_wrap = tx_ring_p->wr_index_wrap;

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
		    "tail_index %d tail_wrap %d "
		    "tx_desc_p $%p ($%p) ",
		    tdc, tx_rd_index, tail_index, tail_wrap,
		    tx_desc_p, (*(uint64_t *)tx_desc_p)));
		/*
		 * Read the hardware maintained transmit head
		 * and wrap around bit.
		 */
		TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
		head_index = tx_head.bits.ldw.head;
		head_wrap = tx_head.bits.ldw.wrap;
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: "
		    "tx_rd_index %d tail %d tail_wrap %d "
		    "head %d wrap %d",
		    tx_rd_index, tail_index, tail_wrap,
		    head_index, head_wrap));

		/*
		 * head == tail means either a completely empty or a
		 * completely full ring; the wrap bits disambiguate.
		 */
		if (head_index == tail_index) {
			if (TXDMA_RING_EMPTY(head_index, head_wrap,
			    tail_index, tail_wrap) &&
			    (head_index == tx_rd_index)) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: EMPTY"));
				return (B_TRUE);
			}

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: Checking "
			    "if ring full"));
			if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
			    tail_wrap)) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: full"));
				return (B_FALSE);
			}
		}

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: tx_rd_index and head_index"));

		/*
		 * Free every completed descriptor between the software
		 * read index and the hardware head.
		 */
		tx_desc_pp = &r_tx_desc;
		while ((tx_rd_index != head_index) &&
		    (tx_ring_p->descs_pending != 0)) {

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: Checking if pending"));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "descs_pending %d ",
			    tx_ring_p->descs_pending));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d "
			    "(tx_desc_p $%p)",
			    tx_rd_index, head_index,
			    tx_desc_p));

			/* Copy the descriptor before decoding its fields. */
			tx_desc_pp->value = tx_desc_p->value;
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d "
			    "tx_desc_p $%p (desc value 0x%llx) ",
			    tx_rd_index, head_index,
			    tx_desc_pp, (*(uint64_t *)tx_desc_pp)));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: dump desc:"));

			/* sop is 1 only on the first descriptor of a packet. */
			pkt_len = tx_desc_pp->bits.hdw.tr_len;
			tdc_stats->obytes += pkt_len;
			tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: pkt_len %d "
			    "tdc channel %d opackets %d",
			    pkt_len,
			    tdc,
			    tdc_stats->opackets));

			if (tx_msg_p->flags.dma_type == USE_DVMA) {
				/* Release the fast DVMA mapping. */
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "tx_desc_p = $%p "
				    "tx_desc_pp = $%p "
				    "index = %d",
				    tx_desc_p,
				    tx_desc_pp,
				    tx_ring_p->rd_index));
				(void) dvma_unload(tx_msg_p->dvma_handle,
				    0, -1);
				tx_msg_p->dvma_handle = NULL;
				if (tx_ring_p->dvma_wr_index ==
				    tx_ring_p->dvma_wrap_mask) {
					tx_ring_p->dvma_wr_index = 0;
				} else {
					tx_ring_p->dvma_wr_index++;
				}
				tx_ring_p->dvma_pending--;
			} else if (tx_msg_p->flags.dma_type ==
			    USE_DMA) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: "
				    "USE DMA"));
				if (rc = ddi_dma_unbind_handle
				    (tx_msg_p->dma_handle)) {
					cmn_err(CE_WARN, "!nxge_reclaim: "
					    "ddi_dma_unbind_handle "
					    "failed. status %d", rc);
				}
			}
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: count packets"));
			/*
			 * count a chained packet only once.
			 */
			if (tx_msg_p->tx_message != NULL) {
				freemsg(tx_msg_p->tx_message);
				tx_msg_p->tx_message = NULL;
			}

			/* Advance the read index with wrap. */
			tx_msg_p->flags.dma_type = USE_NONE;
			tx_rd_index = tx_ring_p->rd_index;
			tx_rd_index = (tx_rd_index + 1) &
			    tx_ring_p->tx_wrap_mask;
			tx_ring_p->rd_index = tx_rd_index;
			tx_ring_p->descs_pending--;
			tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
			tx_msg_p = &tx_msg_ring[tx_rd_index];
		}

		/* Is there now room for nmblks more descriptors? */
		status = (nmblks <= (tx_ring_p->tx_ring_size -
		    tx_ring_p->descs_pending -
		    TX_FULL_MARK));
		if (status) {
			cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
		}
	} else {
		status = (nmblks <=
		    (tx_ring_p->tx_ring_size -
		    tx_ring_p->descs_pending -
		    TX_FULL_MARK));
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "<== nxge_txdma_reclaim status = 0x%08x", status));

	return (status);
}

/*
 * nxge_tx_intr
 *
 * Process a TDC interrupt
 *
 * Arguments:
 * 	arg1	A Logical Device state Vector (LSV) data structure.
 * 	arg2	nxge_t *
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_control_status()
 *	npi_intr_ldg_mgmt_set()
 *
 *	nxge_tx_err_evnts()
 *	nxge_txdma_reclaim()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	PIO_LDSV
 *
 * Context:
 *	Any domain
 */
uint_t
nxge_tx_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t nxgep = (p_nxge_t)arg2;
	p_nxge_ldg_t ldgp;
	uint8_t channel;
	uint32_t vindex;
	npi_handle_t handle;
	tx_cs_t cs;
	p_tx_ring_t *tx_rings;
	p_tx_ring_t tx_ring_p;
	npi_status_t rs = NPI_SUCCESS;
	uint_t serviced = DDI_INTR_UNCLAIMED;
	nxge_status_t status = NXGE_OK;

	if (ldvp == NULL) {
		NXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== nxge_tx_intr: nxgep $%p ldvp $%p",
		    nxgep, ldvp));
		return (DDI_INTR_UNCLAIMED);
	}

	/* Trust the nxgep recorded in the LDV over a stale arg2. */
	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
		nxgep = ldvp->nxgep;
	}
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
	    nxgep, ldvp));
	/*
	 * This interrupt handler is for a specific
	 * transmit dma channel.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* Get the control and status for this channel. */
	channel = ldvp->channel;
	ldgp = ldvp->ldgp;
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
	    "channel %d",
	    nxgep, ldvp, channel));

	rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
	vindex = ldvp->vdma_index;
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
	    channel, vindex, rs));
	/* mk (marked) set: completed descriptors are ready to reclaim. */
	if (!rs && cs.bits.ldw.mk) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr:channel %d ring index %d "
		    "status 0x%08x (mk bit set)",
		    channel, vindex, rs));
		tx_rings = nxgep->tx_rings->rings;
		tx_ring_p = tx_rings[vindex];
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr:channel %d ring index %d "
		    "status 0x%08x (mk bit set, calling reclaim)",
		    channel, vindex, rs));

		MUTEX_ENTER(&tx_ring_p->lock);
		(void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0);
		MUTEX_EXIT(&tx_ring_p->lock);
		/* Let GLDv3 resume any flow-controlled transmit. */
		mac_tx_update(nxgep->mach);
	}

	/*
	 * Process other transmit control and status.
	 * Check the ldv state.
	 */
	status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
	/*
	 * Rearm this logical group if this is a single device
	 * group.
	 */
	if (ldgp->nldvs == 1) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr: rearm"));
		if (status == NXGE_OK) {
			/* Guest domains rearm via the HIO proxy. */
			if (isLDOMguest(nxgep)) {
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
				    B_TRUE, ldgp->ldg_timer);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
	serviced = DDI_INTR_CLAIMED;
	return (serviced);
}

/* Stop transmit activity (link monitor only).  Marked dead upstream. */
void
nxge_txdma_stop(p_nxge_t nxgep)	/* Dead */
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
}

/* Stop, fix up, and restart all TDCs.  Marked dead upstream. */
void
nxge_txdma_stop_start(p_nxge_t nxgep)	/* Dead */
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));

	(void) nxge_txdma_stop(nxgep);

	(void) nxge_fixup_txdma_rings(nxgep);
121044961713Sgirish (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 121144961713Sgirish (void) nxge_tx_mac_enable(nxgep); 121244961713Sgirish (void) nxge_txdma_hw_kick(nxgep); 121344961713Sgirish 121444961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start")); 121544961713Sgirish } 121644961713Sgirish 1217678453a8Sspeer npi_status_t 1218678453a8Sspeer nxge_txdma_channel_disable( 1219678453a8Sspeer nxge_t *nxge, 1220678453a8Sspeer int channel) 1221678453a8Sspeer { 1222678453a8Sspeer npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxge); 1223678453a8Sspeer npi_status_t rs; 1224678453a8Sspeer tdmc_intr_dbg_t intr_dbg; 1225678453a8Sspeer 1226678453a8Sspeer /* 1227678453a8Sspeer * Stop the dma channel and wait for the stop-done. 1228678453a8Sspeer * If the stop-done bit is not present, then force 1229678453a8Sspeer * an error so TXC will stop. 1230678453a8Sspeer * All channels bound to this port need to be stopped 1231678453a8Sspeer * and reset after injecting an interrupt error. 
1232678453a8Sspeer */ 1233678453a8Sspeer rs = npi_txdma_channel_disable(handle, channel); 1234678453a8Sspeer NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1235678453a8Sspeer "==> nxge_txdma_channel_disable(%d) " 1236678453a8Sspeer "rs 0x%x", channel, rs)); 1237678453a8Sspeer if (rs != NPI_SUCCESS) { 1238678453a8Sspeer /* Inject any error */ 1239678453a8Sspeer intr_dbg.value = 0; 1240678453a8Sspeer intr_dbg.bits.ldw.nack_pref = 1; 1241678453a8Sspeer NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1242678453a8Sspeer "==> nxge_txdma_hw_mode: " 1243678453a8Sspeer "channel %d (stop failed 0x%x) " 1244678453a8Sspeer "(inject err)", rs, channel)); 1245678453a8Sspeer (void) npi_txdma_inj_int_error_set( 1246678453a8Sspeer handle, channel, &intr_dbg); 1247678453a8Sspeer rs = npi_txdma_channel_disable(handle, channel); 1248678453a8Sspeer NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1249678453a8Sspeer "==> nxge_txdma_hw_mode: " 1250678453a8Sspeer "channel %d (stop again 0x%x) " 1251678453a8Sspeer "(after inject err)", 1252678453a8Sspeer rs, channel)); 1253678453a8Sspeer } 1254678453a8Sspeer 1255678453a8Sspeer return (rs); 1256678453a8Sspeer } 1257678453a8Sspeer 1258678453a8Sspeer /* 1259678453a8Sspeer * nxge_txdma_hw_mode 1260678453a8Sspeer * 1261678453a8Sspeer * Toggle all TDCs on (enable) or off (disable). 1262678453a8Sspeer * 1263678453a8Sspeer * Arguments: 1264678453a8Sspeer * nxgep 1265678453a8Sspeer * enable Enable or disable a TDC. 
1266678453a8Sspeer * 1267678453a8Sspeer * Notes: 1268678453a8Sspeer * 1269678453a8Sspeer * NPI/NXGE function calls: 1270678453a8Sspeer * npi_txdma_channel_enable(TX_CS) 1271678453a8Sspeer * npi_txdma_channel_disable(TX_CS) 1272678453a8Sspeer * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 1273678453a8Sspeer * 1274678453a8Sspeer * Registers accessed: 1275678453a8Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 1276678453a8Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 1277678453a8Sspeer * 1278678453a8Sspeer * Context: 1279678453a8Sspeer * Any domain 1280678453a8Sspeer */ 128144961713Sgirish nxge_status_t 128244961713Sgirish nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 128344961713Sgirish { 1284678453a8Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 1285678453a8Sspeer 1286678453a8Sspeer npi_handle_t handle; 1287678453a8Sspeer nxge_status_t status; 1288678453a8Sspeer npi_status_t rs; 1289678453a8Sspeer int tdc; 129044961713Sgirish 129144961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 129244961713Sgirish "==> nxge_txdma_hw_mode: enable mode %d", enable)); 129344961713Sgirish 129444961713Sgirish if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 129544961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 129644961713Sgirish "<== nxge_txdma_mode: not initialized")); 129744961713Sgirish return (NXGE_ERROR); 129844961713Sgirish } 129944961713Sgirish 1300678453a8Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 130144961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 1302678453a8Sspeer "<== nxge_txdma_hw_mode: NULL ring pointer(s)")); 130344961713Sgirish return (NXGE_ERROR); 130444961713Sgirish } 130544961713Sgirish 1306678453a8Sspeer /* Enable or disable all of the TDCs owned by us. 
*/ 130744961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 1308678453a8Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1309678453a8Sspeer if ((1 << tdc) & set->owned.map) { 1310678453a8Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1311678453a8Sspeer if (ring) { 1312678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1313678453a8Sspeer "==> nxge_txdma_hw_mode: channel %d", tdc)); 1314678453a8Sspeer if (enable) { 1315678453a8Sspeer rs = npi_txdma_channel_enable 1316678453a8Sspeer (handle, tdc); 131744961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1318678453a8Sspeer "==> nxge_txdma_hw_mode: " 1319678453a8Sspeer "channel %d (enable) rs 0x%x", 1320678453a8Sspeer tdc, rs)); 1321678453a8Sspeer } else { 1322678453a8Sspeer rs = nxge_txdma_channel_disable 1323678453a8Sspeer (nxgep, tdc); 132444961713Sgirish } 132544961713Sgirish } 132644961713Sgirish } 132744961713Sgirish } 132844961713Sgirish 132944961713Sgirish status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 133044961713Sgirish 133144961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 133244961713Sgirish "<== nxge_txdma_hw_mode: status 0x%x", status)); 133344961713Sgirish 133444961713Sgirish return (status); 133544961713Sgirish } 133644961713Sgirish 133744961713Sgirish void 133844961713Sgirish nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 133944961713Sgirish { 134044961713Sgirish npi_handle_t handle; 134144961713Sgirish 134244961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, 134344961713Sgirish "==> nxge_txdma_enable_channel: channel %d", channel)); 134444961713Sgirish 134544961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 134644961713Sgirish /* enable the transmit dma channels */ 134744961713Sgirish (void) npi_txdma_channel_enable(handle, channel); 134844961713Sgirish 134944961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel")); 135044961713Sgirish } 135144961713Sgirish 135244961713Sgirish void 135344961713Sgirish nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t 
channel) 135444961713Sgirish { 135544961713Sgirish npi_handle_t handle; 135644961713Sgirish 135744961713Sgirish NXGE_DEBUG_MSG((nxgep, DMA_CTL, 135844961713Sgirish "==> nxge_txdma_disable_channel: channel %d", channel)); 135944961713Sgirish 136044961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 136144961713Sgirish /* stop the transmit dma channels */ 136244961713Sgirish (void) npi_txdma_channel_disable(handle, channel); 136344961713Sgirish 136444961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel")); 136544961713Sgirish } 136644961713Sgirish 1367678453a8Sspeer /* 1368678453a8Sspeer * nxge_txdma_stop_inj_err 1369678453a8Sspeer * 1370678453a8Sspeer * Stop a TDC. If at first we don't succeed, inject an error. 1371678453a8Sspeer * 1372678453a8Sspeer * Arguments: 1373678453a8Sspeer * nxgep 1374678453a8Sspeer * channel The channel to stop. 1375678453a8Sspeer * 1376678453a8Sspeer * Notes: 1377678453a8Sspeer * 1378678453a8Sspeer * NPI/NXGE function calls: 1379678453a8Sspeer * npi_txdma_channel_disable() 1380678453a8Sspeer * npi_txdma_inj_int_error_set() 1381678453a8Sspeer * #if defined(NXGE_DEBUG) 1382678453a8Sspeer * nxge_txdma_regs_dump_channels(nxgep); 1383678453a8Sspeer * #endif 1384678453a8Sspeer * 1385678453a8Sspeer * Registers accessed: 1386678453a8Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 1387678453a8Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 1388678453a8Sspeer * 1389678453a8Sspeer * Context: 1390678453a8Sspeer * Any domain 1391678453a8Sspeer */ 139244961713Sgirish int 139344961713Sgirish nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel) 139444961713Sgirish { 139544961713Sgirish npi_handle_t handle; 139644961713Sgirish tdmc_intr_dbg_t intr_dbg; 139744961713Sgirish int status; 139844961713Sgirish npi_status_t rs = NPI_SUCCESS; 139944961713Sgirish 140044961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err")); 140144961713Sgirish /* 140244961713Sgirish * Stop the dma channel waits 
for the stop done. 140344961713Sgirish * If the stop done bit is not set, then create 140444961713Sgirish * an error. 140544961713Sgirish */ 140644961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 140744961713Sgirish rs = npi_txdma_channel_disable(handle, channel); 140844961713Sgirish status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 140944961713Sgirish if (status == NXGE_OK) { 141044961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 141144961713Sgirish "<== nxge_txdma_stop_inj_err (channel %d): " 141244961713Sgirish "stopped OK", channel)); 141344961713Sgirish return (status); 141444961713Sgirish } 141544961713Sgirish 141644961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 141744961713Sgirish "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) " 141844961713Sgirish "injecting error", channel, rs)); 141944961713Sgirish /* Inject any error */ 142044961713Sgirish intr_dbg.value = 0; 142144961713Sgirish intr_dbg.bits.ldw.nack_pref = 1; 142244961713Sgirish (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 142344961713Sgirish 142444961713Sgirish /* Stop done bit will be set as a result of error injection */ 142544961713Sgirish rs = npi_txdma_channel_disable(handle, channel); 142644961713Sgirish status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 142744961713Sgirish if (!(rs & NPI_TXDMA_STOP_FAILED)) { 142844961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 142944961713Sgirish "<== nxge_txdma_stop_inj_err (channel %d): " 143044961713Sgirish "stopped OK ", channel)); 143144961713Sgirish return (status); 143244961713Sgirish } 143344961713Sgirish 143444961713Sgirish #if defined(NXGE_DEBUG) 143544961713Sgirish nxge_txdma_regs_dump_channels(nxgep); 143644961713Sgirish #endif 143744961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 143844961713Sgirish "==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) " 143944961713Sgirish " (injected error but still not stopped)", channel, rs)); 144044961713Sgirish 144144961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err")); 144244961713Sgirish return (status); 144344961713Sgirish } 144444961713Sgirish 144544961713Sgirish /*ARGSUSED*/ 144644961713Sgirish void 144744961713Sgirish nxge_fixup_txdma_rings(p_nxge_t nxgep) 144844961713Sgirish { 1449678453a8Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 1450678453a8Sspeer int tdc; 145144961713Sgirish 145244961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings")); 145344961713Sgirish 1454678453a8Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1455678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1456678453a8Sspeer "<== nxge_fixup_txdma_rings: NULL ring pointer(s)")); 145744961713Sgirish return; 145844961713Sgirish } 145944961713Sgirish 1460678453a8Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1461678453a8Sspeer if ((1 << tdc) & set->owned.map) { 1462678453a8Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1463678453a8Sspeer if (ring) { 1464678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1465678453a8Sspeer "==> nxge_fixup_txdma_rings: channel %d", 1466678453a8Sspeer tdc)); 1467678453a8Sspeer nxge_txdma_fixup_channel(nxgep, ring, tdc); 1468678453a8Sspeer } 1469678453a8Sspeer } 147044961713Sgirish } 147144961713Sgirish 147244961713Sgirish 
NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings")); 147344961713Sgirish } 147444961713Sgirish 147544961713Sgirish /*ARGSUSED*/ 147644961713Sgirish void 147744961713Sgirish nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 147844961713Sgirish { 147944961713Sgirish p_tx_ring_t ring_p; 148044961713Sgirish 148144961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel")); 148244961713Sgirish ring_p = nxge_txdma_get_ring(nxgep, channel); 148344961713Sgirish if (ring_p == NULL) { 148444961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 148544961713Sgirish return; 148644961713Sgirish } 148744961713Sgirish 148844961713Sgirish if (ring_p->tdc != channel) { 148944961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 149044961713Sgirish "<== nxge_txdma_fix_channel: channel not matched " 149144961713Sgirish "ring tdc %d passed channel", 149244961713Sgirish ring_p->tdc, channel)); 149344961713Sgirish return; 149444961713Sgirish } 149544961713Sgirish 149644961713Sgirish nxge_txdma_fixup_channel(nxgep, ring_p, channel); 149744961713Sgirish 149844961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 149944961713Sgirish } 150044961713Sgirish 150144961713Sgirish /*ARGSUSED*/ 150244961713Sgirish void 150344961713Sgirish nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 150444961713Sgirish { 150544961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel")); 150644961713Sgirish 150744961713Sgirish if (ring_p == NULL) { 150844961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 150944961713Sgirish "<== nxge_txdma_fixup_channel: NULL ring pointer")); 151044961713Sgirish return; 151144961713Sgirish } 151244961713Sgirish 151344961713Sgirish if (ring_p->tdc != channel) { 151444961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 151544961713Sgirish "<== nxge_txdma_fixup_channel: channel not matched " 151644961713Sgirish "ring tdc %d passed channel", 151744961713Sgirish ring_p->tdc, 
channel)); 151844961713Sgirish return; 151944961713Sgirish } 152044961713Sgirish 152144961713Sgirish MUTEX_ENTER(&ring_p->lock); 152244961713Sgirish (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 152344961713Sgirish ring_p->rd_index = 0; 152444961713Sgirish ring_p->wr_index = 0; 152544961713Sgirish ring_p->ring_head.value = 0; 152644961713Sgirish ring_p->ring_kick_tail.value = 0; 152744961713Sgirish ring_p->descs_pending = 0; 152844961713Sgirish MUTEX_EXIT(&ring_p->lock); 152944961713Sgirish 153044961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel")); 153144961713Sgirish } 153244961713Sgirish 153344961713Sgirish /*ARGSUSED*/ 153444961713Sgirish void 153544961713Sgirish nxge_txdma_hw_kick(p_nxge_t nxgep) 153644961713Sgirish { 1537678453a8Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 1538678453a8Sspeer int tdc; 153944961713Sgirish 154044961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick")); 154144961713Sgirish 1542678453a8Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 154344961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 1544678453a8Sspeer "<== nxge_txdma_hw_kick: NULL ring pointer(s)")); 154544961713Sgirish return; 154644961713Sgirish } 154744961713Sgirish 1548678453a8Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1549678453a8Sspeer if ((1 << tdc) & set->owned.map) { 1550678453a8Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1551678453a8Sspeer if (ring) { 1552678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1553678453a8Sspeer "==> nxge_txdma_hw_kick: channel %d", tdc)); 1554678453a8Sspeer nxge_txdma_hw_kick_channel(nxgep, ring, tdc); 1555678453a8Sspeer } 1556678453a8Sspeer } 155744961713Sgirish } 155844961713Sgirish 155944961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick")); 156044961713Sgirish } 156144961713Sgirish 156244961713Sgirish /*ARGSUSED*/ 156344961713Sgirish void 156444961713Sgirish nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel) 156544961713Sgirish { 
156644961713Sgirish p_tx_ring_t ring_p; 156744961713Sgirish 156844961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel")); 156944961713Sgirish 157044961713Sgirish ring_p = nxge_txdma_get_ring(nxgep, channel); 157144961713Sgirish if (ring_p == NULL) { 157244961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 157344961713Sgirish " nxge_txdma_kick_channel")); 157444961713Sgirish return; 157544961713Sgirish } 157644961713Sgirish 157744961713Sgirish if (ring_p->tdc != channel) { 157844961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 157944961713Sgirish "<== nxge_txdma_kick_channel: channel not matched " 158044961713Sgirish "ring tdc %d passed channel", 158144961713Sgirish ring_p->tdc, channel)); 158244961713Sgirish return; 158344961713Sgirish } 158444961713Sgirish 158544961713Sgirish nxge_txdma_hw_kick_channel(nxgep, ring_p, channel); 158644961713Sgirish 158744961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel")); 158844961713Sgirish } 1589a3c5bd6dSspeer 159044961713Sgirish /*ARGSUSED*/ 159144961713Sgirish void 159244961713Sgirish nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 159344961713Sgirish { 159444961713Sgirish 159544961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel")); 159644961713Sgirish 159744961713Sgirish if (ring_p == NULL) { 159844961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 159944961713Sgirish "<== nxge_txdma_hw_kick_channel: NULL ring pointer")); 160044961713Sgirish return; 160144961713Sgirish } 160244961713Sgirish 160344961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel")); 160444961713Sgirish } 160544961713Sgirish 1606678453a8Sspeer /* 1607678453a8Sspeer * nxge_check_tx_hang 1608678453a8Sspeer * 1609678453a8Sspeer * Check the state of all TDCs belonging to nxgep. 
1610678453a8Sspeer * 1611678453a8Sspeer * Arguments: 1612678453a8Sspeer * nxgep 1613678453a8Sspeer * 1614678453a8Sspeer * Notes: 1615678453a8Sspeer * Called by nxge_hw.c:nxge_check_hw_state(). 1616678453a8Sspeer * 1617678453a8Sspeer * NPI/NXGE function calls: 1618678453a8Sspeer * 1619678453a8Sspeer * Registers accessed: 1620678453a8Sspeer * 1621678453a8Sspeer * Context: 1622678453a8Sspeer * Any domain 1623678453a8Sspeer */ 162444961713Sgirish /*ARGSUSED*/ 162544961713Sgirish void 162644961713Sgirish nxge_check_tx_hang(p_nxge_t nxgep) 162744961713Sgirish { 162844961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang")); 162944961713Sgirish 163044961713Sgirish /* 163144961713Sgirish * Needs inputs from hardware for regs: 163244961713Sgirish * head index had not moved since last timeout. 163344961713Sgirish * packets not transmitted or stuffed registers. 163444961713Sgirish */ 163544961713Sgirish if (nxge_txdma_hung(nxgep)) { 163644961713Sgirish nxge_fixup_hung_txdma_rings(nxgep); 163744961713Sgirish } 163844961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang")); 163944961713Sgirish } 164044961713Sgirish 1641678453a8Sspeer /* 1642678453a8Sspeer * nxge_txdma_hung 1643678453a8Sspeer * 1644678453a8Sspeer * Reset a TDC. 1645678453a8Sspeer * 1646678453a8Sspeer * Arguments: 1647678453a8Sspeer * nxgep 1648678453a8Sspeer * channel The channel to reset. 1649678453a8Sspeer * reg_data The current TX_CS. 
1650678453a8Sspeer * 1651678453a8Sspeer * Notes: 1652678453a8Sspeer * Called by nxge_check_tx_hang() 1653678453a8Sspeer * 1654678453a8Sspeer * NPI/NXGE function calls: 1655678453a8Sspeer * nxge_txdma_channel_hung() 1656678453a8Sspeer * 1657678453a8Sspeer * Registers accessed: 1658678453a8Sspeer * 1659678453a8Sspeer * Context: 1660678453a8Sspeer * Any domain 1661678453a8Sspeer */ 166244961713Sgirish int 166344961713Sgirish nxge_txdma_hung(p_nxge_t nxgep) 166444961713Sgirish { 1665678453a8Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 1666678453a8Sspeer int tdc; 166744961713Sgirish 166844961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung")); 166944961713Sgirish 1670678453a8Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 167144961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 1672678453a8Sspeer "<== nxge_txdma_hung: NULL ring pointer(s)")); 167344961713Sgirish return (B_FALSE); 167444961713Sgirish } 167544961713Sgirish 1676678453a8Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1677678453a8Sspeer if ((1 << tdc) & set->owned.map) { 1678678453a8Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1679678453a8Sspeer if (ring) { 1680678453a8Sspeer if (nxge_txdma_channel_hung(nxgep, ring, tdc)) { 1681678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1682678453a8Sspeer "==> nxge_txdma_hung: TDC %d hung", 1683678453a8Sspeer tdc)); 1684678453a8Sspeer return (B_TRUE); 1685678453a8Sspeer } 1686678453a8Sspeer } 168744961713Sgirish } 168844961713Sgirish } 168944961713Sgirish 169044961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung")); 169144961713Sgirish 169244961713Sgirish return (B_FALSE); 169344961713Sgirish } 169444961713Sgirish 1695678453a8Sspeer /* 1696678453a8Sspeer * nxge_txdma_channel_hung 1697678453a8Sspeer * 1698678453a8Sspeer * Reset a TDC. 1699678453a8Sspeer * 1700678453a8Sspeer * Arguments: 1701678453a8Sspeer * nxgep 1702678453a8Sspeer * ring <channel>'s ring. 1703678453a8Sspeer * channel The channel to reset. 
1704678453a8Sspeer * 1705678453a8Sspeer * Notes: 1706678453a8Sspeer * Called by nxge_txdma.c:nxge_txdma_hung() 1707678453a8Sspeer * 1708678453a8Sspeer * NPI/NXGE function calls: 1709678453a8Sspeer * npi_txdma_ring_head_get() 1710678453a8Sspeer * 1711678453a8Sspeer * Registers accessed: 1712678453a8Sspeer * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low 1713678453a8Sspeer * 1714678453a8Sspeer * Context: 1715678453a8Sspeer * Any domain 1716678453a8Sspeer */ 171744961713Sgirish int 171844961713Sgirish nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel) 171944961713Sgirish { 172044961713Sgirish uint16_t head_index, tail_index; 172144961713Sgirish boolean_t head_wrap, tail_wrap; 172244961713Sgirish npi_handle_t handle; 172344961713Sgirish tx_ring_hdl_t tx_head; 172444961713Sgirish uint_t tx_rd_index; 172544961713Sgirish 172644961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung")); 172744961713Sgirish 172844961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 172944961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 173044961713Sgirish "==> nxge_txdma_channel_hung: channel %d", channel)); 173144961713Sgirish MUTEX_ENTER(&tx_ring_p->lock); 173244961713Sgirish (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 173344961713Sgirish 173444961713Sgirish tail_index = tx_ring_p->wr_index; 173544961713Sgirish tail_wrap = tx_ring_p->wr_index_wrap; 173644961713Sgirish tx_rd_index = tx_ring_p->rd_index; 173744961713Sgirish MUTEX_EXIT(&tx_ring_p->lock); 173844961713Sgirish 173944961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 174044961713Sgirish "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d " 174144961713Sgirish "tail_index %d tail_wrap %d ", 174244961713Sgirish channel, tx_rd_index, tail_index, tail_wrap)); 174344961713Sgirish /* 174444961713Sgirish * Read the hardware maintained transmit head 174544961713Sgirish * and wrap around bit. 
174644961713Sgirish */ 174744961713Sgirish (void) npi_txdma_ring_head_get(handle, channel, &tx_head); 174844961713Sgirish head_index = tx_head.bits.ldw.head; 174944961713Sgirish head_wrap = tx_head.bits.ldw.wrap; 175044961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 175144961713Sgirish "==> nxge_txdma_channel_hung: " 175244961713Sgirish "tx_rd_index %d tail %d tail_wrap %d " 175344961713Sgirish "head %d wrap %d", 175444961713Sgirish tx_rd_index, tail_index, tail_wrap, 175544961713Sgirish head_index, head_wrap)); 175644961713Sgirish 175744961713Sgirish if (TXDMA_RING_EMPTY(head_index, head_wrap, 175844961713Sgirish tail_index, tail_wrap) && 175944961713Sgirish (head_index == tx_rd_index)) { 176044961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 176144961713Sgirish "==> nxge_txdma_channel_hung: EMPTY")); 176244961713Sgirish return (B_FALSE); 176344961713Sgirish } 176444961713Sgirish 176544961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 176644961713Sgirish "==> nxge_txdma_channel_hung: Checking if ring full")); 176744961713Sgirish if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 176844961713Sgirish tail_wrap)) { 176944961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 177044961713Sgirish "==> nxge_txdma_channel_hung: full")); 177144961713Sgirish return (B_TRUE); 177244961713Sgirish } 177344961713Sgirish 177444961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung")); 177544961713Sgirish 177644961713Sgirish return (B_FALSE); 177744961713Sgirish } 177844961713Sgirish 1779678453a8Sspeer /* 1780678453a8Sspeer * nxge_fixup_hung_txdma_rings 1781678453a8Sspeer * 1782678453a8Sspeer * Disable a TDC. 1783678453a8Sspeer * 1784678453a8Sspeer * Arguments: 1785678453a8Sspeer * nxgep 1786678453a8Sspeer * channel The channel to reset. 1787678453a8Sspeer * reg_data The current TX_CS. 
1788678453a8Sspeer * 1789678453a8Sspeer * Notes: 1790678453a8Sspeer * Called by nxge_check_tx_hang() 1791678453a8Sspeer * 1792678453a8Sspeer * NPI/NXGE function calls: 1793678453a8Sspeer * npi_txdma_ring_head_get() 1794678453a8Sspeer * 1795678453a8Sspeer * Registers accessed: 1796678453a8Sspeer * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low 1797678453a8Sspeer * 1798678453a8Sspeer * Context: 1799678453a8Sspeer * Any domain 1800678453a8Sspeer */ 180144961713Sgirish /*ARGSUSED*/ 180244961713Sgirish void 180344961713Sgirish nxge_fixup_hung_txdma_rings(p_nxge_t nxgep) 180444961713Sgirish { 1805678453a8Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 1806678453a8Sspeer int tdc; 180744961713Sgirish 180844961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings")); 180944961713Sgirish 1810678453a8Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 181144961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 1812678453a8Sspeer "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 181344961713Sgirish return; 181444961713Sgirish } 181544961713Sgirish 1816678453a8Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1817678453a8Sspeer if ((1 << tdc) & set->owned.map) { 1818678453a8Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1819678453a8Sspeer if (ring) { 1820678453a8Sspeer nxge_txdma_fixup_hung_channel(nxgep, ring, tdc); 1821678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1822678453a8Sspeer "==> nxge_fixup_hung_txdma_rings: TDC %d", 1823678453a8Sspeer tdc)); 1824678453a8Sspeer } 1825678453a8Sspeer } 182644961713Sgirish } 182744961713Sgirish 182844961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings")); 182944961713Sgirish } 183044961713Sgirish 1831678453a8Sspeer /* 1832678453a8Sspeer * nxge_txdma_fixup_hung_channel 1833678453a8Sspeer * 1834678453a8Sspeer * 'Fix' a hung TDC. 1835678453a8Sspeer * 1836678453a8Sspeer * Arguments: 1837678453a8Sspeer * nxgep 1838678453a8Sspeer * channel The channel to fix. 
1839678453a8Sspeer * 1840678453a8Sspeer * Notes: 1841678453a8Sspeer * Called by nxge_fixup_hung_txdma_rings() 1842678453a8Sspeer * 1843678453a8Sspeer * 1. Reclaim the TDC. 1844678453a8Sspeer * 2. Disable the TDC. 1845678453a8Sspeer * 1846678453a8Sspeer * NPI/NXGE function calls: 1847678453a8Sspeer * nxge_txdma_reclaim() 1848678453a8Sspeer * npi_txdma_channel_disable(TX_CS) 1849678453a8Sspeer * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 1850678453a8Sspeer * 1851678453a8Sspeer * Registers accessed: 1852678453a8Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 1853678453a8Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 1854678453a8Sspeer * 1855678453a8Sspeer * Context: 1856678453a8Sspeer * Any domain 1857678453a8Sspeer */ 185844961713Sgirish /*ARGSUSED*/ 185944961713Sgirish void 186044961713Sgirish nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel) 186144961713Sgirish { 186244961713Sgirish p_tx_ring_t ring_p; 186344961713Sgirish 186444961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel")); 186544961713Sgirish ring_p = nxge_txdma_get_ring(nxgep, channel); 186644961713Sgirish if (ring_p == NULL) { 186744961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 186844961713Sgirish "<== nxge_txdma_fix_hung_channel")); 186944961713Sgirish return; 187044961713Sgirish } 187144961713Sgirish 187244961713Sgirish if (ring_p->tdc != channel) { 187344961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 187444961713Sgirish "<== nxge_txdma_fix_hung_channel: channel not matched " 187544961713Sgirish "ring tdc %d passed channel", 187644961713Sgirish ring_p->tdc, channel)); 187744961713Sgirish return; 187844961713Sgirish } 187944961713Sgirish 188044961713Sgirish nxge_txdma_fixup_channel(nxgep, ring_p, channel); 188144961713Sgirish 188244961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel")); 188344961713Sgirish } 188444961713Sgirish 188544961713Sgirish /*ARGSUSED*/ 188644961713Sgirish void 188744961713Sgirish 
nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, 188844961713Sgirish uint16_t channel) 188944961713Sgirish { 189044961713Sgirish npi_handle_t handle; 189144961713Sgirish tdmc_intr_dbg_t intr_dbg; 189244961713Sgirish int status = NXGE_OK; 189344961713Sgirish 189444961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel")); 189544961713Sgirish 189644961713Sgirish if (ring_p == NULL) { 189744961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 189844961713Sgirish "<== nxge_txdma_fixup_channel: NULL ring pointer")); 189944961713Sgirish return; 190044961713Sgirish } 190144961713Sgirish 190244961713Sgirish if (ring_p->tdc != channel) { 190344961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 190444961713Sgirish "<== nxge_txdma_fixup_hung_channel: channel " 190544961713Sgirish "not matched " 190644961713Sgirish "ring tdc %d passed channel", 190744961713Sgirish ring_p->tdc, channel)); 190844961713Sgirish return; 190944961713Sgirish } 191044961713Sgirish 191144961713Sgirish /* Reclaim descriptors */ 191244961713Sgirish MUTEX_ENTER(&ring_p->lock); 191344961713Sgirish (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 191444961713Sgirish MUTEX_EXIT(&ring_p->lock); 191544961713Sgirish 191644961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 191744961713Sgirish /* 191844961713Sgirish * Stop the dma channel waits for the stop done. 191944961713Sgirish * If the stop done bit is not set, then force 192044961713Sgirish * an error. 
192144961713Sgirish */ 192244961713Sgirish status = npi_txdma_channel_disable(handle, channel); 192344961713Sgirish if (!(status & NPI_TXDMA_STOP_FAILED)) { 192444961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 192544961713Sgirish "<== nxge_txdma_fixup_hung_channel: stopped OK " 192644961713Sgirish "ring tdc %d passed channel %d", 192744961713Sgirish ring_p->tdc, channel)); 192844961713Sgirish return; 192944961713Sgirish } 193044961713Sgirish 193144961713Sgirish /* Inject any error */ 193244961713Sgirish intr_dbg.value = 0; 193344961713Sgirish intr_dbg.bits.ldw.nack_pref = 1; 193444961713Sgirish (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 193544961713Sgirish 193644961713Sgirish /* Stop done bit will be set as a result of error injection */ 193744961713Sgirish status = npi_txdma_channel_disable(handle, channel); 193844961713Sgirish if (!(status & NPI_TXDMA_STOP_FAILED)) { 193944961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 194044961713Sgirish "<== nxge_txdma_fixup_hung_channel: stopped again" 194144961713Sgirish "ring tdc %d passed channel", 194244961713Sgirish ring_p->tdc, channel)); 194344961713Sgirish return; 194444961713Sgirish } 194544961713Sgirish 194644961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 194744961713Sgirish "<== nxge_txdma_fixup_hung_channel: stop done still not set!! 
" 194844961713Sgirish "ring tdc %d passed channel", 194944961713Sgirish ring_p->tdc, channel)); 195044961713Sgirish 195144961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel")); 195244961713Sgirish } 195344961713Sgirish 195444961713Sgirish /*ARGSUSED*/ 195544961713Sgirish void 195644961713Sgirish nxge_reclaim_rings(p_nxge_t nxgep) 195744961713Sgirish { 1958678453a8Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 1959678453a8Sspeer int tdc; 196044961713Sgirish 1961678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings")); 196244961713Sgirish 1963678453a8Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 196444961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 1965678453a8Sspeer "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 196644961713Sgirish return; 196744961713Sgirish } 196844961713Sgirish 1969678453a8Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1970678453a8Sspeer if ((1 << tdc) & set->owned.map) { 1971678453a8Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1972678453a8Sspeer if (ring) { 1973678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1974678453a8Sspeer "==> nxge_reclaim_rings: TDC %d", tdc)); 1975678453a8Sspeer MUTEX_ENTER(&ring->lock); 1976678453a8Sspeer (void) nxge_txdma_reclaim(nxgep, ring, tdc); 1977678453a8Sspeer MUTEX_EXIT(&ring->lock); 1978678453a8Sspeer } 1979678453a8Sspeer } 198044961713Sgirish } 198144961713Sgirish 198244961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings")); 198344961713Sgirish } 198444961713Sgirish 198544961713Sgirish void 198644961713Sgirish nxge_txdma_regs_dump_channels(p_nxge_t nxgep) 198744961713Sgirish { 1988678453a8Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 1989678453a8Sspeer npi_handle_t handle; 1990678453a8Sspeer int tdc; 199144961713Sgirish 1992678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels")); 199344961713Sgirish 199444961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 199544961713Sgirish 
1996678453a8Sspeer if (!isLDOMguest(nxgep)) { 1997678453a8Sspeer (void) npi_txdma_dump_fzc_regs(handle); 199844961713Sgirish 1999678453a8Sspeer /* Dump TXC registers. */ 2000678453a8Sspeer (void) npi_txc_dump_fzc_regs(handle); 2001678453a8Sspeer (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num); 200244961713Sgirish } 200344961713Sgirish 2004678453a8Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 200544961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 2006678453a8Sspeer "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 200744961713Sgirish return; 200844961713Sgirish } 200944961713Sgirish 2010678453a8Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 2011678453a8Sspeer if ((1 << tdc) & set->owned.map) { 2012678453a8Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 2013678453a8Sspeer if (ring) { 2014678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 2015678453a8Sspeer "==> nxge_txdma_regs_dump_channels: " 2016678453a8Sspeer "TDC %d", tdc)); 2017678453a8Sspeer (void) npi_txdma_dump_tdc_regs(handle, tdc); 2018678453a8Sspeer 2019678453a8Sspeer /* Dump TXC registers, if able to. 
*/ 2020678453a8Sspeer if (!isLDOMguest(nxgep)) { 2021678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 2022678453a8Sspeer "==> nxge_txdma_regs_dump_channels:" 2023678453a8Sspeer " FZC TDC %d", tdc)); 2024678453a8Sspeer (void) npi_txc_dump_tdc_fzc_regs 2025678453a8Sspeer (handle, tdc); 2026678453a8Sspeer } 2027678453a8Sspeer nxge_txdma_regs_dump(nxgep, tdc); 2028678453a8Sspeer } 2029678453a8Sspeer } 203044961713Sgirish } 203144961713Sgirish 203244961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump")); 203344961713Sgirish } 203444961713Sgirish 203544961713Sgirish void 203644961713Sgirish nxge_txdma_regs_dump(p_nxge_t nxgep, int channel) 203744961713Sgirish { 203844961713Sgirish npi_handle_t handle; 203944961713Sgirish tx_ring_hdl_t hdl; 204044961713Sgirish tx_ring_kick_t kick; 204144961713Sgirish tx_cs_t cs; 204244961713Sgirish txc_control_t control; 204344961713Sgirish uint32_t bitmap = 0; 204444961713Sgirish uint32_t burst = 0; 204544961713Sgirish uint32_t bytes = 0; 204644961713Sgirish dma_log_page_t cfg; 204744961713Sgirish 204844961713Sgirish printf("\n\tfunc # %d tdc %d ", 204944961713Sgirish nxgep->function_num, channel); 205044961713Sgirish cfg.page_num = 0; 205144961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 205244961713Sgirish (void) npi_txdma_log_page_get(handle, channel, &cfg); 205344961713Sgirish printf("\n\tlog page func %d valid page 0 %d", 205444961713Sgirish cfg.func_num, cfg.valid); 205544961713Sgirish cfg.page_num = 1; 205644961713Sgirish (void) npi_txdma_log_page_get(handle, channel, &cfg); 205744961713Sgirish printf("\n\tlog page func %d valid page 1 %d", 205844961713Sgirish cfg.func_num, cfg.valid); 205944961713Sgirish 206044961713Sgirish (void) npi_txdma_ring_head_get(handle, channel, &hdl); 206144961713Sgirish (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick); 206244961713Sgirish printf("\n\thead value is 0x%0llx", 206344961713Sgirish (long long)hdl.value); 206444961713Sgirish printf("\n\thead index %d", 
hdl.bits.ldw.head); 206544961713Sgirish printf("\n\tkick value is 0x%0llx", 206644961713Sgirish (long long)kick.value); 206744961713Sgirish printf("\n\ttail index %d\n", kick.bits.ldw.tail); 206844961713Sgirish 206944961713Sgirish (void) npi_txdma_control_status(handle, OP_GET, channel, &cs); 207044961713Sgirish printf("\n\tControl statue is 0x%0llx", (long long)cs.value); 207144961713Sgirish printf("\n\tControl status RST state %d", cs.bits.ldw.rst); 207244961713Sgirish 207344961713Sgirish (void) npi_txc_control(handle, OP_GET, &control); 207444961713Sgirish (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap); 207544961713Sgirish (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst); 207644961713Sgirish (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes); 207744961713Sgirish 207844961713Sgirish printf("\n\tTXC port control 0x%0llx", 207944961713Sgirish (long long)control.value); 208044961713Sgirish printf("\n\tTXC port bitmap 0x%x", bitmap); 208144961713Sgirish printf("\n\tTXC max burst %d", burst); 208244961713Sgirish printf("\n\tTXC bytes xmt %d\n", bytes); 208344961713Sgirish 208444961713Sgirish { 208544961713Sgirish ipp_status_t status; 208644961713Sgirish 208744961713Sgirish (void) npi_ipp_get_status(handle, nxgep->function_num, &status); 2088adfcba55Sjoycey #if defined(__i386) 2089adfcba55Sjoycey printf("\n\tIPP status 0x%llux\n", (uint64_t)status.value); 2090adfcba55Sjoycey #else 209144961713Sgirish printf("\n\tIPP status 0x%lux\n", (uint64_t)status.value); 2092adfcba55Sjoycey #endif 209344961713Sgirish } 209444961713Sgirish } 209544961713Sgirish 209644961713Sgirish /* 2097678453a8Sspeer * nxge_tdc_hvio_setup 2098678453a8Sspeer * 2099678453a8Sspeer * I'm not exactly sure what this code does. 2100678453a8Sspeer * 2101678453a8Sspeer * Arguments: 2102678453a8Sspeer * nxgep 2103678453a8Sspeer * channel The channel to map. 
2104678453a8Sspeer * 2105678453a8Sspeer * Notes: 2106678453a8Sspeer * 2107678453a8Sspeer * NPI/NXGE function calls: 2108678453a8Sspeer * na 2109678453a8Sspeer * 2110678453a8Sspeer * Context: 2111678453a8Sspeer * Service domain? 211244961713Sgirish */ 2113678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2114678453a8Sspeer static void 2115678453a8Sspeer nxge_tdc_hvio_setup( 2116678453a8Sspeer nxge_t *nxgep, int channel) 211744961713Sgirish { 2118678453a8Sspeer nxge_dma_common_t *data; 2119678453a8Sspeer nxge_dma_common_t *control; 2120678453a8Sspeer tx_ring_t *ring; 2121678453a8Sspeer 2122678453a8Sspeer ring = nxgep->tx_rings->rings[channel]; 2123678453a8Sspeer data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2124678453a8Sspeer 2125678453a8Sspeer ring->hv_set = B_FALSE; 2126678453a8Sspeer 2127678453a8Sspeer ring->hv_tx_buf_base_ioaddr_pp = 2128678453a8Sspeer (uint64_t)data->orig_ioaddr_pp; 2129678453a8Sspeer ring->hv_tx_buf_ioaddr_size = 2130678453a8Sspeer (uint64_t)data->orig_alength; 2131678453a8Sspeer 2132678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 2133678453a8Sspeer "hv data buf base io $%p size 0x%llx (%d) buf base io $%p " 2134678453a8Sspeer "orig vatopa base io $%p orig_len 0x%llx (%d)", 2135678453a8Sspeer ring->hv_tx_buf_base_ioaddr_pp, 2136678453a8Sspeer ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size, 2137678453a8Sspeer data->ioaddr_pp, data->orig_vatopa, 2138678453a8Sspeer data->orig_alength, data->orig_alength)); 2139678453a8Sspeer 2140678453a8Sspeer control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2141678453a8Sspeer 2142678453a8Sspeer ring->hv_tx_cntl_base_ioaddr_pp = 2143678453a8Sspeer (uint64_t)control->orig_ioaddr_pp; 2144678453a8Sspeer ring->hv_tx_cntl_ioaddr_size = 2145678453a8Sspeer (uint64_t)control->orig_alength; 2146678453a8Sspeer 2147678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 2148678453a8Sspeer "hv cntl base io $%p orig ioaddr_pp ($%p) " 
2149678453a8Sspeer "orig vatopa ($%p) size 0x%llx (%d 0x%x)", 2150678453a8Sspeer ring->hv_tx_cntl_base_ioaddr_pp, 2151678453a8Sspeer control->orig_ioaddr_pp, control->orig_vatopa, 2152678453a8Sspeer ring->hv_tx_cntl_ioaddr_size, 2153678453a8Sspeer control->orig_alength, control->orig_alength)); 2154678453a8Sspeer } 215544961713Sgirish #endif 215644961713Sgirish 2157678453a8Sspeer static nxge_status_t 2158678453a8Sspeer nxge_map_txdma(p_nxge_t nxgep, int channel) 2159678453a8Sspeer { 2160678453a8Sspeer nxge_dma_common_t **pData; 2161678453a8Sspeer nxge_dma_common_t **pControl; 2162678453a8Sspeer tx_ring_t **pRing, *ring; 2163678453a8Sspeer tx_mbox_t **mailbox; 2164678453a8Sspeer uint32_t num_chunks; 2165678453a8Sspeer 2166678453a8Sspeer nxge_status_t status = NXGE_OK; 216744961713Sgirish 2168678453a8Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma")); 216944961713Sgirish 2170678453a8Sspeer if (!nxgep->tx_cntl_pool_p->buf_allocated) { 2171678453a8Sspeer if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) { 2172678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2173678453a8Sspeer "<== nxge_map_txdma: buf not allocated")); 2174678453a8Sspeer return (NXGE_ERROR); 2175678453a8Sspeer } 217644961713Sgirish } 217744961713Sgirish 2178678453a8Sspeer if (nxge_alloc_txb(nxgep, channel) != NXGE_OK) 217944961713Sgirish return (NXGE_ERROR); 218044961713Sgirish 2181678453a8Sspeer num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2182678453a8Sspeer pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2183678453a8Sspeer pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2184678453a8Sspeer pRing = &nxgep->tx_rings->rings[channel]; 2185678453a8Sspeer mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 218644961713Sgirish 2187678453a8Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 218844961713Sgirish "tx_rings $%p tx_desc_rings $%p", 2189678453a8Sspeer nxgep->tx_rings, nxgep->tx_rings->rings)); 219044961713Sgirish 219144961713Sgirish /* 
2192678453a8Sspeer * Map descriptors from the buffer pools for <channel>. 219344961713Sgirish */ 219444961713Sgirish 2195678453a8Sspeer /* 2196678453a8Sspeer * Set up and prepare buffer blocks, descriptors 2197678453a8Sspeer * and mailbox. 2198678453a8Sspeer */ 2199678453a8Sspeer status = nxge_map_txdma_channel(nxgep, channel, 2200678453a8Sspeer pData, pRing, num_chunks, pControl, mailbox); 2201678453a8Sspeer if (status != NXGE_OK) { 2202678453a8Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2203678453a8Sspeer "==> nxge_map_txdma(%d): nxge_map_txdma_channel() " 2204678453a8Sspeer "returned 0x%x", 2205678453a8Sspeer nxgep, channel, status)); 2206678453a8Sspeer return (status); 220744961713Sgirish } 220844961713Sgirish 2209678453a8Sspeer ring = *pRing; 221044961713Sgirish 2211678453a8Sspeer ring->index = (uint16_t)channel; 2212678453a8Sspeer ring->tdc_stats = &nxgep->statsp->tdc_stats[channel]; 221344961713Sgirish 2214678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2215678453a8Sspeer if (isLDOMguest(nxgep)) { 2216678453a8Sspeer (void) nxge_tdc_lp_conf(nxgep, channel); 2217678453a8Sspeer } else { 2218678453a8Sspeer nxge_tdc_hvio_setup(nxgep, channel); 221944961713Sgirish } 2220678453a8Sspeer #endif 222144961713Sgirish 2222678453a8Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 2223678453a8Sspeer "(status 0x%x channel %d)", status, channel)); 222444961713Sgirish 2225678453a8Sspeer return (status); 222644961713Sgirish } 222744961713Sgirish 222844961713Sgirish static nxge_status_t 222944961713Sgirish nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel, 223044961713Sgirish p_nxge_dma_common_t *dma_buf_p, 223144961713Sgirish p_tx_ring_t *tx_desc_p, 223244961713Sgirish uint32_t num_chunks, 223344961713Sgirish p_nxge_dma_common_t *dma_cntl_p, 223444961713Sgirish p_tx_mbox_t *tx_mbox_p) 223544961713Sgirish { 223644961713Sgirish int status = NXGE_OK; 223744961713Sgirish 223844961713Sgirish /* 223944961713Sgirish * Set up and prepare buffer blocks, 
descriptors 224044961713Sgirish * and mailbox. 224144961713Sgirish */ 2242678453a8Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, 224344961713Sgirish "==> nxge_map_txdma_channel (channel %d)", channel)); 224444961713Sgirish /* 224544961713Sgirish * Transmit buffer blocks 224644961713Sgirish */ 224744961713Sgirish status = nxge_map_txdma_channel_buf_ring(nxgep, channel, 224844961713Sgirish dma_buf_p, tx_desc_p, num_chunks); 224944961713Sgirish if (status != NXGE_OK) { 225044961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 225144961713Sgirish "==> nxge_map_txdma_channel (channel %d): " 225244961713Sgirish "map buffer failed 0x%x", channel, status)); 225344961713Sgirish goto nxge_map_txdma_channel_exit; 225444961713Sgirish } 225544961713Sgirish 225644961713Sgirish /* 225744961713Sgirish * Transmit block ring, and mailbox. 225844961713Sgirish */ 225944961713Sgirish nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p, 226044961713Sgirish tx_mbox_p); 226144961713Sgirish 226244961713Sgirish goto nxge_map_txdma_channel_exit; 226344961713Sgirish 226444961713Sgirish nxge_map_txdma_channel_fail1: 2265678453a8Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, 226644961713Sgirish "==> nxge_map_txdma_channel: unmap buf" 226744961713Sgirish "(status 0x%x channel %d)", 226844961713Sgirish status, channel)); 226944961713Sgirish nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p); 227044961713Sgirish 227144961713Sgirish nxge_map_txdma_channel_exit: 2272678453a8Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, 227344961713Sgirish "<== nxge_map_txdma_channel: " 227444961713Sgirish "(status 0x%x channel %d)", 227544961713Sgirish status, channel)); 227644961713Sgirish 227744961713Sgirish return (status); 227844961713Sgirish } 227944961713Sgirish 228044961713Sgirish /*ARGSUSED*/ 228144961713Sgirish static void 2282678453a8Sspeer nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel) 228344961713Sgirish { 2284678453a8Sspeer tx_ring_t *ring; 2285678453a8Sspeer tx_mbox_t *mailbox; 
2286678453a8Sspeer 228744961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 228844961713Sgirish "==> nxge_unmap_txdma_channel (channel %d)", channel)); 228944961713Sgirish /* 229044961713Sgirish * unmap tx block ring, and mailbox. 229144961713Sgirish */ 2292678453a8Sspeer ring = nxgep->tx_rings->rings[channel]; 2293678453a8Sspeer mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2294678453a8Sspeer 2295678453a8Sspeer (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox); 229644961713Sgirish 229744961713Sgirish /* unmap buffer blocks */ 2298678453a8Sspeer (void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring); 2299678453a8Sspeer 2300678453a8Sspeer nxge_free_txb(nxgep, channel); 230144961713Sgirish 230244961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel")); 230344961713Sgirish } 230444961713Sgirish 2305678453a8Sspeer /* 2306678453a8Sspeer * nxge_map_txdma_channel_cfg_ring 2307678453a8Sspeer * 2308678453a8Sspeer * Map a TDC into our kernel space. 2309678453a8Sspeer * This function allocates all of the per-channel data structures. 2310678453a8Sspeer * 2311678453a8Sspeer * Arguments: 2312678453a8Sspeer * nxgep 2313678453a8Sspeer * dma_channel The channel to map. 2314678453a8Sspeer * dma_cntl_p 2315678453a8Sspeer * tx_ring_p dma_channel's transmit ring 2316678453a8Sspeer * tx_mbox_p dma_channel's mailbox 2317678453a8Sspeer * 2318678453a8Sspeer * Notes: 2319678453a8Sspeer * 2320678453a8Sspeer * NPI/NXGE function calls: 2321678453a8Sspeer * nxge_setup_dma_common() 2322678453a8Sspeer * 2323678453a8Sspeer * Registers accessed: 2324678453a8Sspeer * none. 
 *
 * Context:
 *	Any domain
 */
/*ARGSUSED*/
static void
nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
	p_nxge_dma_common_t *dma_cntl_p,
	p_tx_ring_t tx_ring_p,
	p_tx_mbox_t *tx_mbox_p)
{
	p_tx_mbox_t		mboxp;
	p_nxge_dma_common_t	cntl_dmap;
	p_nxge_dma_common_t	dmap;
	p_tx_rng_cfig_t		tx_ring_cfig_p;
	p_tx_ring_kick_t	tx_ring_kick_p;
	p_tx_cs_t		tx_cs_p;
	p_tx_dma_ent_msk_t	tx_evmask_p;
	p_txdma_mbh_t		mboxh_p;
	p_txdma_mbl_t		mboxl_p;
	uint64_t		tx_desc_len;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring"));

	cntl_dmap = *dma_cntl_p;

	/* Carve the descriptor ring out of the control DMA area. */
	dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
	nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
	    sizeof (tx_desc_t));
	/*
	 * Zero out transmit ring descriptors.
	 */
	bzero((caddr_t)dmap->kaddrp, dmap->alength);
	tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
	tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
	tx_cs_p = &(tx_ring_p->tx_cs);
	tx_evmask_p = &(tx_ring_p->tx_evmask);
	tx_ring_cfig_p->value = 0;
	tx_ring_kick_p->value = 0;
	tx_cs_p->value = 0;
	tx_evmask_p->value = 0;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
	    dma_channel,
	    dmap->dma_cookie.dmac_laddress));

	/*
	 * Build the software copy of TX_RNG_CFIG: ring DMA base address
	 * plus the ring length field.  The length is tx_ring_size >> 3;
	 * presumably the hardware counts the ring in units of 8
	 * descriptors -- TODO confirm against the NIU PRM.
	 */
	tx_ring_cfig_p->value = 0;
	tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
	tx_ring_cfig_p->value =
	    (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
	    (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
	    dma_channel,
	    tx_ring_cfig_p->value));

	/*
	 * Stage a reset bit in the software copy of TX_CS; the register
	 * write itself is not done here (presumably applied when the
	 * channel is started -- confirm with the callers).
	 */
	tx_cs_p->bits.ldw.rst = 1;

	/* Map in mailbox */
	mboxp = (p_tx_mbox_t)
	    KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
	dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
	mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
	mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
	mboxh_p->value = mboxl_p->value = 0;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
	    dmap->dma_cookie.dmac_laddress));

	/*
	 * Split the mailbox DMA address into the high and low register
	 * fields using the TXDMA_MBH_*/TXDMA_MBL_* shift/mask constants.
	 */
	mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
	    TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);

	mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
	    TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);

	/* NOTE(review): this debug message duplicates the one above. */
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
	    dmap->dma_cookie.dmac_laddress));
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
	    "mbox $%p",
	    mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));

	/* Clear the logical-page software copies, then enable pages 0/1. */
	tx_ring_p->page_valid.value = 0;
	tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
	tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
	tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
	tx_ring_p->page_hdl.value = 0;

	tx_ring_p->page_valid.bits.ldw.page0 = 1;
	tx_ring_p->page_valid.bits.ldw.page1 = 1;

	tx_ring_p->max_burst.value = 0;
	tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;

	/* Hand the newly allocated mailbox back to the caller. */
	*tx_mbox_p = mboxp;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_map_txdma_channel_cfg_ring"));
}

/*
 * nxge_unmap_txdma_channel_cfg_ring
 *
 *	Free the mailbox allocated by nxge_map_txdma_channel_cfg_ring().
 *	The descriptor ring itself lives inside tx_ring_p and is not
 *	freed here.
 *
 * Context:
 *	Any domain
 */
/*ARGSUSED*/
static void
nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
	p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
{
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
	    tx_ring_p->tdc));

	KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_unmap_txdma_channel_cfg_ring"));
}

/*
 * nxge_map_txdma_channel_buf_ring
 *
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to map.
 *	dma_buf_p
 *	tx_desc_p	channel's descriptor ring
 *	num_chunks
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	nxge_setup_dma_common()
 *
 * Registers accessed:
 *	none.
2463678453a8Sspeer * 2464678453a8Sspeer * Context: 2465678453a8Sspeer * Any domain 2466678453a8Sspeer */ 246744961713Sgirish static nxge_status_t 246844961713Sgirish nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 246944961713Sgirish p_nxge_dma_common_t *dma_buf_p, 247044961713Sgirish p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 247144961713Sgirish { 247244961713Sgirish p_nxge_dma_common_t dma_bufp, tmp_bufp; 247344961713Sgirish p_nxge_dma_common_t dmap; 247444961713Sgirish nxge_os_dma_handle_t tx_buf_dma_handle; 247544961713Sgirish p_tx_ring_t tx_ring_p; 247644961713Sgirish p_tx_msg_t tx_msg_ring; 247744961713Sgirish nxge_status_t status = NXGE_OK; 247844961713Sgirish int ddi_status = DDI_SUCCESS; 247944961713Sgirish int i, j, index; 248044961713Sgirish uint32_t size, bsize; 248144961713Sgirish uint32_t nblocks, nmsgs; 248244961713Sgirish 248344961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 248444961713Sgirish "==> nxge_map_txdma_channel_buf_ring")); 248544961713Sgirish 248644961713Sgirish dma_bufp = tmp_bufp = *dma_buf_p; 248744961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 248844961713Sgirish " nxge_map_txdma_channel_buf_ring: channel %d to map %d " 248944961713Sgirish "chunks bufp $%p", 249044961713Sgirish channel, num_chunks, dma_bufp)); 249144961713Sgirish 249244961713Sgirish nmsgs = 0; 249344961713Sgirish for (i = 0; i < num_chunks; i++, tmp_bufp++) { 249444961713Sgirish nmsgs += tmp_bufp->nblocks; 249544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 249644961713Sgirish "==> nxge_map_txdma_channel_buf_ring: channel %d " 249744961713Sgirish "bufp $%p nblocks %d nmsgs %d", 249844961713Sgirish channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 249944961713Sgirish } 250044961713Sgirish if (!nmsgs) { 250144961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 250244961713Sgirish "<== nxge_map_txdma_channel_buf_ring: channel %d " 250344961713Sgirish "no msg blocks", 250444961713Sgirish channel)); 250544961713Sgirish status = NXGE_ERROR; 250644961713Sgirish 
goto nxge_map_txdma_channel_buf_ring_exit; 250744961713Sgirish } 250844961713Sgirish 250944961713Sgirish tx_ring_p = (p_tx_ring_t) 251044961713Sgirish KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 251144961713Sgirish MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 251244961713Sgirish (void *)nxgep->interrupt_cookie); 25131f8914d5Sml 25141f8914d5Sml tx_ring_p->nxgep = nxgep; 25151f8914d5Sml tx_ring_p->serial = nxge_serialize_create(nmsgs, 25161f8914d5Sml nxge_serial_tx, tx_ring_p); 251744961713Sgirish /* 251844961713Sgirish * Allocate transmit message rings and handles for packets 251944961713Sgirish * not to be copied to premapped buffers. 252044961713Sgirish */ 252144961713Sgirish size = nmsgs * sizeof (tx_msg_t); 252244961713Sgirish tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 252344961713Sgirish for (i = 0; i < nmsgs; i++) { 252444961713Sgirish ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 252544961713Sgirish DDI_DMA_DONTWAIT, 0, 252644961713Sgirish &tx_msg_ring[i].dma_handle); 252744961713Sgirish if (ddi_status != DDI_SUCCESS) { 252844961713Sgirish status |= NXGE_DDI_FAILED; 252944961713Sgirish break; 253044961713Sgirish } 253144961713Sgirish } 253244961713Sgirish if (i < nmsgs) { 253356d930aeSspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 253456d930aeSspeer "Allocate handles failed.")); 253544961713Sgirish goto nxge_map_txdma_channel_buf_ring_fail1; 253644961713Sgirish } 253744961713Sgirish 253844961713Sgirish tx_ring_p->tdc = channel; 253944961713Sgirish tx_ring_p->tx_msg_ring = tx_msg_ring; 254044961713Sgirish tx_ring_p->tx_ring_size = nmsgs; 254144961713Sgirish tx_ring_p->num_chunks = num_chunks; 254244961713Sgirish if (!nxge_tx_intr_thres) { 254344961713Sgirish nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4; 254444961713Sgirish } 254544961713Sgirish tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1; 254644961713Sgirish tx_ring_p->rd_index = 0; 254744961713Sgirish tx_ring_p->wr_index = 0; 254844961713Sgirish tx_ring_p->ring_head.value = 0; 
254944961713Sgirish tx_ring_p->ring_kick_tail.value = 0; 255044961713Sgirish tx_ring_p->descs_pending = 0; 255144961713Sgirish 255244961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 255344961713Sgirish "==> nxge_map_txdma_channel_buf_ring: channel %d " 255444961713Sgirish "actual tx desc max %d nmsgs %d " 255544961713Sgirish "(config nxge_tx_ring_size %d)", 255644961713Sgirish channel, tx_ring_p->tx_ring_size, nmsgs, 255744961713Sgirish nxge_tx_ring_size)); 255844961713Sgirish 255944961713Sgirish /* 256044961713Sgirish * Map in buffers from the buffer pool. 256144961713Sgirish */ 256244961713Sgirish index = 0; 256344961713Sgirish bsize = dma_bufp->block_size; 256444961713Sgirish 256544961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: " 256644961713Sgirish "dma_bufp $%p tx_rng_p $%p " 256744961713Sgirish "tx_msg_rng_p $%p bsize %d", 256844961713Sgirish dma_bufp, tx_ring_p, tx_msg_ring, bsize)); 256944961713Sgirish 257044961713Sgirish tx_buf_dma_handle = dma_bufp->dma_handle; 257144961713Sgirish for (i = 0; i < num_chunks; i++, dma_bufp++) { 257244961713Sgirish bsize = dma_bufp->block_size; 257344961713Sgirish nblocks = dma_bufp->nblocks; 257444961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 257544961713Sgirish "==> nxge_map_txdma_channel_buf_ring: dma chunk %d " 257644961713Sgirish "size %d dma_bufp $%p", 257744961713Sgirish i, sizeof (nxge_dma_common_t), dma_bufp)); 257844961713Sgirish 257944961713Sgirish for (j = 0; j < nblocks; j++) { 258044961713Sgirish tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle; 258144961713Sgirish dmap = &tx_msg_ring[index++].buf_dma; 258244961713Sgirish #ifdef TX_MEM_DEBUG 258344961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 258444961713Sgirish "==> nxge_map_txdma_channel_buf_ring: j %d" 258544961713Sgirish "dmap $%p", i, dmap)); 258644961713Sgirish #endif 258744961713Sgirish nxge_setup_dma_common(dmap, dma_bufp, 1, 258844961713Sgirish bsize); 258944961713Sgirish } 259044961713Sgirish } 
259144961713Sgirish 259244961713Sgirish if (i < num_chunks) { 259356d930aeSspeer status = NXGE_ERROR; 259444961713Sgirish goto nxge_map_txdma_channel_buf_ring_fail1; 259544961713Sgirish } 259644961713Sgirish 259744961713Sgirish *tx_desc_p = tx_ring_p; 259844961713Sgirish 259944961713Sgirish goto nxge_map_txdma_channel_buf_ring_exit; 260044961713Sgirish 260144961713Sgirish nxge_map_txdma_channel_buf_ring_fail1: 26021f8914d5Sml if (tx_ring_p->serial) { 26031f8914d5Sml nxge_serialize_destroy(tx_ring_p->serial); 26041f8914d5Sml tx_ring_p->serial = NULL; 26051f8914d5Sml } 26061f8914d5Sml 260744961713Sgirish index--; 260844961713Sgirish for (; index >= 0; index--) { 260956d930aeSspeer if (tx_msg_ring[index].dma_handle != NULL) { 261056d930aeSspeer ddi_dma_free_handle(&tx_msg_ring[index].dma_handle); 261144961713Sgirish } 261244961713Sgirish } 261344961713Sgirish MUTEX_DESTROY(&tx_ring_p->lock); 261456d930aeSspeer KMEM_FREE(tx_msg_ring, size); 261544961713Sgirish KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 261644961713Sgirish 261756d930aeSspeer status = NXGE_ERROR; 261856d930aeSspeer 261944961713Sgirish nxge_map_txdma_channel_buf_ring_exit: 262044961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 262144961713Sgirish "<== nxge_map_txdma_channel_buf_ring status 0x%x", status)); 262244961713Sgirish 262344961713Sgirish return (status); 262444961713Sgirish } 262544961713Sgirish 262644961713Sgirish /*ARGSUSED*/ 262744961713Sgirish static void 262844961713Sgirish nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p) 262944961713Sgirish { 263044961713Sgirish p_tx_msg_t tx_msg_ring; 263144961713Sgirish p_tx_msg_t tx_msg_p; 263244961713Sgirish int i; 263344961713Sgirish 263444961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 263544961713Sgirish "==> nxge_unmap_txdma_channel_buf_ring")); 263644961713Sgirish if (tx_ring_p == NULL) { 263744961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 263844961713Sgirish "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp")); 
		return;
	}
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_unmap_txdma_channel_buf_ring: channel %d",
	    tx_ring_p->tdc));

	tx_msg_ring = tx_ring_p->tx_msg_ring;

	/*
	 * Since the serialization thread, timer thread and
	 * interrupt thread can all call the transmit reclaim,
	 * the unmapping function needs to acquire the lock
	 * to free those buffers which were transmitted
	 * by the hardware already.
	 */
	MUTEX_ENTER(&tx_ring_p->lock);
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_unmap_txdma_channel_buf_ring (reclaim): "
	    "channel %d",
	    tx_ring_p->tdc));
	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);

	/* Release any mblks still attached to ring slots. */
	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
		tx_msg_p = &tx_msg_ring[i];
		if (tx_msg_p->tx_message != NULL) {
			freemsg(tx_msg_p->tx_message);
			tx_msg_p->tx_message = NULL;
		}
	}

	/*
	 * Free the per-slot DMA handles.  The NULL assignment is outside
	 * the if on purpose: it also clears slots that never had a handle.
	 */
	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
		if (tx_msg_ring[i].dma_handle != NULL) {
			ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
		}
		tx_msg_ring[i].dma_handle = NULL;
	}

	MUTEX_EXIT(&tx_ring_p->lock);

	if (tx_ring_p->serial) {
		nxge_serialize_destroy(tx_ring_p->serial);
		tx_ring_p->serial = NULL;
	}

	MUTEX_DESTROY(&tx_ring_p->lock);
	KMEM_FREE(tx_msg_ring,
	    sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_unmap_txdma_channel_buf_ring"));
}

/*
 * nxge_txdma_hw_start
 *
 *	Look up the ring and mailbox for <channel> and start the channel
 *	via nxge_txdma_start_channel().
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to start.
 *
 * Context:
 *	Any domain
 */
static nxge_status_t
nxge_txdma_hw_start(p_nxge_t nxgep, int channel)
{
	p_tx_rings_t		tx_rings;
	p_tx_ring_t		*tx_desc_rings;
	p_tx_mbox_areas_t	tx_mbox_areas_p;
	p_tx_mbox_t		*tx_mbox_p;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start"));

	tx_rings = nxgep->tx_rings;
	if (tx_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_start: NULL ring pointer"));
		return (NXGE_ERROR);
	}
	tx_desc_rings = tx_rings->rings;
	if (tx_desc_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_start: NULL ring pointers"));
		return (NXGE_ERROR);
	}

	/*
	 * NOTE(review): NXGE_ERROR_MSG at a debug control level (MEM3_CTL)
	 * looks like it should be NXGE_DEBUG_MSG -- confirm intent.
	 */
	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
	    "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));

	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;

	status = nxge_txdma_start_channel(nxgep, channel,
	    (p_tx_ring_t)tx_desc_rings[channel],
	    (p_tx_mbox_t)tx_mbox_p[channel]);
	if (status != NXGE_OK) {
		goto nxge_txdma_hw_start_fail1;
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
	    "tx_rings $%p rings $%p",
	    nxgep->tx_rings, nxgep->tx_rings->rings));
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
	    "tx_rings $%p tx_desc_rings $%p",
	    nxgep->tx_rings, tx_desc_rings));

	goto nxge_txdma_hw_start_exit;

nxge_txdma_hw_start_fail1:
	/* Failure path only logs; the channel teardown happens elsewhere. */
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_txdma_hw_start: disable "
	    "(status 0x%x channel %d)", status, channel));

nxge_txdma_hw_start_exit:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_txdma_hw_start: (status 0x%x)", status));

	return (status);
}

/*
 * nxge_txdma_start_channel
 *
 *	Start a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to start.
 * 	tx_ring_p	channel's transmit descriptor ring.
 * 	tx_mbox_p	channel's mailbox.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	nxge_reset_txdma_channel()
 *	nxge_init_txdma_channel_event_mask()
 *	nxge_enable_txdma_channel()
 *
 * Registers accessed:
 *	none directly (see functions above).
 *
 * Context:
 *	Any domain
 */
static nxge_status_t
nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
		p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)

{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_txdma_start_channel (channel %d)", channel));
	/*
	 * TXDMA/TXC must be in stopped state.
	 */
	(void) nxge_txdma_stop_inj_err(nxgep, channel);

	/*
	 * Reset TXDMA channel: stage the reset bit in the software copy
	 * of TX_CS and hand that value to nxge_reset_txdma_channel().
	 */
	tx_ring_p->tx_cs.value = 0;
	tx_ring_p->tx_cs.bits.ldw.rst = 1;
	status = nxge_reset_txdma_channel(nxgep, channel,
	    tx_ring_p->tx_cs.value);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_txdma_start_channel (channel %d)"
		    " reset channel failed 0x%x", channel, status));
		goto nxge_txdma_start_channel_exit;
	}

	/*
	 * Initialize the TXDMA channel specific FZC control
	 * configurations. These FZC registers are pertaining
	 * to each TX channel (i.e. logical pages).
	 */
	/* FZC registers are owned by the service domain; skip in a guest. */
	if (!isLDOMguest(nxgep)) {
		status = nxge_init_fzc_txdma_channel(nxgep, channel,
		    tx_ring_p, tx_mbox_p);
		if (status != NXGE_OK) {
			goto nxge_txdma_start_channel_exit;
		}
	}

	/*
	 * Initialize the event masks.
	 */
	tx_ring_p->tx_evmask.value = 0;
	status = nxge_init_txdma_channel_event_mask(nxgep,
	    channel, &tx_ring_p->tx_evmask);
	if (status != NXGE_OK) {
		goto nxge_txdma_start_channel_exit;
	}

	/*
	 * Load TXDMA descriptors, buffers, mailbox,
	 * initialise the DMA channels and
	 * enable each DMA channel.
	 */
	status = nxge_enable_txdma_channel(nxgep, channel,
	    tx_ring_p, tx_mbox_p);
	if (status != NXGE_OK) {
		goto nxge_txdma_start_channel_exit;
	}

nxge_txdma_start_channel_exit:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));

	return (status);
}

/*
 * nxge_txdma_stop_channel
 *
 *	Stop a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to stop.
 * 	tx_ring_p	channel's transmit descriptor ring.
 * 	tx_mbox_p	channel's mailbox.
2851678453a8Sspeer * 2852678453a8Sspeer * Notes: 2853678453a8Sspeer * 2854678453a8Sspeer * NPI/NXGE function calls: 2855678453a8Sspeer * nxge_txdma_stop_inj_err() 2856678453a8Sspeer * nxge_reset_txdma_channel() 2857678453a8Sspeer * nxge_init_txdma_channel_event_mask() 2858678453a8Sspeer * nxge_init_txdma_channel_cntl_stat() 2859678453a8Sspeer * nxge_disable_txdma_channel() 2860678453a8Sspeer * 2861678453a8Sspeer * Registers accessed: 2862678453a8Sspeer * none directly (see functions above). 2863678453a8Sspeer * 2864678453a8Sspeer * Context: 2865678453a8Sspeer * Any domain 2866678453a8Sspeer */ 286744961713Sgirish /*ARGSUSED*/ 286844961713Sgirish static nxge_status_t 2869678453a8Sspeer nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 287044961713Sgirish { 2871678453a8Sspeer p_tx_ring_t tx_ring_p; 2872678453a8Sspeer int status = NXGE_OK; 287344961713Sgirish 287444961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 287544961713Sgirish "==> nxge_txdma_stop_channel: channel %d", channel)); 287644961713Sgirish 287744961713Sgirish /* 287844961713Sgirish * Stop (disable) TXDMA and TXC (if stop bit is set 287944961713Sgirish * and STOP_N_GO bit not set, the TXDMA reset state will 288044961713Sgirish * not be set if reset TXDMA. 288144961713Sgirish */ 288244961713Sgirish (void) nxge_txdma_stop_inj_err(nxgep, channel); 288344961713Sgirish 2884678453a8Sspeer tx_ring_p = nxgep->tx_rings->rings[channel]; 2885678453a8Sspeer 288644961713Sgirish /* 288744961713Sgirish * Reset TXDMA channel 288844961713Sgirish */ 288944961713Sgirish tx_ring_p->tx_cs.value = 0; 289044961713Sgirish tx_ring_p->tx_cs.bits.ldw.rst = 1; 289144961713Sgirish status = nxge_reset_txdma_channel(nxgep, channel, 289244961713Sgirish tx_ring_p->tx_cs.value); 289344961713Sgirish if (status != NXGE_OK) { 289444961713Sgirish goto nxge_txdma_stop_channel_exit; 289544961713Sgirish } 289644961713Sgirish 289744961713Sgirish #ifdef HARDWARE_REQUIRED 289844961713Sgirish /* Set up the interrupt event masks. 
*/ 289944961713Sgirish tx_ring_p->tx_evmask.value = 0; 290044961713Sgirish status = nxge_init_txdma_channel_event_mask(nxgep, 290144961713Sgirish channel, &tx_ring_p->tx_evmask); 290244961713Sgirish if (status != NXGE_OK) { 290344961713Sgirish goto nxge_txdma_stop_channel_exit; 290444961713Sgirish } 290544961713Sgirish 290644961713Sgirish /* Initialize the DMA control and status register */ 290744961713Sgirish tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL; 290844961713Sgirish status = nxge_init_txdma_channel_cntl_stat(nxgep, channel, 290944961713Sgirish tx_ring_p->tx_cs.value); 291044961713Sgirish if (status != NXGE_OK) { 291144961713Sgirish goto nxge_txdma_stop_channel_exit; 291244961713Sgirish } 291344961713Sgirish 2914678453a8Sspeer tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2915678453a8Sspeer 291644961713Sgirish /* Disable channel */ 291744961713Sgirish status = nxge_disable_txdma_channel(nxgep, channel, 2918678453a8Sspeer tx_ring_p, tx_mbox_p); 291944961713Sgirish if (status != NXGE_OK) { 292044961713Sgirish goto nxge_txdma_start_channel_exit; 292144961713Sgirish } 292244961713Sgirish 292344961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 292444961713Sgirish "==> nxge_txdma_stop_channel: event done")); 292544961713Sgirish 292644961713Sgirish #endif 292744961713Sgirish 292844961713Sgirish nxge_txdma_stop_channel_exit: 292944961713Sgirish NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel")); 293044961713Sgirish return (status); 293144961713Sgirish } 293244961713Sgirish 2933678453a8Sspeer /* 2934678453a8Sspeer * nxge_txdma_get_ring 2935678453a8Sspeer * 2936678453a8Sspeer * Get the ring for a TDC. 
2937678453a8Sspeer * 2938678453a8Sspeer * Arguments: 2939678453a8Sspeer * nxgep 2940678453a8Sspeer * channel 2941678453a8Sspeer * 2942678453a8Sspeer * Notes: 2943678453a8Sspeer * 2944678453a8Sspeer * NPI/NXGE function calls: 2945678453a8Sspeer * 2946678453a8Sspeer * Registers accessed: 2947678453a8Sspeer * 2948678453a8Sspeer * Context: 2949678453a8Sspeer * Any domain 2950678453a8Sspeer */ 295144961713Sgirish static p_tx_ring_t 295244961713Sgirish nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel) 295344961713Sgirish { 2954678453a8Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 2955678453a8Sspeer int tdc; 295644961713Sgirish 295744961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring")); 295844961713Sgirish 2959678453a8Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 296044961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 2961678453a8Sspeer "<== nxge_txdma_get_ring: NULL ring pointer(s)")); 2962678453a8Sspeer goto return_null; 2963678453a8Sspeer } 2964678453a8Sspeer 2965678453a8Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 2966678453a8Sspeer if ((1 << tdc) & set->owned.map) { 2967678453a8Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 2968678453a8Sspeer if (ring) { 2969678453a8Sspeer if (channel == ring->tdc) { 2970678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 2971678453a8Sspeer "<== nxge_txdma_get_ring: " 2972678453a8Sspeer "tdc %d ring $%p", tdc, ring)); 2973678453a8Sspeer return (ring); 2974678453a8Sspeer } 2975678453a8Sspeer } 297644961713Sgirish } 297744961713Sgirish } 297844961713Sgirish 2979678453a8Sspeer return_null: 2980678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: " 2981678453a8Sspeer "ring not found")); 2982678453a8Sspeer 298344961713Sgirish return (NULL); 298444961713Sgirish } 298544961713Sgirish 2986678453a8Sspeer /* 2987678453a8Sspeer * nxge_txdma_get_mbox 2988678453a8Sspeer * 2989678453a8Sspeer * Get the mailbox for a TDC. 
 *
 * Arguments:
 * 	nxgep
 * 	channel
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 */
static p_tx_mbox_t
nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));

	if (nxgep->tx_mbox_areas_p == 0 ||
	    nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)"));
		goto return_null;
	}

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_get_mbox: NULL ring pointer(s)"));
		goto return_null;
	}

	/*
	 * A TDC matches only if this instance owns it (owned.map bit set),
	 * its ring slot is populated, and the ring's tdc equals <channel>.
	 * The mailbox is then looked up by the same index.
	 */
	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				if (channel == ring->tdc) {
					tx_mbox_t *mailbox = nxgep->
					    tx_mbox_areas_p->
					    txmbox_areas_p[tdc];
					NXGE_DEBUG_MSG((nxgep, TX_CTL,
					    "<== nxge_txdma_get_mbox: tdc %d "
					    "ring $%p", tdc, mailbox));
					return (mailbox);
				}
			}
		}
	}

return_null:
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: "
	    "mailbox not found"));

	return (NULL);
}

/*
 * nxge_tx_err_evnts
 *
 *	Recover a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	index	The index to the TDC ring.
 * 	ldvp	Used to get the channel number ONLY.
 * 	cs	A copy of the bits from TX_CS.
 *
 * Notes:
 *	Calling tree:
 *	 nxge_tx_intr()
 *
 * NPI/NXGE function calls:
 *	npi_txdma_ring_error_get()
 *	npi_txdma_inj_par_error_get()
 *	nxge_txdma_fatal_err_recover()
 *
 * Registers accessed:
 *	TX_RNG_ERR_LOGH	DMC+0x40048 Transmit Ring Error Log High
 *	TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low
 *	TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
 *
 * Context:
 *	Any domain	XXX Remove code which accesses TDMC_INJ_PAR_ERR.
3076678453a8Sspeer */ 307744961713Sgirish /*ARGSUSED*/ 307844961713Sgirish static nxge_status_t 307944961713Sgirish nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs) 308044961713Sgirish { 308144961713Sgirish npi_handle_t handle; 308244961713Sgirish npi_status_t rs; 308344961713Sgirish uint8_t channel; 308444961713Sgirish p_tx_ring_t *tx_rings; 308544961713Sgirish p_tx_ring_t tx_ring_p; 308644961713Sgirish p_nxge_tx_ring_stats_t tdc_stats; 308744961713Sgirish boolean_t txchan_fatal = B_FALSE; 308844961713Sgirish nxge_status_t status = NXGE_OK; 308944961713Sgirish tdmc_inj_par_err_t par_err; 309044961713Sgirish uint32_t value; 309144961713Sgirish 3092678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts")); 309344961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 309444961713Sgirish channel = ldvp->channel; 309544961713Sgirish 309644961713Sgirish tx_rings = nxgep->tx_rings->rings; 309744961713Sgirish tx_ring_p = tx_rings[index]; 309844961713Sgirish tdc_stats = tx_ring_p->tdc_stats; 309944961713Sgirish if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) || 310044961713Sgirish (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) || 310144961713Sgirish (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) { 310244961713Sgirish if ((rs = npi_txdma_ring_error_get(handle, channel, 310344961713Sgirish &tdc_stats->errlog)) != NPI_SUCCESS) 310444961713Sgirish return (NXGE_ERROR | rs); 310544961713Sgirish } 310644961713Sgirish 310744961713Sgirish if (cs.bits.ldw.mbox_err) { 310844961713Sgirish tdc_stats->mbox_err++; 310944961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 311044961713Sgirish NXGE_FM_EREPORT_TDMC_MBOX_ERR); 311144961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 311244961713Sgirish "==> nxge_tx_err_evnts(channel %d): " 311344961713Sgirish "fatal error: mailbox", channel)); 311444961713Sgirish txchan_fatal = B_TRUE; 311544961713Sgirish } 311644961713Sgirish if 
(cs.bits.ldw.pkt_size_err) { 311744961713Sgirish tdc_stats->pkt_size_err++; 311844961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 311944961713Sgirish NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR); 312044961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 312144961713Sgirish "==> nxge_tx_err_evnts(channel %d): " 312244961713Sgirish "fatal error: pkt_size_err", channel)); 312344961713Sgirish txchan_fatal = B_TRUE; 312444961713Sgirish } 312544961713Sgirish if (cs.bits.ldw.tx_ring_oflow) { 312644961713Sgirish tdc_stats->tx_ring_oflow++; 312744961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 312844961713Sgirish NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW); 312944961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 313044961713Sgirish "==> nxge_tx_err_evnts(channel %d): " 313144961713Sgirish "fatal error: tx_ring_oflow", channel)); 313244961713Sgirish txchan_fatal = B_TRUE; 313344961713Sgirish } 313444961713Sgirish if (cs.bits.ldw.pref_buf_par_err) { 313544961713Sgirish tdc_stats->pre_buf_par_err++; 313644961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 313744961713Sgirish NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR); 313844961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 313944961713Sgirish "==> nxge_tx_err_evnts(channel %d): " 314044961713Sgirish "fatal error: pre_buf_par_err", channel)); 314144961713Sgirish /* Clear error injection source for parity error */ 314244961713Sgirish (void) npi_txdma_inj_par_error_get(handle, &value); 314344961713Sgirish par_err.value = value; 314444961713Sgirish par_err.bits.ldw.inject_parity_error &= ~(1 << channel); 314544961713Sgirish (void) npi_txdma_inj_par_error_set(handle, par_err.value); 314644961713Sgirish txchan_fatal = B_TRUE; 314744961713Sgirish } 314844961713Sgirish if (cs.bits.ldw.nack_pref) { 314944961713Sgirish tdc_stats->nack_pref++; 315044961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 315144961713Sgirish NXGE_FM_EREPORT_TDMC_NACK_PREF); 315244961713Sgirish 
NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 315344961713Sgirish "==> nxge_tx_err_evnts(channel %d): " 315444961713Sgirish "fatal error: nack_pref", channel)); 315544961713Sgirish txchan_fatal = B_TRUE; 315644961713Sgirish } 315744961713Sgirish if (cs.bits.ldw.nack_pkt_rd) { 315844961713Sgirish tdc_stats->nack_pkt_rd++; 315944961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 316044961713Sgirish NXGE_FM_EREPORT_TDMC_NACK_PKT_RD); 316144961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 316244961713Sgirish "==> nxge_tx_err_evnts(channel %d): " 316344961713Sgirish "fatal error: nack_pkt_rd", channel)); 316444961713Sgirish txchan_fatal = B_TRUE; 316544961713Sgirish } 316644961713Sgirish if (cs.bits.ldw.conf_part_err) { 316744961713Sgirish tdc_stats->conf_part_err++; 316844961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 316944961713Sgirish NXGE_FM_EREPORT_TDMC_CONF_PART_ERR); 317044961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 317144961713Sgirish "==> nxge_tx_err_evnts(channel %d): " 317244961713Sgirish "fatal error: config_partition_err", channel)); 317344961713Sgirish txchan_fatal = B_TRUE; 317444961713Sgirish } 317544961713Sgirish if (cs.bits.ldw.pkt_prt_err) { 317644961713Sgirish tdc_stats->pkt_part_err++; 317744961713Sgirish NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 317844961713Sgirish NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR); 317944961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 318044961713Sgirish "==> nxge_tx_err_evnts(channel %d): " 318144961713Sgirish "fatal error: pkt_prt_err", channel)); 318244961713Sgirish txchan_fatal = B_TRUE; 318344961713Sgirish } 318444961713Sgirish 318544961713Sgirish /* Clear error injection source in case this is an injected error */ 318644961713Sgirish TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0); 318744961713Sgirish 318844961713Sgirish if (txchan_fatal) { 318944961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 319044961713Sgirish " nxge_tx_err_evnts: " 
319144961713Sgirish " fatal error on channel %d cs 0x%llx\n", 319244961713Sgirish channel, cs.value)); 319344961713Sgirish status = nxge_txdma_fatal_err_recover(nxgep, channel, 319444961713Sgirish tx_ring_p); 319544961713Sgirish if (status == NXGE_OK) { 319644961713Sgirish FM_SERVICE_RESTORED(nxgep); 319744961713Sgirish } 319844961713Sgirish } 319944961713Sgirish 3200678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts")); 320144961713Sgirish 320244961713Sgirish return (status); 320344961713Sgirish } 320444961713Sgirish 320544961713Sgirish static nxge_status_t 3206678453a8Sspeer nxge_txdma_fatal_err_recover( 3207678453a8Sspeer p_nxge_t nxgep, 3208678453a8Sspeer uint16_t channel, 3209678453a8Sspeer p_tx_ring_t tx_ring_p) 321044961713Sgirish { 321144961713Sgirish npi_handle_t handle; 321244961713Sgirish npi_status_t rs = NPI_SUCCESS; 321344961713Sgirish p_tx_mbox_t tx_mbox_p; 321444961713Sgirish nxge_status_t status = NXGE_OK; 321544961713Sgirish 321644961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover")); 321744961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 321844961713Sgirish "Recovering from TxDMAChannel#%d error...", channel)); 321944961713Sgirish 322044961713Sgirish /* 322144961713Sgirish * Stop the dma channel waits for the stop done. 322244961713Sgirish * If the stop done bit is not set, then create 322344961713Sgirish * an error. 
322444961713Sgirish */ 322544961713Sgirish 322644961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 322744961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop...")); 322844961713Sgirish MUTEX_ENTER(&tx_ring_p->lock); 322944961713Sgirish rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 323044961713Sgirish if (rs != NPI_SUCCESS) { 323144961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 323244961713Sgirish "==> nxge_txdma_fatal_err_recover (channel %d): " 323344961713Sgirish "stop failed ", channel)); 323444961713Sgirish goto fail; 323544961713Sgirish } 323644961713Sgirish 323744961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim...")); 323844961713Sgirish (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 323944961713Sgirish 324044961713Sgirish /* 324144961713Sgirish * Reset TXDMA channel 324244961713Sgirish */ 324344961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset...")); 324444961713Sgirish if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 324544961713Sgirish NPI_SUCCESS) { 324644961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 324744961713Sgirish "==> nxge_txdma_fatal_err_recover (channel %d)" 324844961713Sgirish " reset channel failed 0x%x", channel, rs)); 324944961713Sgirish goto fail; 325044961713Sgirish } 325144961713Sgirish 325244961713Sgirish /* 325344961713Sgirish * Reset the tail (kick) register to 0. 325444961713Sgirish * (Hardware will not reset it. Tx overflow fatal 325544961713Sgirish * error if tail is not set to 0 after reset! 325644961713Sgirish */ 325744961713Sgirish TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 325844961713Sgirish 325944961713Sgirish /* Restart TXDMA channel */ 326044961713Sgirish 3261678453a8Sspeer if (!isLDOMguest(nxgep)) { 3262678453a8Sspeer tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 326344961713Sgirish 3264678453a8Sspeer // XXX This is a problem in HIO! 
3265678453a8Sspeer /* 3266678453a8Sspeer * Initialize the TXDMA channel specific FZC control 3267678453a8Sspeer * configurations. These FZC registers are pertaining 3268678453a8Sspeer * to each TX channel (i.e. logical pages). 3269678453a8Sspeer */ 3270678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart...")); 3271678453a8Sspeer status = nxge_init_fzc_txdma_channel(nxgep, channel, 3272678453a8Sspeer tx_ring_p, tx_mbox_p); 3273678453a8Sspeer if (status != NXGE_OK) 3274678453a8Sspeer goto fail; 3275678453a8Sspeer } 327644961713Sgirish 327744961713Sgirish /* 327844961713Sgirish * Initialize the event masks. 327944961713Sgirish */ 328044961713Sgirish tx_ring_p->tx_evmask.value = 0; 328144961713Sgirish status = nxge_init_txdma_channel_event_mask(nxgep, channel, 328244961713Sgirish &tx_ring_p->tx_evmask); 328344961713Sgirish if (status != NXGE_OK) 328444961713Sgirish goto fail; 328544961713Sgirish 328644961713Sgirish tx_ring_p->wr_index_wrap = B_FALSE; 328744961713Sgirish tx_ring_p->wr_index = 0; 328844961713Sgirish tx_ring_p->rd_index = 0; 328944961713Sgirish 329044961713Sgirish /* 329144961713Sgirish * Load TXDMA descriptors, buffers, mailbox, 329244961713Sgirish * initialise the DMA channels and 329344961713Sgirish * enable each DMA channel. 
329444961713Sgirish */ 329544961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable...")); 329644961713Sgirish status = nxge_enable_txdma_channel(nxgep, channel, 329744961713Sgirish tx_ring_p, tx_mbox_p); 329844961713Sgirish MUTEX_EXIT(&tx_ring_p->lock); 329944961713Sgirish if (status != NXGE_OK) 330044961713Sgirish goto fail; 330144961713Sgirish 330244961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 330344961713Sgirish "Recovery Successful, TxDMAChannel#%d Restored", 330444961713Sgirish channel)); 330544961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover")); 330644961713Sgirish 330744961713Sgirish return (NXGE_OK); 330844961713Sgirish 330944961713Sgirish fail: 331044961713Sgirish MUTEX_EXIT(&tx_ring_p->lock); 331144961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, 331244961713Sgirish "nxge_txdma_fatal_err_recover (channel %d): " 331344961713Sgirish "failed to recover this txdma channel", channel)); 331444961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 331544961713Sgirish 331644961713Sgirish return (status); 331744961713Sgirish } 331844961713Sgirish 3319678453a8Sspeer /* 3320678453a8Sspeer * nxge_tx_port_fatal_err_recover 3321678453a8Sspeer * 3322678453a8Sspeer * Attempt to recover from a fatal port error. 3323678453a8Sspeer * 3324678453a8Sspeer * Arguments: 3325678453a8Sspeer * nxgep 3326678453a8Sspeer * 3327678453a8Sspeer * Notes: 3328678453a8Sspeer * How would a guest do this? 
3329678453a8Sspeer * 3330678453a8Sspeer * NPI/NXGE function calls: 3331678453a8Sspeer * 3332678453a8Sspeer * Registers accessed: 3333678453a8Sspeer * 3334678453a8Sspeer * Context: 3335678453a8Sspeer * Service domain 3336678453a8Sspeer */ 333744961713Sgirish nxge_status_t 333844961713Sgirish nxge_tx_port_fatal_err_recover(p_nxge_t nxgep) 333944961713Sgirish { 3340678453a8Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 3341678453a8Sspeer nxge_channel_t tdc; 3342678453a8Sspeer 3343678453a8Sspeer tx_ring_t *ring; 3344678453a8Sspeer tx_mbox_t *mailbox; 3345678453a8Sspeer 334644961713Sgirish npi_handle_t handle; 3347678453a8Sspeer nxge_status_t status; 3348678453a8Sspeer npi_status_t rs; 334944961713Sgirish 335044961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover")); 335144961713Sgirish NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3352678453a8Sspeer "Recovering from TxPort error...")); 335344961713Sgirish 3354678453a8Sspeer if (isLDOMguest(nxgep)) { 3355678453a8Sspeer return (NXGE_OK); 335644961713Sgirish } 335744961713Sgirish 3358678453a8Sspeer if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3359678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 3360678453a8Sspeer "<== nxge_tx_port_fatal_err_recover: not initialized")); 3361678453a8Sspeer return (NXGE_ERROR); 336244961713Sgirish } 336344961713Sgirish 3364678453a8Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3365678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 3366678453a8Sspeer "<== nxge_tx_port_fatal_err_recover: " 3367678453a8Sspeer "NULL ring pointer(s)")); 3368678453a8Sspeer return (NXGE_ERROR); 3369678453a8Sspeer } 337044961713Sgirish 3371678453a8Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3372678453a8Sspeer if ((1 << tdc) & set->owned.map) { 3373678453a8Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3374678453a8Sspeer if (ring) 3375678453a8Sspeer MUTEX_ENTER(&ring->lock); 337644961713Sgirish } 337744961713Sgirish } 337844961713Sgirish 3379678453a8Sspeer handle = 
NXGE_DEV_NPI_HANDLE(nxgep); 3380678453a8Sspeer 338144961713Sgirish /* 3382678453a8Sspeer * Stop all the TDCs owned by us. 3383678453a8Sspeer * (The shared TDCs will have been stopped by their owners.) 338444961713Sgirish */ 3385678453a8Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3386678453a8Sspeer if ((1 << tdc) & set->owned.map) { 3387678453a8Sspeer ring = nxgep->tx_rings->rings[tdc]; 3388678453a8Sspeer if (ring) { 3389678453a8Sspeer rs = npi_txdma_channel_control 3390678453a8Sspeer (handle, TXDMA_STOP, tdc); 3391678453a8Sspeer if (rs != NPI_SUCCESS) { 3392678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3393678453a8Sspeer "nxge_tx_port_fatal_err_recover " 3394678453a8Sspeer "(channel %d): stop failed ", tdc)); 3395678453a8Sspeer goto fail; 3396678453a8Sspeer } 3397678453a8Sspeer } 339844961713Sgirish } 3399678453a8Sspeer } 340044961713Sgirish 3401678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs...")); 340244961713Sgirish 3403678453a8Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3404678453a8Sspeer if ((1 << tdc) & set->owned.map) { 3405678453a8Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3406678453a8Sspeer if (ring) 3407678453a8Sspeer (void) nxge_txdma_reclaim(nxgep, ring, 0); 3408678453a8Sspeer } 340944961713Sgirish } 341044961713Sgirish 341144961713Sgirish /* 3412678453a8Sspeer * Reset all the TDCs. 
341344961713Sgirish */ 3414678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs...")); 3415678453a8Sspeer 3416678453a8Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3417678453a8Sspeer if ((1 << tdc) & set->owned.map) { 3418678453a8Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3419678453a8Sspeer if (ring) { 3420678453a8Sspeer if ((rs = npi_txdma_channel_control 3421678453a8Sspeer (handle, TXDMA_RESET, tdc)) 3422678453a8Sspeer != NPI_SUCCESS) { 3423678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3424678453a8Sspeer "nxge_tx_port_fatal_err_recover " 3425678453a8Sspeer "(channel %d) reset channel " 3426678453a8Sspeer "failed 0x%x", tdc, rs)); 3427678453a8Sspeer goto fail; 3428678453a8Sspeer } 3429678453a8Sspeer } 3430678453a8Sspeer /* 3431678453a8Sspeer * Reset the tail (kick) register to 0. 3432678453a8Sspeer * (Hardware will not reset it. Tx overflow fatal 3433678453a8Sspeer * error if tail is not set to 0 after reset! 3434678453a8Sspeer */ 3435678453a8Sspeer TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0); 343644961713Sgirish } 3437678453a8Sspeer } 343844961713Sgirish 3439678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs...")); 3440678453a8Sspeer 3441678453a8Sspeer /* Restart all the TDCs */ 3442678453a8Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3443678453a8Sspeer if ((1 << tdc) & set->owned.map) { 3444678453a8Sspeer ring = nxgep->tx_rings->rings[tdc]; 3445678453a8Sspeer if (ring) { 3446678453a8Sspeer mailbox = nxge_txdma_get_mbox(nxgep, tdc); 3447678453a8Sspeer status = nxge_init_fzc_txdma_channel(nxgep, tdc, 3448678453a8Sspeer ring, mailbox); 3449678453a8Sspeer ring->tx_evmask.value = 0; 3450678453a8Sspeer /* 3451678453a8Sspeer * Initialize the event masks. 
3452678453a8Sspeer */ 3453678453a8Sspeer status = nxge_init_txdma_channel_event_mask 3454678453a8Sspeer (nxgep, tdc, &ring->tx_evmask); 3455678453a8Sspeer 3456678453a8Sspeer ring->wr_index_wrap = B_FALSE; 3457678453a8Sspeer ring->wr_index = 0; 3458678453a8Sspeer ring->rd_index = 0; 3459678453a8Sspeer 3460678453a8Sspeer if (status != NXGE_OK) 3461678453a8Sspeer goto fail; 3462678453a8Sspeer if (status != NXGE_OK) 3463678453a8Sspeer goto fail; 3464678453a8Sspeer } 3465678453a8Sspeer } 346644961713Sgirish } 346744961713Sgirish 3468678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs...")); 346944961713Sgirish 3470678453a8Sspeer /* Re-enable all the TDCs */ 3471678453a8Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3472678453a8Sspeer if ((1 << tdc) & set->owned.map) { 3473678453a8Sspeer ring = nxgep->tx_rings->rings[tdc]; 3474678453a8Sspeer if (ring) { 3475678453a8Sspeer mailbox = nxge_txdma_get_mbox(nxgep, tdc); 3476678453a8Sspeer status = nxge_enable_txdma_channel(nxgep, tdc, 3477678453a8Sspeer ring, mailbox); 3478678453a8Sspeer if (status != NXGE_OK) 3479678453a8Sspeer goto fail; 3480678453a8Sspeer } 348144961713Sgirish } 348244961713Sgirish } 348344961713Sgirish 3484678453a8Sspeer /* 3485678453a8Sspeer * Unlock all the TDCs. 
3486678453a8Sspeer */ 3487678453a8Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3488678453a8Sspeer if ((1 << tdc) & set->owned.map) { 3489678453a8Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3490678453a8Sspeer if (ring) 3491678453a8Sspeer MUTEX_EXIT(&ring->lock); 349244961713Sgirish } 349344961713Sgirish } 349444961713Sgirish 3495678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded")); 349644961713Sgirish NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 349744961713Sgirish 349844961713Sgirish return (NXGE_OK); 349944961713Sgirish 350044961713Sgirish fail: 3501678453a8Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3502678453a8Sspeer if ((1 << tdc) & set->owned.map) { 3503678453a8Sspeer ring = nxgep->tx_rings->rings[tdc]; 3504678453a8Sspeer if (ring) 3505678453a8Sspeer MUTEX_EXIT(&ring->lock); 350644961713Sgirish } 350744961713Sgirish } 350844961713Sgirish 3509678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed")); 3510678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 351144961713Sgirish 351244961713Sgirish return (status); 351344961713Sgirish } 351444961713Sgirish 3515678453a8Sspeer /* 3516678453a8Sspeer * nxge_txdma_inject_err 3517678453a8Sspeer * 3518678453a8Sspeer * Inject an error into a TDC. 3519678453a8Sspeer * 3520678453a8Sspeer * Arguments: 3521678453a8Sspeer * nxgep 3522678453a8Sspeer * err_id The error to inject. 3523678453a8Sspeer * chan The channel to inject into. 3524678453a8Sspeer * 3525678453a8Sspeer * Notes: 3526678453a8Sspeer * This is called from nxge_main.c:nxge_err_inject() 3527678453a8Sspeer * Has this ioctl ever been used? 
3528678453a8Sspeer * 3529678453a8Sspeer * NPI/NXGE function calls: 3530678453a8Sspeer * npi_txdma_inj_par_error_get() 3531678453a8Sspeer * npi_txdma_inj_par_error_set() 3532678453a8Sspeer * 3533678453a8Sspeer * Registers accessed: 3534678453a8Sspeer * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 3535678453a8Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 3536678453a8Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 3537678453a8Sspeer * 3538678453a8Sspeer * Context: 3539678453a8Sspeer * Service domain 3540678453a8Sspeer */ 354144961713Sgirish void 354244961713Sgirish nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 354344961713Sgirish { 354444961713Sgirish tdmc_intr_dbg_t tdi; 354544961713Sgirish tdmc_inj_par_err_t par_err; 354644961713Sgirish uint32_t value; 354744961713Sgirish npi_handle_t handle; 354844961713Sgirish 354944961713Sgirish switch (err_id) { 355044961713Sgirish 355144961713Sgirish case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR: 355244961713Sgirish handle = NXGE_DEV_NPI_HANDLE(nxgep); 355344961713Sgirish /* Clear error injection source for parity error */ 355444961713Sgirish (void) npi_txdma_inj_par_error_get(handle, &value); 355544961713Sgirish par_err.value = value; 355644961713Sgirish par_err.bits.ldw.inject_parity_error &= ~(1 << chan); 355744961713Sgirish (void) npi_txdma_inj_par_error_set(handle, par_err.value); 355844961713Sgirish 355944961713Sgirish par_err.bits.ldw.inject_parity_error = (1 << chan); 356044961713Sgirish (void) npi_txdma_inj_par_error_get(handle, &value); 356144961713Sgirish par_err.value = value; 356244961713Sgirish par_err.bits.ldw.inject_parity_error |= (1 << chan); 356344961713Sgirish cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n", 356444961713Sgirish (unsigned long long)par_err.value); 356544961713Sgirish (void) npi_txdma_inj_par_error_set(handle, par_err.value); 356644961713Sgirish break; 356744961713Sgirish 356844961713Sgirish case 
NXGE_FM_EREPORT_TDMC_MBOX_ERR: 356944961713Sgirish case NXGE_FM_EREPORT_TDMC_NACK_PREF: 357044961713Sgirish case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD: 357144961713Sgirish case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR: 357244961713Sgirish case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW: 357344961713Sgirish case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR: 357444961713Sgirish case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR: 357544961713Sgirish TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 357644961713Sgirish chan, &tdi.value); 357744961713Sgirish if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR) 357844961713Sgirish tdi.bits.ldw.pref_buf_par_err = 1; 357944961713Sgirish else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR) 358044961713Sgirish tdi.bits.ldw.mbox_err = 1; 358144961713Sgirish else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF) 358244961713Sgirish tdi.bits.ldw.nack_pref = 1; 358344961713Sgirish else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD) 358444961713Sgirish tdi.bits.ldw.nack_pkt_rd = 1; 358544961713Sgirish else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR) 358644961713Sgirish tdi.bits.ldw.pkt_size_err = 1; 358744961713Sgirish else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW) 358844961713Sgirish tdi.bits.ldw.tx_ring_oflow = 1; 358944961713Sgirish else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR) 359044961713Sgirish tdi.bits.ldw.conf_part_err = 1; 359144961713Sgirish else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR) 359244961713Sgirish tdi.bits.ldw.pkt_part_err = 1; 3593adfcba55Sjoycey #if defined(__i386) 3594adfcba55Sjoycey cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n", 3595adfcba55Sjoycey tdi.value); 3596adfcba55Sjoycey #else 359744961713Sgirish cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n", 359844961713Sgirish tdi.value); 3599adfcba55Sjoycey #endif 360044961713Sgirish TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 360144961713Sgirish chan, tdi.value); 360244961713Sgirish 360344961713Sgirish break; 360444961713Sgirish } 
360544961713Sgirish } 3606