1*dc0cb1cdSDale Ghent /****************************************************************************** 2*dc0cb1cdSDale Ghent 3*dc0cb1cdSDale Ghent Copyright (c) 2001-2015, Intel Corporation 4*dc0cb1cdSDale Ghent All rights reserved. 5*dc0cb1cdSDale Ghent 6*dc0cb1cdSDale Ghent Redistribution and use in source and binary forms, with or without 7*dc0cb1cdSDale Ghent modification, are permitted provided that the following conditions are met: 8*dc0cb1cdSDale Ghent 9*dc0cb1cdSDale Ghent 1. Redistributions of source code must retain the above copyright notice, 10*dc0cb1cdSDale Ghent this list of conditions and the following disclaimer. 11*dc0cb1cdSDale Ghent 12*dc0cb1cdSDale Ghent 2. Redistributions in binary form must reproduce the above copyright 13*dc0cb1cdSDale Ghent notice, this list of conditions and the following disclaimer in the 14*dc0cb1cdSDale Ghent documentation and/or other materials provided with the distribution. 15*dc0cb1cdSDale Ghent 16*dc0cb1cdSDale Ghent 3. Neither the name of the Intel Corporation nor the names of its 17*dc0cb1cdSDale Ghent contributors may be used to endorse or promote products derived from 18*dc0cb1cdSDale Ghent this software without specific prior written permission. 19*dc0cb1cdSDale Ghent 20*dc0cb1cdSDale Ghent THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21*dc0cb1cdSDale Ghent AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22*dc0cb1cdSDale Ghent IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23*dc0cb1cdSDale Ghent ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24*dc0cb1cdSDale Ghent LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25*dc0cb1cdSDale Ghent CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26*dc0cb1cdSDale Ghent SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27*dc0cb1cdSDale Ghent INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28*dc0cb1cdSDale Ghent CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29*dc0cb1cdSDale Ghent ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30*dc0cb1cdSDale Ghent POSSIBILITY OF SUCH DAMAGE. 31*dc0cb1cdSDale Ghent 32*dc0cb1cdSDale Ghent ******************************************************************************/ 33*dc0cb1cdSDale Ghent /*$FreeBSD$*/ 34*dc0cb1cdSDale Ghent 35*dc0cb1cdSDale Ghent 36*dc0cb1cdSDale Ghent #include "ixgbe_type.h" 37*dc0cb1cdSDale Ghent #include "ixgbe_dcb.h" 38*dc0cb1cdSDale Ghent #include "ixgbe_dcb_82599.h" 39*dc0cb1cdSDale Ghent 40*dc0cb1cdSDale Ghent /** 41*dc0cb1cdSDale Ghent * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class 42*dc0cb1cdSDale Ghent * @hw: pointer to hardware structure 43*dc0cb1cdSDale Ghent * @stats: pointer to statistics structure 44*dc0cb1cdSDale Ghent * @tc_count: Number of elements in bwg_array. 45*dc0cb1cdSDale Ghent * 46*dc0cb1cdSDale Ghent * This function returns the status data for each of the Traffic Classes in use. 
47*dc0cb1cdSDale Ghent */ 48*dc0cb1cdSDale Ghent s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw, 49*dc0cb1cdSDale Ghent struct ixgbe_hw_stats *stats, 50*dc0cb1cdSDale Ghent u8 tc_count) 51*dc0cb1cdSDale Ghent { 52*dc0cb1cdSDale Ghent int tc; 53*dc0cb1cdSDale Ghent 54*dc0cb1cdSDale Ghent DEBUGFUNC("dcb_get_tc_stats"); 55*dc0cb1cdSDale Ghent 56*dc0cb1cdSDale Ghent if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) 57*dc0cb1cdSDale Ghent return IXGBE_ERR_PARAM; 58*dc0cb1cdSDale Ghent 59*dc0cb1cdSDale Ghent /* Statistics pertaining to each traffic class */ 60*dc0cb1cdSDale Ghent for (tc = 0; tc < tc_count; tc++) { 61*dc0cb1cdSDale Ghent /* Transmitted Packets */ 62*dc0cb1cdSDale Ghent stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc)); 63*dc0cb1cdSDale Ghent /* Transmitted Bytes (read low first to prevent missed carry) */ 64*dc0cb1cdSDale Ghent stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc)); 65*dc0cb1cdSDale Ghent stats->qbtc[tc] += 66*dc0cb1cdSDale Ghent (((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32); 67*dc0cb1cdSDale Ghent /* Received Packets */ 68*dc0cb1cdSDale Ghent stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); 69*dc0cb1cdSDale Ghent /* Received Bytes (read low first to prevent missed carry) */ 70*dc0cb1cdSDale Ghent stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc)); 71*dc0cb1cdSDale Ghent stats->qbrc[tc] += 72*dc0cb1cdSDale Ghent (((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32); 73*dc0cb1cdSDale Ghent 74*dc0cb1cdSDale Ghent /* Received Dropped Packet */ 75*dc0cb1cdSDale Ghent stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc)); 76*dc0cb1cdSDale Ghent } 77*dc0cb1cdSDale Ghent 78*dc0cb1cdSDale Ghent return IXGBE_SUCCESS; 79*dc0cb1cdSDale Ghent } 80*dc0cb1cdSDale Ghent 81*dc0cb1cdSDale Ghent /** 82*dc0cb1cdSDale Ghent * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data 83*dc0cb1cdSDale Ghent * @hw: pointer to hardware structure 84*dc0cb1cdSDale Ghent * @stats: pointer to statistics structure 85*dc0cb1cdSDale Ghent * 
@tc_count: Number of elements in bwg_array. 86*dc0cb1cdSDale Ghent * 87*dc0cb1cdSDale Ghent * This function returns the CBFC status data for each of the Traffic Classes. 88*dc0cb1cdSDale Ghent */ 89*dc0cb1cdSDale Ghent s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw, 90*dc0cb1cdSDale Ghent struct ixgbe_hw_stats *stats, 91*dc0cb1cdSDale Ghent u8 tc_count) 92*dc0cb1cdSDale Ghent { 93*dc0cb1cdSDale Ghent int tc; 94*dc0cb1cdSDale Ghent 95*dc0cb1cdSDale Ghent DEBUGFUNC("dcb_get_pfc_stats"); 96*dc0cb1cdSDale Ghent 97*dc0cb1cdSDale Ghent if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) 98*dc0cb1cdSDale Ghent return IXGBE_ERR_PARAM; 99*dc0cb1cdSDale Ghent 100*dc0cb1cdSDale Ghent for (tc = 0; tc < tc_count; tc++) { 101*dc0cb1cdSDale Ghent /* Priority XOFF Transmitted */ 102*dc0cb1cdSDale Ghent stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc)); 103*dc0cb1cdSDale Ghent /* Priority XOFF Received */ 104*dc0cb1cdSDale Ghent stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc)); 105*dc0cb1cdSDale Ghent } 106*dc0cb1cdSDale Ghent 107*dc0cb1cdSDale Ghent return IXGBE_SUCCESS; 108*dc0cb1cdSDale Ghent } 109*dc0cb1cdSDale Ghent 110*dc0cb1cdSDale Ghent /** 111*dc0cb1cdSDale Ghent * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter 112*dc0cb1cdSDale Ghent * @hw: pointer to hardware structure 113*dc0cb1cdSDale Ghent * @dcb_config: pointer to ixgbe_dcb_config structure 114*dc0cb1cdSDale Ghent * 115*dc0cb1cdSDale Ghent * Configure Rx Packet Arbiter and credits for each traffic class. 
 */
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
				      u16 *max, u8 *bwg_id, u8 *tsa,
				      u8 *map)
{
	u32 reg = 0;
	u32 credit_refill = 0;
	u32 credit_max = 0;
	u8 i = 0;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; WSP)
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	/*
	 * Map all UPs to TCs.  up_to_tc_bitmap for each TC has the
	 * corresponding bits set for the UPs that need to be mapped to
	 * that TC.  E.g. if priorities 6 and 7 are to be mapped to a TC
	 * then the up_to_tc_bitmap value for that TC will be 11000000
	 * in binary.
	 */
	reg = 0;
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
		reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		credit_refill = refill[i];
		credit_max = max[i];
		reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);

		reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;

		/* Link strict priority requests the LSP bit */
		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTRPT4C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
	}

	/*
	 * Configure Rx packet plane (recycle mode; WSP) and
	 * re-enable the arbiter (ARBDIS cleared)
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits per traffic class
 * @max: max credits per traffic class
 * @bwg_id: bandwidth group index per traffic class
 * @tsa: transmission selection algorithm per traffic class
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
					   u16 *max, u8 *bwg_id, u8 *tsa)
{
	u32 reg, max_credits;
	u8 i;

	/* Clear the per-Tx queue credits; we use per-TC instead */
	for (i = 0; i < 128; i++) {
		/* Select queue i, then zero its descriptor credit config */
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
	}

	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		max_credits = max[i];
		reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
		reg |= refill[i];
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;

		/* Group strict priority (CEE) sets GSP */
		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
			reg |= IXGBE_RTTDT2C_GSP;

		/* Link strict priority sets LSP */
		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTTDT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
	}

	/*
	 * Configure Tx descriptor plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits per traffic class
 * @max: max credits per traffic class
 * @bwg_id: bandwidth group index per traffic class
 * @tsa: transmission selection algorithm per traffic class
 * @map: user priority to traffic class map
 *
 * Configure Tx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
					   u16 *max, u8 *bwg_id, u8 *tsa,
					   u8 *map)
{
	u32 reg;
	u8 i;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; SP; arb delay)
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
	      IXGBE_RTTPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	/*
	 * Map all UPs to TCs.  up_to_tc_bitmap for each TC has the
	 * corresponding bits set for the UPs that need to be mapped to
	 * that TC.  E.g. if priorities 6 and 7 are to be mapped to a TC
	 * then the up_to_tc_bitmap value for that TC will be 11000000
	 * in binary.
	 */
	reg = 0;
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
		reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		reg = refill[i];
		reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;

		/* Group strict priority (CEE) sets GSP */
		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
			reg |= IXGBE_RTTPT2C_GSP;

		/* Link strict priority sets LSP */
		if (tsa[i] == ixgbe_dcb_tsa_strict)
			reg |= IXGBE_RTTPT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
	}

	/*
	 * Configure Tx packet plane (recycle mode; SP; arb delay) and
	 * re-enable the arbiter (ARBDIS cleared)
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
 * @hw: pointer to hardware structure
 * @pfc_en: enabled pfc bitmask
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Priority Flow Control (PFC) for
 * each traffic class.
 */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
{
	u32 i, j, fcrtl, reg;
	u8 max_tc = 0;

	/* Enable Transmit Priority Flow Control */
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);

	/* Enable Receive Priority Flow Control */
	reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	reg |= IXGBE_MFLCN_DPF;

	/*
	 * X540 supports per TC Rx priority flow control.  So
	 * clear all TCs and only enable those that should be
	 * enabled.
	 */
	reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	/* On X540 and later, set the per-TC PFC enable bits directly */
	if (hw->mac.type >= ixgbe_mac_X540)
		reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;

	/* Global Rx PFC enable bit if any priority has PFC on */
	if (pfc_en)
		reg |= IXGBE_MFLCN_RPFCE;

	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);

	/* Find the highest TC any user priority maps to */
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) {
		if (map[i] > max_tc)
			max_tc = map[i];
	}


	/* Configure PFC Tx thresholds per TC */
	for (i = 0; i <= max_tc; i++) {
		int enabled = 0;

		/* TC i has PFC enabled if any UP mapped to it is enabled */
		for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) {
			if ((map[j] == i) && (pfc_en & (1 << j))) {
				enabled = 1;
				break;
			}
		}

		if (enabled) {
			/* Water marks are stored in KB units; shift to bytes */
			reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
		} else {
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB.  This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
	}

	/* Zero the thresholds for the remaining, unused TCs */
	for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure; may be NULL, in
 *	which case 8 TCs with VT mode off is assumed
 *
 * Configure queue statistics registers, all queues belonging to same traffic
 * class uses a single set of queue statistics counters.
 */
s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw,
				    struct ixgbe_dcb_config *dcb_config)
{
	u32 reg = 0;
	u8 i = 0;
	u8 tc_count = 8;
	bool vt_mode = FALSE;

	if (dcb_config != NULL) {
		tc_count = dcb_config->num_tcs.pg_tcs;
		vt_mode = dcb_config->vt_mode;
	}

	/* Only 8 TCs without VT, or 4 TCs, are valid combinations */
	if (!((tc_count == 8 && vt_mode == FALSE) || tc_count == 4))
		return IXGBE_ERR_PARAM;

	if (tc_count == 8 && vt_mode == FALSE) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Set all 16 queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++) {
			reg = 0x01010101 * (i / 4);
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
		}
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Set all queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 * Tx queues are allocated non-uniformly to TCs:
		 * 32, 32, 16, 16, 8, 8, 8, 8.
		 */
		for (i = 0; i < 32; i++) {
			if (i < 8)
				reg = 0x00000000;
			else if (i < 16)
				reg = 0x01010101;
			else if (i < 20)
				reg = 0x02020202;
			else if (i < 24)
				reg = 0x03030303;
			else if (i < 26)
				reg = 0x04040404;
			else if (i < 28)
				reg = 0x05050505;
			else if (i < 30)
				reg = 0x06060606;
			else
				reg = 0x07070707;
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
		}
	} else if (tc_count == 4 && vt_mode == FALSE) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Set all 16 queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++) {
			if (i % 8 > 3)
				/* In 4 TC mode, odd 16-queue ranges are
				 * not used.
				 */
				continue;
			reg = 0x01010101 * (i / 8);
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
		}
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Set all queues of each TC to the same stat
		 * with TC 'n' going to stat 'n'.
		 * Tx queues are allocated non-uniformly to TCs:
		 * 64, 32, 16, 16.
		 */
		for (i = 0; i < 32; i++) {
			if (i < 16)
				reg = 0x00000000;
			else if (i < 24)
				reg = 0x01010101;
			else if (i < 28)
				reg = 0x02020202;
			else
				reg = 0x03030303;
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
		}
	} else if (tc_count == 4 && vt_mode == TRUE) {
		/*
		 * Receive Queues stats setting
		 * 32 RQSMR registers, each configuring 4 queues.
		 *
		 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
		 * pool. Set all 32 queues of each TC across pools to the same
		 * stat with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100);
		/*
		 * Transmit Queues stats setting
		 * 32 TQSM registers, each controlling 4 queues.
		 *
		 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
		 * pool. Set all 32 queues of each TC across pools to the same
		 * stat with TC 'n' going to stat 'n'.
		 */
		for (i = 0; i < 32; i++)
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_82599 - Configure general DCB parameters
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure general DCB parameters.
 */
s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
			   struct ixgbe_dcb_config *dcb_config)
{
	u32 reg;
	u32 q;

	/* Disable the Tx desc arbiter so that MTQC can be changed */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg |= IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
	if (dcb_config->num_tcs.pg_tcs == 8) {
		/* Enable DCB for Rx with 8 TCs */
		switch (reg & IXGBE_MRQC_MRQE_MASK) {
		case 0:
		case IXGBE_MRQC_RT4TCEN:
			/* RSS disabled cases */
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RT8TCEN;
			break;
		case IXGBE_MRQC_RSSEN:
		case IXGBE_MRQC_RTRSS4TCEN:
			/* RSS enabled cases */
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RTRSS8TCEN;
			break;
		default:
			/*
			 * Unsupported value, assume stale data,
			 * overwrite no RSS
			 */
			ASSERT(0);
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RT8TCEN;
		}
	}
	if (dcb_config->num_tcs.pg_tcs == 4) {
		/* We support both VT-on and VT-off with 4 TCs. */
		if (dcb_config->vt_mode)
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_VMDQRT4TCEN;
		else
			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
			      IXGBE_MRQC_RTRSS4TCEN;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);

	/* Enable DCB for Tx with 8 TCs */
	if (dcb_config->num_tcs.pg_tcs == 8)
		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
	else {
		/* We support both VT-on and VT-off with 4 TCs. */
		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
		if (dcb_config->vt_mode)
			reg |= IXGBE_MTQC_VT_ENA;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);

	/* Disable drop for all queues */
	for (q = 0; q < 128; q++)
		IXGBE_WRITE_REG(hw, IXGBE_QDE,
				(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));

	/* Re-enable the Tx desc arbiter */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg &= ~IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	/* Enable Security TX Buffer IFG for DCB */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg |= IXGBE_SECTX_DCB;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
 * @hw: pointer to hardware structure
 * @link_speed: unused; kept for interface compatibility
 * @refill: refill credits per traffic class
 * @max: max credits per traffic class
 * @bwg_id: bandwidth group index per traffic class
 * @tsa: transmission selection algorithm per traffic class
 * @map: user priority to traffic class map
 *
 * Configure dcb settings and enable dcb mode.
 */
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
			      u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
			      u8 *map)
{
	UNREFERENCED_1PARAMETER(link_speed);

	/*
	 * Program the three DCB arbiters: the Rx packet plane, the Tx
	 * descriptor plane, and the Tx packet plane, in that order.
	 */
	ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
					  map);
	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
					       tsa);
	ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
					       tsa, map);

	return IXGBE_SUCCESS;
}