1*14b24e2bSVaishali Kulkarni /* 2*14b24e2bSVaishali Kulkarni * CDDL HEADER START 3*14b24e2bSVaishali Kulkarni * 4*14b24e2bSVaishali Kulkarni * The contents of this file are subject to the terms of the 5*14b24e2bSVaishali Kulkarni * Common Development and Distribution License, v.1, (the "License"). 6*14b24e2bSVaishali Kulkarni * You may not use this file except in compliance with the License. 7*14b24e2bSVaishali Kulkarni * 8*14b24e2bSVaishali Kulkarni * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9*14b24e2bSVaishali Kulkarni * or http://opensource.org/licenses/CDDL-1.0. 10*14b24e2bSVaishali Kulkarni * See the License for the specific language governing permissions 11*14b24e2bSVaishali Kulkarni * and limitations under the License. 12*14b24e2bSVaishali Kulkarni * 13*14b24e2bSVaishali Kulkarni * When distributing Covered Code, include this CDDL HEADER in each 14*14b24e2bSVaishali Kulkarni * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15*14b24e2bSVaishali Kulkarni * If applicable, add the following below this CDDL HEADER, with the 16*14b24e2bSVaishali Kulkarni * fields enclosed by brackets "[]" replaced with your own identifying 17*14b24e2bSVaishali Kulkarni * information: Portions Copyright [yyyy] [name of copyright owner] 18*14b24e2bSVaishali Kulkarni * 19*14b24e2bSVaishali Kulkarni * CDDL HEADER END 20*14b24e2bSVaishali Kulkarni */ 21*14b24e2bSVaishali Kulkarni 22*14b24e2bSVaishali Kulkarni /* 23*14b24e2bSVaishali Kulkarni * Copyright 2014-2017 Cavium, Inc. 24*14b24e2bSVaishali Kulkarni * The contents of this file are subject to the terms of the Common Development 25*14b24e2bSVaishali Kulkarni * and Distribution License, v.1, (the "License"). 26*14b24e2bSVaishali Kulkarni 27*14b24e2bSVaishali Kulkarni * You may not use this file except in compliance with the License. 
 *
 * You can obtain a copy of the License at available
 * at http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_hsi_eth.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_l2.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_vf.h"
#include "ecore_sriov.h"
#include "ecore_mcp.h"

#define ECORE_MAX_SGES_NUM	16
#define CRC32_POLY		0x1edc6f41

/*
 * Per-hwfn bookkeeping for L2 queue-cid allocation.
 * Allocated by ecore_l2_alloc(), initialized by ecore_l2_setup() and
 * released by ecore_l2_free().
 */
struct ecore_l2_info {
	/* Number of queue-zones; RESC_NUM(ECORE_L2_QUEUE) for a PF,
	 * max(num_rxqs, num_txqs) for a VF [see ecore_l2_alloc()].
	 */
	u32 queues;

	/* Array of 'queues' bitmaps, each MAX_QUEUES_PER_QZONE bits wide,
	 * tracking which internal qids are in use inside each queue-zone.
	 */
	unsigned long **pp_qid_usage;

	/* The lock is meant to synchronize access to the qid usage */
	osal_mutex_t lock;
};
66*14b24e2bSVaishali Kulkarni 67*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn) 68*14b24e2bSVaishali Kulkarni { 69*14b24e2bSVaishali Kulkarni struct ecore_l2_info *p_l2_info; 70*14b24e2bSVaishali Kulkarni unsigned long **pp_qids; 71*14b24e2bSVaishali Kulkarni u32 i; 72*14b24e2bSVaishali Kulkarni 73*14b24e2bSVaishali Kulkarni if (!ECORE_IS_L2_PERSONALITY(p_hwfn)) 74*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 75*14b24e2bSVaishali Kulkarni 76*14b24e2bSVaishali Kulkarni p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info)); 77*14b24e2bSVaishali Kulkarni if (!p_l2_info) 78*14b24e2bSVaishali Kulkarni return ECORE_NOMEM; 79*14b24e2bSVaishali Kulkarni p_hwfn->p_l2_info = p_l2_info; 80*14b24e2bSVaishali Kulkarni 81*14b24e2bSVaishali Kulkarni if (IS_PF(p_hwfn->p_dev)) { 82*14b24e2bSVaishali Kulkarni p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE); 83*14b24e2bSVaishali Kulkarni } else { 84*14b24e2bSVaishali Kulkarni u8 rx = 0, tx = 0; 85*14b24e2bSVaishali Kulkarni 86*14b24e2bSVaishali Kulkarni ecore_vf_get_num_rxqs(p_hwfn, &rx); 87*14b24e2bSVaishali Kulkarni ecore_vf_get_num_txqs(p_hwfn, &tx); 88*14b24e2bSVaishali Kulkarni 89*14b24e2bSVaishali Kulkarni p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx); 90*14b24e2bSVaishali Kulkarni } 91*14b24e2bSVaishali Kulkarni 92*14b24e2bSVaishali Kulkarni pp_qids = OSAL_VZALLOC(p_hwfn->p_dev, 93*14b24e2bSVaishali Kulkarni sizeof(unsigned long *) * 94*14b24e2bSVaishali Kulkarni p_l2_info->queues); 95*14b24e2bSVaishali Kulkarni if (pp_qids == OSAL_NULL) 96*14b24e2bSVaishali Kulkarni return ECORE_NOMEM; 97*14b24e2bSVaishali Kulkarni p_l2_info->pp_qid_usage = pp_qids; 98*14b24e2bSVaishali Kulkarni 99*14b24e2bSVaishali Kulkarni for (i = 0; i < p_l2_info->queues; i++) { 100*14b24e2bSVaishali Kulkarni pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev, 101*14b24e2bSVaishali Kulkarni MAX_QUEUES_PER_QZONE / 8); 102*14b24e2bSVaishali Kulkarni if (pp_qids[i] == OSAL_NULL) 103*14b24e2bSVaishali Kulkarni 
return ECORE_NOMEM; 104*14b24e2bSVaishali Kulkarni } 105*14b24e2bSVaishali Kulkarni 106*14b24e2bSVaishali Kulkarni #ifdef CONFIG_ECORE_LOCK_ALLOC 107*14b24e2bSVaishali Kulkarni OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock); 108*14b24e2bSVaishali Kulkarni #endif 109*14b24e2bSVaishali Kulkarni 110*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 111*14b24e2bSVaishali Kulkarni } 112*14b24e2bSVaishali Kulkarni 113*14b24e2bSVaishali Kulkarni void ecore_l2_setup(struct ecore_hwfn *p_hwfn) 114*14b24e2bSVaishali Kulkarni { 115*14b24e2bSVaishali Kulkarni if (!ECORE_IS_L2_PERSONALITY(p_hwfn)) 116*14b24e2bSVaishali Kulkarni return; 117*14b24e2bSVaishali Kulkarni 118*14b24e2bSVaishali Kulkarni OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock); 119*14b24e2bSVaishali Kulkarni } 120*14b24e2bSVaishali Kulkarni 121*14b24e2bSVaishali Kulkarni void ecore_l2_free(struct ecore_hwfn *p_hwfn) 122*14b24e2bSVaishali Kulkarni { 123*14b24e2bSVaishali Kulkarni u32 i; 124*14b24e2bSVaishali Kulkarni 125*14b24e2bSVaishali Kulkarni if (!ECORE_IS_L2_PERSONALITY(p_hwfn)) 126*14b24e2bSVaishali Kulkarni return; 127*14b24e2bSVaishali Kulkarni 128*14b24e2bSVaishali Kulkarni if (p_hwfn->p_l2_info == OSAL_NULL) 129*14b24e2bSVaishali Kulkarni return; 130*14b24e2bSVaishali Kulkarni 131*14b24e2bSVaishali Kulkarni if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL) 132*14b24e2bSVaishali Kulkarni goto out_l2_info; 133*14b24e2bSVaishali Kulkarni 134*14b24e2bSVaishali Kulkarni /* Free until hit first uninitialized entry */ 135*14b24e2bSVaishali Kulkarni for (i = 0; i < p_hwfn->p_l2_info->queues; i++) { 136*14b24e2bSVaishali Kulkarni if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL) 137*14b24e2bSVaishali Kulkarni break; 138*14b24e2bSVaishali Kulkarni OSAL_VFREE(p_hwfn->p_dev, 139*14b24e2bSVaishali Kulkarni p_hwfn->p_l2_info->pp_qid_usage[i]); 140*14b24e2bSVaishali Kulkarni } 141*14b24e2bSVaishali Kulkarni 142*14b24e2bSVaishali Kulkarni #ifdef CONFIG_ECORE_LOCK_ALLOC 143*14b24e2bSVaishali Kulkarni /* Lock is last to 
initialize, if everything else was */ 144*14b24e2bSVaishali Kulkarni if (i == p_hwfn->p_l2_info->queues) 145*14b24e2bSVaishali Kulkarni OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock); 146*14b24e2bSVaishali Kulkarni #endif 147*14b24e2bSVaishali Kulkarni 148*14b24e2bSVaishali Kulkarni OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage); 149*14b24e2bSVaishali Kulkarni 150*14b24e2bSVaishali Kulkarni out_l2_info: 151*14b24e2bSVaishali Kulkarni OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info); 152*14b24e2bSVaishali Kulkarni p_hwfn->p_l2_info = OSAL_NULL; 153*14b24e2bSVaishali Kulkarni } 154*14b24e2bSVaishali Kulkarni 155*14b24e2bSVaishali Kulkarni /* TODO - we'll need locking around these... */ 156*14b24e2bSVaishali Kulkarni static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn, 157*14b24e2bSVaishali Kulkarni struct ecore_queue_cid *p_cid) 158*14b24e2bSVaishali Kulkarni { 159*14b24e2bSVaishali Kulkarni struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info; 160*14b24e2bSVaishali Kulkarni u16 queue_id = p_cid->rel.queue_id; 161*14b24e2bSVaishali Kulkarni bool b_rc = true; 162*14b24e2bSVaishali Kulkarni u8 first; 163*14b24e2bSVaishali Kulkarni 164*14b24e2bSVaishali Kulkarni OSAL_MUTEX_ACQUIRE(&p_l2_info->lock); 165*14b24e2bSVaishali Kulkarni 166*14b24e2bSVaishali Kulkarni if (queue_id > p_l2_info->queues) { 167*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, 168*14b24e2bSVaishali Kulkarni "Requested to increase usage for qzone %04x out of %08x\n", 169*14b24e2bSVaishali Kulkarni queue_id, p_l2_info->queues); 170*14b24e2bSVaishali Kulkarni b_rc = false; 171*14b24e2bSVaishali Kulkarni goto out; 172*14b24e2bSVaishali Kulkarni } 173*14b24e2bSVaishali Kulkarni 174*14b24e2bSVaishali Kulkarni first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id], 175*14b24e2bSVaishali Kulkarni MAX_QUEUES_PER_QZONE); 176*14b24e2bSVaishali Kulkarni if (first >= MAX_QUEUES_PER_QZONE) { 177*14b24e2bSVaishali Kulkarni b_rc = false; 178*14b24e2bSVaishali Kulkarni goto 
out; 179*14b24e2bSVaishali Kulkarni } 180*14b24e2bSVaishali Kulkarni 181*14b24e2bSVaishali Kulkarni OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]); 182*14b24e2bSVaishali Kulkarni p_cid->qid_usage_idx = first; 183*14b24e2bSVaishali Kulkarni 184*14b24e2bSVaishali Kulkarni out: 185*14b24e2bSVaishali Kulkarni OSAL_MUTEX_RELEASE(&p_l2_info->lock); 186*14b24e2bSVaishali Kulkarni return b_rc; 187*14b24e2bSVaishali Kulkarni } 188*14b24e2bSVaishali Kulkarni 189*14b24e2bSVaishali Kulkarni static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn, 190*14b24e2bSVaishali Kulkarni struct ecore_queue_cid *p_cid) 191*14b24e2bSVaishali Kulkarni { 192*14b24e2bSVaishali Kulkarni OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock); 193*14b24e2bSVaishali Kulkarni 194*14b24e2bSVaishali Kulkarni OSAL_CLEAR_BIT(p_cid->qid_usage_idx, 195*14b24e2bSVaishali Kulkarni p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]); 196*14b24e2bSVaishali Kulkarni 197*14b24e2bSVaishali Kulkarni OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock); 198*14b24e2bSVaishali Kulkarni } 199*14b24e2bSVaishali Kulkarni 200*14b24e2bSVaishali Kulkarni void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn, 201*14b24e2bSVaishali Kulkarni struct ecore_queue_cid *p_cid) 202*14b24e2bSVaishali Kulkarni { 203*14b24e2bSVaishali Kulkarni bool b_legacy_vf = !!(p_cid->vf_legacy & 204*14b24e2bSVaishali Kulkarni ECORE_QCID_LEGACY_VF_CID); 205*14b24e2bSVaishali Kulkarni 206*14b24e2bSVaishali Kulkarni /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF. 207*14b24e2bSVaishali Kulkarni * For legacy vf-queues, the CID doesn't go through here. 
/* The internal is only meant to be directly called by PFs initializing CIDs
 * for their VFs.
 */
static struct ecore_queue_cid *
_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
			u16 opaque_fid, u32 cid,
			struct ecore_queue_start_common_params *p_params,
			struct ecore_queue_cid_vf_params *p_vf_params)
{
	struct ecore_queue_cid *p_cid;
	enum _ecore_status_t rc;

	p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid));
	if (p_cid == OSAL_NULL)
		return OSAL_NULL;

	p_cid->opaque_fid = opaque_fid;
	p_cid->cid = cid;
	p_cid->p_owner = p_hwfn;

	/* Fill in parameters */
	p_cid->rel.vport_id = p_params->vport_id;
	p_cid->rel.queue_id = p_params->queue_id;
	p_cid->rel.stats_id = p_params->stats_id;
	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
	p_cid->sb_idx = p_params->sb_idx;

	/* Fill-in bits related to VFs' queues if information was provided */
	if (p_vf_params != OSAL_NULL) {
		p_cid->vfid = p_vf_params->vfid;
		p_cid->vf_qid = p_vf_params->vf_qid;
		p_cid->vf_legacy = p_vf_params->vf_legacy;
	} else {
		p_cid->vfid = ECORE_QUEUE_CID_PF;
	}

	/* Don't try calculating the absolute indices for VFs;
	 * relative indices are used as-is [abs = rel].
	 */
	if (IS_VF(p_hwfn->p_dev)) {
		p_cid->abs = p_cid->rel;

		goto out;
	}

	/* Calculate the engine-absolute indices of the resources.
	 * This would guarantee they're valid later on.
	 * In some cases [SBs] we already have the right values.
	 */
	rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
	if (rc != ECORE_SUCCESS)
		goto fail;

	rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id,
			       &p_cid->abs.queue_id);
	if (rc != ECORE_SUCCESS)
		goto fail;

	/* In case of a PF configuring its VF's queues, the stats-id is already
	 * absolute [since there's a single index that's suitable per-VF].
	 */
	if (p_cid->vfid == ECORE_QUEUE_CID_PF) {
		rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id,
				    &p_cid->abs.stats_id);
		if (rc != ECORE_SUCCESS)
			goto fail;
	} else {
		p_cid->abs.stats_id = p_cid->rel.stats_id;
	}

out:
	/* VF-images have provided the qid_usage_idx on their own.
	 * Otherwise, we need to allocate a unique one.
	 */
	if (!p_vf_params) {
		if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid))
			goto fail;
	} else {
		p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
		   p_cid->opaque_fid, p_cid->cid,
		   p_cid->rel.vport_id, p_cid->abs.vport_id,
		   p_cid->rel.queue_id, p_cid->qid_usage_idx,
		   p_cid->abs.queue_id,
		   p_cid->rel.stats_id, p_cid->abs.stats_id,
		   p_cid->sb_igu_id, p_cid->sb_idx);

	return p_cid;

fail:
	/* NOTE: the caller [ecore_eth_queue_to_cid] releases the firmware
	 * CID when this returns NULL; only the cid struct is freed here.
	 */
	OSAL_VFREE(p_hwfn->p_dev, p_cid);
	return OSAL_NULL;
}

/*
 * Allocate and populate an ecore_queue_cid for a queue, acquiring a
 * firmware CID where needed [PF, non-legacy-VF]. Returns OSAL_NULL on
 * failure; on failure any acquired CID is released before returning.
 */
struct ecore_queue_cid *
ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
		       struct ecore_queue_start_common_params *p_params,
		       struct ecore_queue_cid_vf_params *p_vf_params)
{
	struct ecore_queue_cid *p_cid;
	u8 vfid = ECORE_CXT_PF_CID;
	bool b_legacy_vf = false;
	u32 cid = 0;

	/* In case of legacy VFs, The CID can be derived from the additional
	 * VF parameters - the VF assumes queue X uses CID X, so we can simply
	 * use the vf_qid for this purpose as well.
	 */
	if (p_vf_params) {
		vfid = p_vf_params->vfid;

		if (p_vf_params->vf_legacy &
		    ECORE_QCID_LEGACY_VF_CID) {
			b_legacy_vf = true;
			cid = p_vf_params->vf_qid;
		}
	}

	/* Get a unique firmware CID for this queue, in case it's a PF.
	 * VF's don't need a CID as the queue configuration will be done
	 * by PF.
	 */
	if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) {
		if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
					   &cid, vfid) != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
			return OSAL_NULL;
		}
	}

	p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
					p_params, p_vf_params);
	/* Don't leak the acquired CID if cid-struct creation failed */
	if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
		_ecore_cxt_release_cid(p_hwfn, cid, vfid);

	return p_cid;
}

/* Convenience wrapper for PF-owned queues [no VF parameters] */
static struct ecore_queue_cid *
ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
			  struct ecore_queue_start_common_params *p_params)
{
	return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, OSAL_NULL);
}
/*
 * Post a VPORT_START ramrod configuring a new vport: MTU, inner-vlan
 * removal, rx-mode defaults [start with all drop], strict tx-error
 * behavior, and TPA/GRO aggregation parameters.
 *
 * Runs in ECORE_SPQ_MODE_EBLOCK [blocks until firmware completion].
 * Returns ECORE_SUCCESS or an error from vport-id translation /
 * SPQ entry initialization / ramrod posting.
 */
enum _ecore_status_t ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
					      struct ecore_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	u16 rx_mode = 0, tx_err = 0;
	u8 abs_vport_id = 0;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_START,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
	p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
	p_ramrod->untagged = p_params->only_untagged;
	p_ramrod->zero_placement_offset = p_params->zero_placement_offset;

	/* Start with everything dropped; rx-mode is opened later via a
	 * vport-update ramrod [see ecore_sp_update_accept_mode].
	 */
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);

	/* Handle requests for strict behavior on transmission errors */
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE,
		  p_params->b_err_illegal_vlan_mode ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL,
		  p_params->b_err_small_pkt ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR,
		  p_params->b_err_anti_spoof ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS,
		  p_params->b_err_illegal_inband_mode ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG,
		  p_params->b_err_vlan_insert_with_inband ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION,
		  p_params->b_err_big_pkt ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME,
		  p_params->b_err_ctrl_frame ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);

	/* TPA related fields */
	OSAL_MEMSET(&p_ramrod->tpa_param, 0,
		    sizeof(struct eth_vport_tpa_param));
	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case ECORE_TPA_MODE_GRO:
		/* (u16)-1 == no aggregation-size limit */
		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu/2;
		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu/2;
		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv4_tunn_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_tunn_en_flg = 1;
		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	p_ramrod->tx_switching_en = p_params->tx_switching;
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		p_ramrod->tx_switching_en = 0;
#endif

	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_hwfn->p_dev,
						    p_params->concrete_fid);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/* Start a vport: VFs request it from the PF over the VF-PF channel,
 * PFs post the VPORT_START ramrod directly.
 */
enum _ecore_status_t ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
					  struct ecore_sp_vport_start_params *p_params)
{
	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id,
					       p_params->mtu,
					       p_params->remove_inner_vlan,
					       p_params->tpa_mode,
					       p_params->max_buffers_per_cqe,
					       p_params->only_untagged);

	return ecore_sp_eth_vport_start(p_hwfn, p_params);
}
/*
 * Fill the RSS section of a vport-update ramrod from p_rss.
 *
 * When p_rss is NULL the RSS-update flag is cleared and nothing else is
 * touched. Otherwise translates the RSS engine-id, copies capability
 * flags, the indirection table [absolute queue-ids] and the RSS key.
 *
 * Returns ECORE_SUCCESS, an error from ecore_fw_rss_eng(), or
 * ECORE_INVAL if an indirection-table entry has no queue-cid.
 */
static enum _ecore_status_t
ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct ecore_rss_params *p_rss)
{
	struct eth_vport_rss_config *p_config;
	int i, table_size;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (!p_rss) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}
	p_config = &p_ramrod->rss_config;

	/* Ramrod indirection table must match ecore's definition exactly */
	OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE !=
			  ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id,
			      &p_config->rss_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
	p_config->update_rss_key = p_rss->update_rss_key;

	p_config->rss_mode = p_rss->rss_enable ?
			     ETH_VPORT_RSS_MODE_REGULAR :
			     ETH_VPORT_RSS_MODE_DISABLED;

	p_config->capabilities = 0;

	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
	p_config->tbl_size = p_rss->rss_table_size_log;
	/* In-place conversion; capabilities were built in CPU order above */
	p_config->capabilities =
		OSAL_CPU_TO_LE16(p_config->capabilities);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   p_config->rss_mode,
		   p_config->update_rss_capabilities,
		   p_config->capabilities,
		   p_config->update_rss_ind_table,
		   p_config->update_rss_key);

	/* Only 2^tbl_size entries are meaningful, capped at the table max */
	table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE,
				1 << p_config->tbl_size);
	for (i = 0; i < table_size; i++) {
		struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i];

		if (!p_queue)
			return ECORE_INVAL;

		p_config->indirection_table[i] =
			OSAL_CPU_TO_LE16(p_queue->abs.queue_id);
	}

	/* NOTE(review): the dump below prints the full table, including
	 * entries beyond table_size which were never filled in.
	 */
	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "Configured RSS indirection table [%d entries]:\n",
		   table_size);
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
			   "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15]));
	}

	/* RSS key is 10 dwords [40 bytes] */
	for (i = 0; i < 10; i++)
		p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]);

	return rc;
}
571*14b24e2bSVaishali Kulkarni OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]), 572*14b24e2bSVaishali Kulkarni OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]), 573*14b24e2bSVaishali Kulkarni OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]), 574*14b24e2bSVaishali Kulkarni OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]), 575*14b24e2bSVaishali Kulkarni OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]), 576*14b24e2bSVaishali Kulkarni OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]), 577*14b24e2bSVaishali Kulkarni OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]), 578*14b24e2bSVaishali Kulkarni OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]), 579*14b24e2bSVaishali Kulkarni OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]), 580*14b24e2bSVaishali Kulkarni OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15])); 581*14b24e2bSVaishali Kulkarni } 582*14b24e2bSVaishali Kulkarni 583*14b24e2bSVaishali Kulkarni for (i = 0; i < 10; i++) 584*14b24e2bSVaishali Kulkarni p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]); 585*14b24e2bSVaishali Kulkarni 586*14b24e2bSVaishali Kulkarni return rc; 587*14b24e2bSVaishali Kulkarni } 588*14b24e2bSVaishali Kulkarni 589*14b24e2bSVaishali Kulkarni static void 590*14b24e2bSVaishali Kulkarni ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn, 591*14b24e2bSVaishali Kulkarni struct vport_update_ramrod_data *p_ramrod, 592*14b24e2bSVaishali Kulkarni struct ecore_filter_accept_flags accept_flags) 593*14b24e2bSVaishali Kulkarni { 594*14b24e2bSVaishali Kulkarni p_ramrod->common.update_rx_mode_flg = 595*14b24e2bSVaishali Kulkarni accept_flags.update_rx_mode_config; 596*14b24e2bSVaishali Kulkarni p_ramrod->common.update_tx_mode_flg = 597*14b24e2bSVaishali Kulkarni accept_flags.update_tx_mode_config; 598*14b24e2bSVaishali Kulkarni 599*14b24e2bSVaishali Kulkarni #ifndef ASIC_ONLY 600*14b24e2bSVaishali Kulkarni /* On B0 emulation we cannot enable Tx, since this would cause writes 601*14b24e2bSVaishali 
Kulkarni * to PVFC HW block which isn't implemented in emulation. 602*14b24e2bSVaishali Kulkarni */ 603*14b24e2bSVaishali Kulkarni if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 604*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 605*14b24e2bSVaishali Kulkarni "Non-Asic - prevent Tx mode in vport update\n"); 606*14b24e2bSVaishali Kulkarni p_ramrod->common.update_tx_mode_flg = 0; 607*14b24e2bSVaishali Kulkarni } 608*14b24e2bSVaishali Kulkarni #endif 609*14b24e2bSVaishali Kulkarni 610*14b24e2bSVaishali Kulkarni /* Set Rx mode accept flags */ 611*14b24e2bSVaishali Kulkarni if (p_ramrod->common.update_rx_mode_flg) { 612*14b24e2bSVaishali Kulkarni u8 accept_filter = accept_flags.rx_accept_filter; 613*14b24e2bSVaishali Kulkarni u16 state = 0; 614*14b24e2bSVaishali Kulkarni 615*14b24e2bSVaishali Kulkarni SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 616*14b24e2bSVaishali Kulkarni !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) || 617*14b24e2bSVaishali Kulkarni !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED))); 618*14b24e2bSVaishali Kulkarni 619*14b24e2bSVaishali Kulkarni SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED, 620*14b24e2bSVaishali Kulkarni !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)); 621*14b24e2bSVaishali Kulkarni 622*14b24e2bSVaishali Kulkarni SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 623*14b24e2bSVaishali Kulkarni !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) || 624*14b24e2bSVaishali Kulkarni !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); 625*14b24e2bSVaishali Kulkarni 626*14b24e2bSVaishali Kulkarni SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL, 627*14b24e2bSVaishali Kulkarni (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) && 628*14b24e2bSVaishali Kulkarni !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); 629*14b24e2bSVaishali Kulkarni 630*14b24e2bSVaishali Kulkarni SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL, 631*14b24e2bSVaishali Kulkarni !!(accept_filter & ECORE_ACCEPT_BCAST)); 
632*14b24e2bSVaishali Kulkarni 633*14b24e2bSVaishali Kulkarni p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state); 634*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 635*14b24e2bSVaishali Kulkarni "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n", 636*14b24e2bSVaishali Kulkarni p_ramrod->common.vport_id, state); 637*14b24e2bSVaishali Kulkarni } 638*14b24e2bSVaishali Kulkarni 639*14b24e2bSVaishali Kulkarni /* Set Tx mode accept flags */ 640*14b24e2bSVaishali Kulkarni if (p_ramrod->common.update_tx_mode_flg) { 641*14b24e2bSVaishali Kulkarni u8 accept_filter = accept_flags.tx_accept_filter; 642*14b24e2bSVaishali Kulkarni u16 state = 0; 643*14b24e2bSVaishali Kulkarni 644*14b24e2bSVaishali Kulkarni SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL, 645*14b24e2bSVaishali Kulkarni !!(accept_filter & ECORE_ACCEPT_NONE)); 646*14b24e2bSVaishali Kulkarni 647*14b24e2bSVaishali Kulkarni SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL, 648*14b24e2bSVaishali Kulkarni !!(accept_filter & ECORE_ACCEPT_NONE)); 649*14b24e2bSVaishali Kulkarni 650*14b24e2bSVaishali Kulkarni SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL, 651*14b24e2bSVaishali Kulkarni (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) && 652*14b24e2bSVaishali Kulkarni !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); 653*14b24e2bSVaishali Kulkarni 654*14b24e2bSVaishali Kulkarni SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, 655*14b24e2bSVaishali Kulkarni !!(accept_filter & ECORE_ACCEPT_BCAST)); 656*14b24e2bSVaishali Kulkarni 657*14b24e2bSVaishali Kulkarni p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state); 658*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 659*14b24e2bSVaishali Kulkarni "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n", 660*14b24e2bSVaishali Kulkarni p_ramrod->common.vport_id, state); 661*14b24e2bSVaishali Kulkarni } 662*14b24e2bSVaishali Kulkarni } 663*14b24e2bSVaishali Kulkarni 664*14b24e2bSVaishali Kulkarni static void 665*14b24e2bSVaishali Kulkarni 
ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn, 666*14b24e2bSVaishali Kulkarni struct vport_update_ramrod_data *p_ramrod, 667*14b24e2bSVaishali Kulkarni struct ecore_sge_tpa_params *p_params) 668*14b24e2bSVaishali Kulkarni { 669*14b24e2bSVaishali Kulkarni struct eth_vport_tpa_param *p_tpa; 670*14b24e2bSVaishali Kulkarni 671*14b24e2bSVaishali Kulkarni if (!p_params) { 672*14b24e2bSVaishali Kulkarni p_ramrod->common.update_tpa_param_flg = 0; 673*14b24e2bSVaishali Kulkarni p_ramrod->common.update_tpa_en_flg = 0; 674*14b24e2bSVaishali Kulkarni p_ramrod->common.update_tpa_param_flg = 0; 675*14b24e2bSVaishali Kulkarni return; 676*14b24e2bSVaishali Kulkarni } 677*14b24e2bSVaishali Kulkarni 678*14b24e2bSVaishali Kulkarni p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg; 679*14b24e2bSVaishali Kulkarni p_tpa = &p_ramrod->tpa_param; 680*14b24e2bSVaishali Kulkarni p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg; 681*14b24e2bSVaishali Kulkarni p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg; 682*14b24e2bSVaishali Kulkarni p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg; 683*14b24e2bSVaishali Kulkarni p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg; 684*14b24e2bSVaishali Kulkarni 685*14b24e2bSVaishali Kulkarni p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg; 686*14b24e2bSVaishali Kulkarni p_tpa->max_buff_num = p_params->max_buffers_per_cqe; 687*14b24e2bSVaishali Kulkarni p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg; 688*14b24e2bSVaishali Kulkarni p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg; 689*14b24e2bSVaishali Kulkarni p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg; 690*14b24e2bSVaishali Kulkarni p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num; 691*14b24e2bSVaishali Kulkarni p_tpa->tpa_max_size = p_params->tpa_max_size; 692*14b24e2bSVaishali Kulkarni p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start; 693*14b24e2bSVaishali Kulkarni 
p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont; 694*14b24e2bSVaishali Kulkarni } 695*14b24e2bSVaishali Kulkarni 696*14b24e2bSVaishali Kulkarni static void 697*14b24e2bSVaishali Kulkarni ecore_sp_update_mcast_bin(struct ecore_hwfn *p_hwfn, 698*14b24e2bSVaishali Kulkarni struct vport_update_ramrod_data *p_ramrod, 699*14b24e2bSVaishali Kulkarni struct ecore_sp_vport_update_params *p_params) 700*14b24e2bSVaishali Kulkarni { 701*14b24e2bSVaishali Kulkarni int i; 702*14b24e2bSVaishali Kulkarni 703*14b24e2bSVaishali Kulkarni OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0, 704*14b24e2bSVaishali Kulkarni sizeof(p_ramrod->approx_mcast.bins)); 705*14b24e2bSVaishali Kulkarni 706*14b24e2bSVaishali Kulkarni if (!p_params->update_approx_mcast_flg) 707*14b24e2bSVaishali Kulkarni return; 708*14b24e2bSVaishali Kulkarni 709*14b24e2bSVaishali Kulkarni p_ramrod->common.update_approx_mcast_flg = 1; 710*14b24e2bSVaishali Kulkarni for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { 711*14b24e2bSVaishali Kulkarni u32 *p_bins = (u32 *)p_params->bins; 712*14b24e2bSVaishali Kulkarni 713*14b24e2bSVaishali Kulkarni p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]); 714*14b24e2bSVaishali Kulkarni } 715*14b24e2bSVaishali Kulkarni } 716*14b24e2bSVaishali Kulkarni 717*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_sp_vport_update(struct ecore_hwfn *p_hwfn, 718*14b24e2bSVaishali Kulkarni struct ecore_sp_vport_update_params *p_params, 719*14b24e2bSVaishali Kulkarni enum spq_mode comp_mode, 720*14b24e2bSVaishali Kulkarni struct ecore_spq_comp_cb *p_comp_data) 721*14b24e2bSVaishali Kulkarni { 722*14b24e2bSVaishali Kulkarni struct ecore_rss_params *p_rss_params = p_params->rss_params; 723*14b24e2bSVaishali Kulkarni struct vport_update_ramrod_data_cmn *p_cmn; 724*14b24e2bSVaishali Kulkarni struct ecore_sp_init_data init_data; 725*14b24e2bSVaishali Kulkarni struct vport_update_ramrod_data *p_ramrod = OSAL_NULL; 726*14b24e2bSVaishali Kulkarni struct ecore_spq_entry *p_ent = 
					 OSAL_NULL;
	u8 abs_vport_id = 0, val;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* VFs cannot post ramrods directly; forward to the PF. */
	if (IS_VF(p_hwfn->p_dev)) {
		rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
		return rc;
	}

	/* Translate the relative vport id into an absolute one. */
	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_UPDATE,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;

	/* Each value is paired with an update_* flag telling FW whether the
	 * value should actually be applied.
	 */
	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;

	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	val = p_params->update_accept_any_vlan_flg;
	p_cmn->update_accept_any_vlan_flg = val;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
#ifndef ASIC_ONLY
	/* Tx-switching is not expected on FPGA platforms; override it. */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		if (p_ramrod->common.tx_switching_en ||
		    p_ramrod->common.update_tx_switching_en_flg) {
			DP_NOTICE(p_hwfn, false, "FPGA - why are we seeing tx-switching? Overriding it\n");
			p_ramrod->common.tx_switching_en = 0;
			p_ramrod->common.update_tx_switching_en_flg = 1;
		}
#endif
	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc != ECORE_SUCCESS) {
		/* Return spq entry which is taken in ecore_sp_init_request()*/
		ecore_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	ecore_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	ecore_sp_vport_update_sge_tpa(p_hwfn, p_ramrod,
				      p_params->sge_tpa_params);
	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/*
 * Post a VPORT_STOP ramrod for the given relative vport id.
 * VFs forward the request to the PF instead.
 */
enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
					 u16 opaque_fid,
					 u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
struct ecore_sp_init_data init_data; 818*14b24e2bSVaishali Kulkarni struct ecore_spq_entry *p_ent; 819*14b24e2bSVaishali Kulkarni u8 abs_vport_id = 0; 820*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc; 821*14b24e2bSVaishali Kulkarni 822*14b24e2bSVaishali Kulkarni if (IS_VF(p_hwfn->p_dev)) 823*14b24e2bSVaishali Kulkarni return ecore_vf_pf_vport_stop(p_hwfn); 824*14b24e2bSVaishali Kulkarni 825*14b24e2bSVaishali Kulkarni rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id); 826*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 827*14b24e2bSVaishali Kulkarni return rc; 828*14b24e2bSVaishali Kulkarni 829*14b24e2bSVaishali Kulkarni /* Get SPQ entry */ 830*14b24e2bSVaishali Kulkarni OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 831*14b24e2bSVaishali Kulkarni init_data.cid = ecore_spq_get_cid(p_hwfn); 832*14b24e2bSVaishali Kulkarni init_data.opaque_fid = opaque_fid; 833*14b24e2bSVaishali Kulkarni init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 834*14b24e2bSVaishali Kulkarni 835*14b24e2bSVaishali Kulkarni rc = ecore_sp_init_request(p_hwfn, &p_ent, 836*14b24e2bSVaishali Kulkarni ETH_RAMROD_VPORT_STOP, 837*14b24e2bSVaishali Kulkarni PROTOCOLID_ETH, &init_data); 838*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 839*14b24e2bSVaishali Kulkarni return rc; 840*14b24e2bSVaishali Kulkarni 841*14b24e2bSVaishali Kulkarni p_ramrod = &p_ent->ramrod.vport_stop; 842*14b24e2bSVaishali Kulkarni p_ramrod->vport_id = abs_vport_id; 843*14b24e2bSVaishali Kulkarni 844*14b24e2bSVaishali Kulkarni return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 845*14b24e2bSVaishali Kulkarni } 846*14b24e2bSVaishali Kulkarni 847*14b24e2bSVaishali Kulkarni static enum _ecore_status_t 848*14b24e2bSVaishali Kulkarni ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn, 849*14b24e2bSVaishali Kulkarni struct ecore_filter_accept_flags *p_accept_flags) 850*14b24e2bSVaishali Kulkarni { 851*14b24e2bSVaishali Kulkarni struct ecore_sp_vport_update_params s_params; 852*14b24e2bSVaishali Kulkarni 853*14b24e2bSVaishali 
Kulkarni OSAL_MEMSET(&s_params, 0, sizeof(s_params)); 854*14b24e2bSVaishali Kulkarni OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags, 855*14b24e2bSVaishali Kulkarni sizeof(struct ecore_filter_accept_flags)); 856*14b24e2bSVaishali Kulkarni 857*14b24e2bSVaishali Kulkarni return ecore_vf_pf_vport_update(p_hwfn, &s_params); 858*14b24e2bSVaishali Kulkarni } 859*14b24e2bSVaishali Kulkarni 860*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_filter_accept_cmd(struct ecore_dev *p_dev, 861*14b24e2bSVaishali Kulkarni u8 vport, 862*14b24e2bSVaishali Kulkarni struct ecore_filter_accept_flags accept_flags, 863*14b24e2bSVaishali Kulkarni u8 update_accept_any_vlan, 864*14b24e2bSVaishali Kulkarni u8 accept_any_vlan, 865*14b24e2bSVaishali Kulkarni enum spq_mode comp_mode, 866*14b24e2bSVaishali Kulkarni struct ecore_spq_comp_cb *p_comp_data) 867*14b24e2bSVaishali Kulkarni { 868*14b24e2bSVaishali Kulkarni struct ecore_sp_vport_update_params vport_update_params; 869*14b24e2bSVaishali Kulkarni int i, rc; 870*14b24e2bSVaishali Kulkarni 871*14b24e2bSVaishali Kulkarni /* Prepare and send the vport rx_mode change */ 872*14b24e2bSVaishali Kulkarni OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params)); 873*14b24e2bSVaishali Kulkarni vport_update_params.vport_id = vport; 874*14b24e2bSVaishali Kulkarni vport_update_params.accept_flags = accept_flags; 875*14b24e2bSVaishali Kulkarni vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan; 876*14b24e2bSVaishali Kulkarni vport_update_params.accept_any_vlan = accept_any_vlan; 877*14b24e2bSVaishali Kulkarni 878*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, i) { 879*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 880*14b24e2bSVaishali Kulkarni 881*14b24e2bSVaishali Kulkarni vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 882*14b24e2bSVaishali Kulkarni 883*14b24e2bSVaishali Kulkarni if (IS_VF(p_dev)) { 884*14b24e2bSVaishali Kulkarni rc = ecore_vf_pf_accept_flags(p_hwfn, 
&accept_flags); 885*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 886*14b24e2bSVaishali Kulkarni return rc; 887*14b24e2bSVaishali Kulkarni continue; 888*14b24e2bSVaishali Kulkarni } 889*14b24e2bSVaishali Kulkarni 890*14b24e2bSVaishali Kulkarni rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, 891*14b24e2bSVaishali Kulkarni comp_mode, p_comp_data); 892*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) { 893*14b24e2bSVaishali Kulkarni DP_ERR(p_dev, "Update rx_mode failed %d\n", rc); 894*14b24e2bSVaishali Kulkarni return rc; 895*14b24e2bSVaishali Kulkarni } 896*14b24e2bSVaishali Kulkarni 897*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 898*14b24e2bSVaishali Kulkarni "Accept filter configured, flags = [Rx]%x [Tx]%x\n", 899*14b24e2bSVaishali Kulkarni accept_flags.rx_accept_filter, 900*14b24e2bSVaishali Kulkarni accept_flags.tx_accept_filter); 901*14b24e2bSVaishali Kulkarni 902*14b24e2bSVaishali Kulkarni if (update_accept_any_vlan) 903*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 904*14b24e2bSVaishali Kulkarni "accept_any_vlan=%d configured\n", 905*14b24e2bSVaishali Kulkarni accept_any_vlan); 906*14b24e2bSVaishali Kulkarni } 907*14b24e2bSVaishali Kulkarni 908*14b24e2bSVaishali Kulkarni return 0; 909*14b24e2bSVaishali Kulkarni } 910*14b24e2bSVaishali Kulkarni 911*14b24e2bSVaishali Kulkarni enum _ecore_status_t 912*14b24e2bSVaishali Kulkarni ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn, 913*14b24e2bSVaishali Kulkarni struct ecore_queue_cid *p_cid, 914*14b24e2bSVaishali Kulkarni u16 bd_max_bytes, 915*14b24e2bSVaishali Kulkarni dma_addr_t bd_chain_phys_addr, 916*14b24e2bSVaishali Kulkarni dma_addr_t cqe_pbl_addr, 917*14b24e2bSVaishali Kulkarni u16 cqe_pbl_size) 918*14b24e2bSVaishali Kulkarni { 919*14b24e2bSVaishali Kulkarni struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL; 920*14b24e2bSVaishali Kulkarni struct ecore_spq_entry *p_ent = OSAL_NULL; 921*14b24e2bSVaishali Kulkarni struct ecore_sp_init_data init_data; 
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
		   p_cid->abs.vport_id, p_cid->sb_igu_id);

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_RX_QUEUE_START,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	/* Fill the ramrod from the absolute ids kept in the queue-cid. */
	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
	/* Completion arrives as an event, not on the CQE ring. */
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	/* Queues owned by a VF need the VF rx-producer configured as well. */
	if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
		bool b_legacy_vf = !!(p_cid->vf_legacy &
				      ECORE_QCID_LEGACY_VF_RX_PROD);

		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Queue%s is meant for VF rxq[%02x]\n",
			   b_legacy_vf ? " [legacy]" : "",
			   p_cid->vf_qid);
		p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
	}

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/*
 * PF flavor of rx-queue start: map the producer address in BAR0,
 * zero the producers and post the start ramrod.
 */
static enum _ecore_status_t
ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn,
			    struct ecore_queue_cid *p_cid,
			    u16 bd_max_bytes,
			    dma_addr_t bd_chain_phys_addr,
			    dma_addr_t cqe_pbl_addr,
			    u16 cqe_pbl_size,
			    void OSAL_IOMEM **pp_prod)
{
	u32 init_prod_val = 0;

	/* Producer lives in the MSTORM RAM region of BAR0. */
	*pp_prod = (u8 OSAL_IOMEM*)
		   p_hwfn->regview +
		   GTT_BAR0_MAP_REG_MSDM_RAM +
		   MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
			  (u32 *)(&init_prod_val));

	return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
					  bd_max_bytes,
					  bd_chain_phys_addr,
					  cqe_pbl_addr, cqe_pbl_size);
}

/*
 * Public rx-queue start: allocate a queue-cid, then start the queue via
 * the PF path or the VF->PF channel.  On success the cid is handed back
 * to the caller through p_ret_params; on failure it is released.
 */
enum _ecore_status_t
ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
			 u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct ecore_rxq_start_ret_params *p_ret_params)
{
	struct ecore_queue_cid *p_cid;
	enum _ecore_status_t rc;

	/* Allocate a CID for the queue */
	p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
	if (p_cid == OSAL_NULL)
		return ECORE_NOMEM;

	if (IS_PF(p_hwfn->p_dev))
		rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid,
						 bd_max_bytes,
						 bd_chain_phys_addr,
						 cqe_pbl_addr, cqe_pbl_size,
						 &p_ret_params->p_prod);
	else
		rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid,
					   bd_max_bytes,
					   bd_chain_phys_addr,
					   cqe_pbl_addr,
					   cqe_pbl_size,
					   &p_ret_params->p_prod);

	/* Provide the caller with a reference to use as the queue handle. */
	if (rc != ECORE_SUCCESS)
		ecore_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}

/*
 * Post an RX_QUEUE_UPDATE ramrod for each of the given queue handles.
 * Stops and returns on the first failure.
 */
enum _ecore_status_t ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
						   void **pp_rxq_handles,
						   u8 num_rxqs,
						   u8 complete_cqe_flg,
						   u8 complete_event_flg,
						   enum spq_mode comp_mode,
						   struct ecore_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	struct ecore_queue_cid *p_cid;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 i;

#ifndef LINUX_REMOVE
	/* VFs forward the whole batch to the PF in a single request. */
	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_rxqs_update(p_hwfn,
					       (struct ecore_queue_cid **)
					       pp_rxq_handles,
					       num_rxqs,
					       complete_cqe_flg,
					       complete_event_flg);
#endif

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	/* One ramrod per rx queue, all sharing the same completion mode. */
	for (i = 0; i < num_rxqs; i++) {
		p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i];

		/* Get SPQ entry */
		init_data.cid = p_cid->cid;
		init_data.opaque_fid = p_cid->opaque_fid;

		rc = ecore_sp_init_request(p_hwfn, &p_ent,
					   ETH_RAMROD_RX_QUEUE_UPDATE,
					   PROTOCOLID_ETH, &init_data);
		if (rc != ECORE_SUCCESS)
			return rc;

		p_ramrod = &p_ent->ramrod.rx_queue_update;
		p_ramrod->vport_id = p_cid->abs.vport_id;

		p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

	return rc;
}

/*
 * PF flavor of rx-queue stop: post an RX_QUEUE_STOP ramrod, selecting
 * where the completion should be reported (CQE ring and/or event queue).
 */
static enum _ecore_status_t
ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
			   struct ecore_queue_cid *p_cid,
			   bool b_eq_completion_only,
			   bool b_cqe_completion)
{
	struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_RX_QUEUE_STOP,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
1122*14b24e2bSVaishali Kulkarni */ 1123*14b24e2bSVaishali Kulkarni p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) && 1124*14b24e2bSVaishali Kulkarni !b_eq_completion_only) || 1125*14b24e2bSVaishali Kulkarni b_cqe_completion; 1126*14b24e2bSVaishali Kulkarni p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) || 1127*14b24e2bSVaishali Kulkarni b_eq_completion_only; 1128*14b24e2bSVaishali Kulkarni 1129*14b24e2bSVaishali Kulkarni return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1130*14b24e2bSVaishali Kulkarni } 1131*14b24e2bSVaishali Kulkarni 1132*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn, 1133*14b24e2bSVaishali Kulkarni void *p_rxq, 1134*14b24e2bSVaishali Kulkarni bool eq_completion_only, 1135*14b24e2bSVaishali Kulkarni bool cqe_completion) 1136*14b24e2bSVaishali Kulkarni { 1137*14b24e2bSVaishali Kulkarni struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq; 1138*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc = ECORE_NOTIMPL; 1139*14b24e2bSVaishali Kulkarni 1140*14b24e2bSVaishali Kulkarni if (IS_PF(p_hwfn->p_dev)) 1141*14b24e2bSVaishali Kulkarni rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid, 1142*14b24e2bSVaishali Kulkarni eq_completion_only, 1143*14b24e2bSVaishali Kulkarni cqe_completion); 1144*14b24e2bSVaishali Kulkarni else 1145*14b24e2bSVaishali Kulkarni rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion); 1146*14b24e2bSVaishali Kulkarni 1147*14b24e2bSVaishali Kulkarni if (rc == ECORE_SUCCESS) 1148*14b24e2bSVaishali Kulkarni ecore_eth_queue_cid_release(p_hwfn, p_cid); 1149*14b24e2bSVaishali Kulkarni return rc; 1150*14b24e2bSVaishali Kulkarni } 1151*14b24e2bSVaishali Kulkarni 1152*14b24e2bSVaishali Kulkarni enum _ecore_status_t 1153*14b24e2bSVaishali Kulkarni ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn, 1154*14b24e2bSVaishali Kulkarni struct ecore_queue_cid *p_cid, 1155*14b24e2bSVaishali Kulkarni dma_addr_t pbl_addr, u16 pbl_size, 
1156*14b24e2bSVaishali Kulkarni u16 pq_id) 1157*14b24e2bSVaishali Kulkarni { 1158*14b24e2bSVaishali Kulkarni struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL; 1159*14b24e2bSVaishali Kulkarni struct ecore_spq_entry *p_ent = OSAL_NULL; 1160*14b24e2bSVaishali Kulkarni struct ecore_sp_init_data init_data; 1161*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc = ECORE_NOTIMPL; 1162*14b24e2bSVaishali Kulkarni 1163*14b24e2bSVaishali Kulkarni /* Get SPQ entry */ 1164*14b24e2bSVaishali Kulkarni OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1165*14b24e2bSVaishali Kulkarni init_data.cid = p_cid->cid; 1166*14b24e2bSVaishali Kulkarni init_data.opaque_fid = p_cid->opaque_fid; 1167*14b24e2bSVaishali Kulkarni init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 1168*14b24e2bSVaishali Kulkarni 1169*14b24e2bSVaishali Kulkarni rc = ecore_sp_init_request(p_hwfn, &p_ent, 1170*14b24e2bSVaishali Kulkarni ETH_RAMROD_TX_QUEUE_START, 1171*14b24e2bSVaishali Kulkarni PROTOCOLID_ETH, &init_data); 1172*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 1173*14b24e2bSVaishali Kulkarni return rc; 1174*14b24e2bSVaishali Kulkarni 1175*14b24e2bSVaishali Kulkarni p_ramrod = &p_ent->ramrod.tx_queue_start; 1176*14b24e2bSVaishali Kulkarni p_ramrod->vport_id = p_cid->abs.vport_id; 1177*14b24e2bSVaishali Kulkarni 1178*14b24e2bSVaishali Kulkarni p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id); 1179*14b24e2bSVaishali Kulkarni p_ramrod->sb_index = p_cid->sb_idx; 1180*14b24e2bSVaishali Kulkarni p_ramrod->stats_counter_id = p_cid->abs.stats_id; 1181*14b24e2bSVaishali Kulkarni 1182*14b24e2bSVaishali Kulkarni p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); 1183*14b24e2bSVaishali Kulkarni p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); 1184*14b24e2bSVaishali Kulkarni 1185*14b24e2bSVaishali Kulkarni p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size); 1186*14b24e2bSVaishali Kulkarni DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr); 1187*14b24e2bSVaishali Kulkarni 
1188*14b24e2bSVaishali Kulkarni p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id); 1189*14b24e2bSVaishali Kulkarni 1190*14b24e2bSVaishali Kulkarni return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1191*14b24e2bSVaishali Kulkarni } 1192*14b24e2bSVaishali Kulkarni 1193*14b24e2bSVaishali Kulkarni static enum _ecore_status_t 1194*14b24e2bSVaishali Kulkarni ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn, 1195*14b24e2bSVaishali Kulkarni struct ecore_queue_cid *p_cid, 1196*14b24e2bSVaishali Kulkarni u8 tc, 1197*14b24e2bSVaishali Kulkarni dma_addr_t pbl_addr, u16 pbl_size, 1198*14b24e2bSVaishali Kulkarni void OSAL_IOMEM **pp_doorbell) 1199*14b24e2bSVaishali Kulkarni { 1200*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc; 1201*14b24e2bSVaishali Kulkarni 1202*14b24e2bSVaishali Kulkarni /* TODO - set tc in the pq_params for multi-cos */ 1203*14b24e2bSVaishali Kulkarni rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, 1204*14b24e2bSVaishali Kulkarni pbl_addr, pbl_size, 1205*14b24e2bSVaishali Kulkarni ecore_get_cm_pq_idx_mcos(p_hwfn, tc)); 1206*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 1207*14b24e2bSVaishali Kulkarni return rc; 1208*14b24e2bSVaishali Kulkarni 1209*14b24e2bSVaishali Kulkarni /* Provide the caller with the necessary return values */ 1210*14b24e2bSVaishali Kulkarni *pp_doorbell = (u8 OSAL_IOMEM *) 1211*14b24e2bSVaishali Kulkarni p_hwfn->doorbells + 1212*14b24e2bSVaishali Kulkarni DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY); 1213*14b24e2bSVaishali Kulkarni 1214*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 1215*14b24e2bSVaishali Kulkarni } 1216*14b24e2bSVaishali Kulkarni 1217*14b24e2bSVaishali Kulkarni enum _ecore_status_t 1218*14b24e2bSVaishali Kulkarni ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid, 1219*14b24e2bSVaishali Kulkarni struct ecore_queue_start_common_params *p_params, 1220*14b24e2bSVaishali Kulkarni u8 tc, 1221*14b24e2bSVaishali Kulkarni dma_addr_t pbl_addr, u16 pbl_size, 1222*14b24e2bSVaishali Kulkarni struct 
ecore_txq_start_ret_params *p_ret_params) 1223*14b24e2bSVaishali Kulkarni { 1224*14b24e2bSVaishali Kulkarni struct ecore_queue_cid *p_cid; 1225*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc; 1226*14b24e2bSVaishali Kulkarni 1227*14b24e2bSVaishali Kulkarni p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params); 1228*14b24e2bSVaishali Kulkarni if (p_cid == OSAL_NULL) 1229*14b24e2bSVaishali Kulkarni return ECORE_INVAL; 1230*14b24e2bSVaishali Kulkarni 1231*14b24e2bSVaishali Kulkarni if (IS_PF(p_hwfn->p_dev)) 1232*14b24e2bSVaishali Kulkarni rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc, 1233*14b24e2bSVaishali Kulkarni pbl_addr, pbl_size, 1234*14b24e2bSVaishali Kulkarni &p_ret_params->p_doorbell); 1235*14b24e2bSVaishali Kulkarni else 1236*14b24e2bSVaishali Kulkarni rc = ecore_vf_pf_txq_start(p_hwfn, p_cid, 1237*14b24e2bSVaishali Kulkarni pbl_addr, pbl_size, 1238*14b24e2bSVaishali Kulkarni &p_ret_params->p_doorbell); 1239*14b24e2bSVaishali Kulkarni 1240*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 1241*14b24e2bSVaishali Kulkarni ecore_eth_queue_cid_release(p_hwfn, p_cid); 1242*14b24e2bSVaishali Kulkarni else 1243*14b24e2bSVaishali Kulkarni p_ret_params->p_handle = (void *)p_cid; 1244*14b24e2bSVaishali Kulkarni 1245*14b24e2bSVaishali Kulkarni return rc; 1246*14b24e2bSVaishali Kulkarni } 1247*14b24e2bSVaishali Kulkarni 1248*14b24e2bSVaishali Kulkarni static enum _ecore_status_t 1249*14b24e2bSVaishali Kulkarni ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn, 1250*14b24e2bSVaishali Kulkarni struct ecore_queue_cid *p_cid) 1251*14b24e2bSVaishali Kulkarni { 1252*14b24e2bSVaishali Kulkarni struct ecore_spq_entry *p_ent = OSAL_NULL; 1253*14b24e2bSVaishali Kulkarni struct ecore_sp_init_data init_data; 1254*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc; 1255*14b24e2bSVaishali Kulkarni 1256*14b24e2bSVaishali Kulkarni OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1257*14b24e2bSVaishali Kulkarni init_data.cid = p_cid->cid; 1258*14b24e2bSVaishali 
Kulkarni init_data.opaque_fid = p_cid->opaque_fid; 1259*14b24e2bSVaishali Kulkarni init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 1260*14b24e2bSVaishali Kulkarni 1261*14b24e2bSVaishali Kulkarni rc = ecore_sp_init_request(p_hwfn, &p_ent, 1262*14b24e2bSVaishali Kulkarni ETH_RAMROD_TX_QUEUE_STOP, 1263*14b24e2bSVaishali Kulkarni PROTOCOLID_ETH, &init_data); 1264*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 1265*14b24e2bSVaishali Kulkarni return rc; 1266*14b24e2bSVaishali Kulkarni 1267*14b24e2bSVaishali Kulkarni return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1268*14b24e2bSVaishali Kulkarni } 1269*14b24e2bSVaishali Kulkarni 1270*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn, 1271*14b24e2bSVaishali Kulkarni void *p_handle) 1272*14b24e2bSVaishali Kulkarni { 1273*14b24e2bSVaishali Kulkarni struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle; 1274*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc; 1275*14b24e2bSVaishali Kulkarni 1276*14b24e2bSVaishali Kulkarni if (IS_PF(p_hwfn->p_dev)) 1277*14b24e2bSVaishali Kulkarni rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid); 1278*14b24e2bSVaishali Kulkarni else 1279*14b24e2bSVaishali Kulkarni rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid); 1280*14b24e2bSVaishali Kulkarni 1281*14b24e2bSVaishali Kulkarni if (rc == ECORE_SUCCESS) 1282*14b24e2bSVaishali Kulkarni ecore_eth_queue_cid_release(p_hwfn, p_cid); 1283*14b24e2bSVaishali Kulkarni return rc; 1284*14b24e2bSVaishali Kulkarni } 1285*14b24e2bSVaishali Kulkarni 1286*14b24e2bSVaishali Kulkarni static enum eth_filter_action ecore_filter_action(enum ecore_filter_opcode opcode) 1287*14b24e2bSVaishali Kulkarni { 1288*14b24e2bSVaishali Kulkarni enum eth_filter_action action = MAX_ETH_FILTER_ACTION; 1289*14b24e2bSVaishali Kulkarni 1290*14b24e2bSVaishali Kulkarni switch (opcode) { 1291*14b24e2bSVaishali Kulkarni case ECORE_FILTER_ADD: 1292*14b24e2bSVaishali Kulkarni action = ETH_FILTER_ACTION_ADD; 1293*14b24e2bSVaishali Kulkarni 
break; 1294*14b24e2bSVaishali Kulkarni case ECORE_FILTER_REMOVE: 1295*14b24e2bSVaishali Kulkarni action = ETH_FILTER_ACTION_REMOVE; 1296*14b24e2bSVaishali Kulkarni break; 1297*14b24e2bSVaishali Kulkarni case ECORE_FILTER_FLUSH: 1298*14b24e2bSVaishali Kulkarni action = ETH_FILTER_ACTION_REMOVE_ALL; 1299*14b24e2bSVaishali Kulkarni break; 1300*14b24e2bSVaishali Kulkarni default: 1301*14b24e2bSVaishali Kulkarni action = MAX_ETH_FILTER_ACTION; 1302*14b24e2bSVaishali Kulkarni } 1303*14b24e2bSVaishali Kulkarni 1304*14b24e2bSVaishali Kulkarni return action; 1305*14b24e2bSVaishali Kulkarni } 1306*14b24e2bSVaishali Kulkarni 1307*14b24e2bSVaishali Kulkarni static enum _ecore_status_t 1308*14b24e2bSVaishali Kulkarni ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn, 1309*14b24e2bSVaishali Kulkarni u16 opaque_fid, 1310*14b24e2bSVaishali Kulkarni struct ecore_filter_ucast *p_filter_cmd, 1311*14b24e2bSVaishali Kulkarni struct vport_filter_update_ramrod_data **pp_ramrod, 1312*14b24e2bSVaishali Kulkarni struct ecore_spq_entry **pp_ent, 1313*14b24e2bSVaishali Kulkarni enum spq_mode comp_mode, 1314*14b24e2bSVaishali Kulkarni struct ecore_spq_comp_cb *p_comp_data) 1315*14b24e2bSVaishali Kulkarni { 1316*14b24e2bSVaishali Kulkarni u8 vport_to_add_to = 0, vport_to_remove_from = 0; 1317*14b24e2bSVaishali Kulkarni struct vport_filter_update_ramrod_data *p_ramrod; 1318*14b24e2bSVaishali Kulkarni struct eth_filter_cmd *p_first_filter; 1319*14b24e2bSVaishali Kulkarni struct eth_filter_cmd *p_second_filter; 1320*14b24e2bSVaishali Kulkarni struct ecore_sp_init_data init_data; 1321*14b24e2bSVaishali Kulkarni enum eth_filter_action action; 1322*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc; 1323*14b24e2bSVaishali Kulkarni 1324*14b24e2bSVaishali Kulkarni rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, 1325*14b24e2bSVaishali Kulkarni &vport_to_remove_from); 1326*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 1327*14b24e2bSVaishali Kulkarni return rc; 
1328*14b24e2bSVaishali Kulkarni 1329*14b24e2bSVaishali Kulkarni rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, 1330*14b24e2bSVaishali Kulkarni &vport_to_add_to); 1331*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 1332*14b24e2bSVaishali Kulkarni return rc; 1333*14b24e2bSVaishali Kulkarni 1334*14b24e2bSVaishali Kulkarni /* Get SPQ entry */ 1335*14b24e2bSVaishali Kulkarni OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1336*14b24e2bSVaishali Kulkarni init_data.cid = ecore_spq_get_cid(p_hwfn); 1337*14b24e2bSVaishali Kulkarni init_data.opaque_fid = opaque_fid; 1338*14b24e2bSVaishali Kulkarni init_data.comp_mode = comp_mode; 1339*14b24e2bSVaishali Kulkarni init_data.p_comp_data = p_comp_data; 1340*14b24e2bSVaishali Kulkarni 1341*14b24e2bSVaishali Kulkarni rc = ecore_sp_init_request(p_hwfn, pp_ent, 1342*14b24e2bSVaishali Kulkarni ETH_RAMROD_FILTERS_UPDATE, 1343*14b24e2bSVaishali Kulkarni PROTOCOLID_ETH, &init_data); 1344*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 1345*14b24e2bSVaishali Kulkarni return rc; 1346*14b24e2bSVaishali Kulkarni 1347*14b24e2bSVaishali Kulkarni *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update; 1348*14b24e2bSVaishali Kulkarni p_ramrod = *pp_ramrod; 1349*14b24e2bSVaishali Kulkarni p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0; 1350*14b24e2bSVaishali Kulkarni p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 
1 : 0; 1351*14b24e2bSVaishali Kulkarni 1352*14b24e2bSVaishali Kulkarni #ifndef ASIC_ONLY 1353*14b24e2bSVaishali Kulkarni if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 1354*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 1355*14b24e2bSVaishali Kulkarni "Non-Asic - prevent Tx filters\n"); 1356*14b24e2bSVaishali Kulkarni p_ramrod->filter_cmd_hdr.tx = 0; 1357*14b24e2bSVaishali Kulkarni } 1358*14b24e2bSVaishali Kulkarni 1359*14b24e2bSVaishali Kulkarni #endif 1360*14b24e2bSVaishali Kulkarni 1361*14b24e2bSVaishali Kulkarni switch (p_filter_cmd->opcode) { 1362*14b24e2bSVaishali Kulkarni case ECORE_FILTER_REPLACE: 1363*14b24e2bSVaishali Kulkarni case ECORE_FILTER_MOVE: 1364*14b24e2bSVaishali Kulkarni p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break; 1365*14b24e2bSVaishali Kulkarni default: 1366*14b24e2bSVaishali Kulkarni p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break; 1367*14b24e2bSVaishali Kulkarni } 1368*14b24e2bSVaishali Kulkarni 1369*14b24e2bSVaishali Kulkarni p_first_filter = &p_ramrod->filter_cmds[0]; 1370*14b24e2bSVaishali Kulkarni p_second_filter = &p_ramrod->filter_cmds[1]; 1371*14b24e2bSVaishali Kulkarni 1372*14b24e2bSVaishali Kulkarni switch (p_filter_cmd->type) { 1373*14b24e2bSVaishali Kulkarni case ECORE_FILTER_MAC: 1374*14b24e2bSVaishali Kulkarni p_first_filter->type = ETH_FILTER_TYPE_MAC; break; 1375*14b24e2bSVaishali Kulkarni case ECORE_FILTER_VLAN: 1376*14b24e2bSVaishali Kulkarni p_first_filter->type = ETH_FILTER_TYPE_VLAN; break; 1377*14b24e2bSVaishali Kulkarni case ECORE_FILTER_MAC_VLAN: 1378*14b24e2bSVaishali Kulkarni p_first_filter->type = ETH_FILTER_TYPE_PAIR; break; 1379*14b24e2bSVaishali Kulkarni case ECORE_FILTER_INNER_MAC: 1380*14b24e2bSVaishali Kulkarni p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break; 1381*14b24e2bSVaishali Kulkarni case ECORE_FILTER_INNER_VLAN: 1382*14b24e2bSVaishali Kulkarni p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break; 1383*14b24e2bSVaishali Kulkarni case ECORE_FILTER_INNER_PAIR: 1384*14b24e2bSVaishali Kulkarni 
p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break; 1385*14b24e2bSVaishali Kulkarni case ECORE_FILTER_INNER_MAC_VNI_PAIR: 1386*14b24e2bSVaishali Kulkarni p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR; 1387*14b24e2bSVaishali Kulkarni break; 1388*14b24e2bSVaishali Kulkarni case ECORE_FILTER_MAC_VNI_PAIR: 1389*14b24e2bSVaishali Kulkarni p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break; 1390*14b24e2bSVaishali Kulkarni case ECORE_FILTER_VNI: 1391*14b24e2bSVaishali Kulkarni p_first_filter->type = ETH_FILTER_TYPE_VNI; break; 1392*14b24e2bSVaishali Kulkarni } 1393*14b24e2bSVaishali Kulkarni 1394*14b24e2bSVaishali Kulkarni if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) || 1395*14b24e2bSVaishali Kulkarni (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || 1396*14b24e2bSVaishali Kulkarni (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) || 1397*14b24e2bSVaishali Kulkarni (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) || 1398*14b24e2bSVaishali Kulkarni (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || 1399*14b24e2bSVaishali Kulkarni (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) 1400*14b24e2bSVaishali Kulkarni ecore_set_fw_mac_addr(&p_first_filter->mac_msb, 1401*14b24e2bSVaishali Kulkarni &p_first_filter->mac_mid, 1402*14b24e2bSVaishali Kulkarni &p_first_filter->mac_lsb, 1403*14b24e2bSVaishali Kulkarni (u8 *)p_filter_cmd->mac); 1404*14b24e2bSVaishali Kulkarni 1405*14b24e2bSVaishali Kulkarni if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) || 1406*14b24e2bSVaishali Kulkarni (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || 1407*14b24e2bSVaishali Kulkarni (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) || 1408*14b24e2bSVaishali Kulkarni (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR)) 1409*14b24e2bSVaishali Kulkarni p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan); 1410*14b24e2bSVaishali Kulkarni 1411*14b24e2bSVaishali Kulkarni if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || 
1412*14b24e2bSVaishali Kulkarni (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) || 1413*14b24e2bSVaishali Kulkarni (p_first_filter->type == ETH_FILTER_TYPE_VNI)) 1414*14b24e2bSVaishali Kulkarni p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni); 1415*14b24e2bSVaishali Kulkarni 1416*14b24e2bSVaishali Kulkarni if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) { 1417*14b24e2bSVaishali Kulkarni p_second_filter->type = p_first_filter->type; 1418*14b24e2bSVaishali Kulkarni p_second_filter->mac_msb = p_first_filter->mac_msb; 1419*14b24e2bSVaishali Kulkarni p_second_filter->mac_mid = p_first_filter->mac_mid; 1420*14b24e2bSVaishali Kulkarni p_second_filter->mac_lsb = p_first_filter->mac_lsb; 1421*14b24e2bSVaishali Kulkarni p_second_filter->vlan_id = p_first_filter->vlan_id; 1422*14b24e2bSVaishali Kulkarni p_second_filter->vni = p_first_filter->vni; 1423*14b24e2bSVaishali Kulkarni 1424*14b24e2bSVaishali Kulkarni p_first_filter->action = ETH_FILTER_ACTION_REMOVE; 1425*14b24e2bSVaishali Kulkarni 1426*14b24e2bSVaishali Kulkarni p_first_filter->vport_id = vport_to_remove_from; 1427*14b24e2bSVaishali Kulkarni 1428*14b24e2bSVaishali Kulkarni p_second_filter->action = ETH_FILTER_ACTION_ADD; 1429*14b24e2bSVaishali Kulkarni p_second_filter->vport_id = vport_to_add_to; 1430*14b24e2bSVaishali Kulkarni } else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) { 1431*14b24e2bSVaishali Kulkarni p_first_filter->vport_id = vport_to_add_to; 1432*14b24e2bSVaishali Kulkarni OSAL_MEMCPY(p_second_filter, p_first_filter, 1433*14b24e2bSVaishali Kulkarni sizeof(*p_second_filter)); 1434*14b24e2bSVaishali Kulkarni p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL; 1435*14b24e2bSVaishali Kulkarni p_second_filter->action = ETH_FILTER_ACTION_ADD; 1436*14b24e2bSVaishali Kulkarni } else { 1437*14b24e2bSVaishali Kulkarni action = ecore_filter_action(p_filter_cmd->opcode); 1438*14b24e2bSVaishali Kulkarni 1439*14b24e2bSVaishali Kulkarni if (action == MAX_ETH_FILTER_ACTION) { 
1440*14b24e2bSVaishali Kulkarni DP_NOTICE(p_hwfn, true, 1441*14b24e2bSVaishali Kulkarni "%d is not supported yet\n", 1442*14b24e2bSVaishali Kulkarni p_filter_cmd->opcode); 1443*14b24e2bSVaishali Kulkarni return ECORE_NOTIMPL; 1444*14b24e2bSVaishali Kulkarni } 1445*14b24e2bSVaishali Kulkarni 1446*14b24e2bSVaishali Kulkarni p_first_filter->action = action; 1447*14b24e2bSVaishali Kulkarni p_first_filter->vport_id = 1448*14b24e2bSVaishali Kulkarni (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ? 1449*14b24e2bSVaishali Kulkarni vport_to_remove_from : vport_to_add_to; 1450*14b24e2bSVaishali Kulkarni } 1451*14b24e2bSVaishali Kulkarni 1452*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 1453*14b24e2bSVaishali Kulkarni } 1454*14b24e2bSVaishali Kulkarni 1455*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn, 1456*14b24e2bSVaishali Kulkarni u16 opaque_fid, 1457*14b24e2bSVaishali Kulkarni struct ecore_filter_ucast *p_filter_cmd, 1458*14b24e2bSVaishali Kulkarni enum spq_mode comp_mode, 1459*14b24e2bSVaishali Kulkarni struct ecore_spq_comp_cb *p_comp_data) 1460*14b24e2bSVaishali Kulkarni { 1461*14b24e2bSVaishali Kulkarni struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL; 1462*14b24e2bSVaishali Kulkarni struct ecore_spq_entry *p_ent = OSAL_NULL; 1463*14b24e2bSVaishali Kulkarni struct eth_filter_cmd_header *p_header; 1464*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc; 1465*14b24e2bSVaishali Kulkarni 1466*14b24e2bSVaishali Kulkarni rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd, 1467*14b24e2bSVaishali Kulkarni &p_ramrod, &p_ent, 1468*14b24e2bSVaishali Kulkarni comp_mode, p_comp_data); 1469*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) { 1470*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "Uni. 
filter command failed %d\n", rc); 1471*14b24e2bSVaishali Kulkarni return rc; 1472*14b24e2bSVaishali Kulkarni } 1473*14b24e2bSVaishali Kulkarni p_header = &p_ramrod->filter_cmd_hdr; 1474*14b24e2bSVaishali Kulkarni p_header->assert_on_error = p_filter_cmd->assert_on_error; 1475*14b24e2bSVaishali Kulkarni 1476*14b24e2bSVaishali Kulkarni rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1477*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) { 1478*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, 1479*14b24e2bSVaishali Kulkarni "Unicast filter ADD command failed %d\n", 1480*14b24e2bSVaishali Kulkarni rc); 1481*14b24e2bSVaishali Kulkarni return rc; 1482*14b24e2bSVaishali Kulkarni } 1483*14b24e2bSVaishali Kulkarni 1484*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 1485*14b24e2bSVaishali Kulkarni "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n", 1486*14b24e2bSVaishali Kulkarni (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" : 1487*14b24e2bSVaishali Kulkarni ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ? 1488*14b24e2bSVaishali Kulkarni "REMOVE" : 1489*14b24e2bSVaishali Kulkarni ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ? 1490*14b24e2bSVaishali Kulkarni "MOVE" : "REPLACE")), 1491*14b24e2bSVaishali Kulkarni (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" : 1492*14b24e2bSVaishali Kulkarni ((p_filter_cmd->type == ECORE_FILTER_VLAN) ? 
1493*14b24e2bSVaishali Kulkarni "VLAN" : "MAC & VLAN"), 1494*14b24e2bSVaishali Kulkarni p_ramrod->filter_cmd_hdr.cmd_cnt, 1495*14b24e2bSVaishali Kulkarni p_filter_cmd->is_rx_filter, 1496*14b24e2bSVaishali Kulkarni p_filter_cmd->is_tx_filter); 1497*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 1498*14b24e2bSVaishali Kulkarni "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n", 1499*14b24e2bSVaishali Kulkarni p_filter_cmd->vport_to_add_to, 1500*14b24e2bSVaishali Kulkarni p_filter_cmd->vport_to_remove_from, 1501*14b24e2bSVaishali Kulkarni p_filter_cmd->mac[0], p_filter_cmd->mac[1], 1502*14b24e2bSVaishali Kulkarni p_filter_cmd->mac[2], p_filter_cmd->mac[3], 1503*14b24e2bSVaishali Kulkarni p_filter_cmd->mac[4], p_filter_cmd->mac[5], 1504*14b24e2bSVaishali Kulkarni p_filter_cmd->vlan); 1505*14b24e2bSVaishali Kulkarni 1506*14b24e2bSVaishali Kulkarni return ECORE_SUCCESS; 1507*14b24e2bSVaishali Kulkarni } 1508*14b24e2bSVaishali Kulkarni 1509*14b24e2bSVaishali Kulkarni /******************************************************************************* 1510*14b24e2bSVaishali Kulkarni * Description: 1511*14b24e2bSVaishali Kulkarni * Calculates crc 32 on a buffer 1512*14b24e2bSVaishali Kulkarni * Note: crc32_length MUST be aligned to 8 1513*14b24e2bSVaishali Kulkarni * Return: 1514*14b24e2bSVaishali Kulkarni ******************************************************************************/ 1515*14b24e2bSVaishali Kulkarni static u32 ecore_calc_crc32c(u8 *crc32_packet, 1516*14b24e2bSVaishali Kulkarni u32 crc32_length, 1517*14b24e2bSVaishali Kulkarni u32 crc32_seed, 1518*14b24e2bSVaishali Kulkarni u8 complement) 1519*14b24e2bSVaishali Kulkarni { 1520*14b24e2bSVaishali Kulkarni u32 byte = 0, bit = 0, crc32_result = crc32_seed; 1521*14b24e2bSVaishali Kulkarni u8 msb = 0, current_byte = 0; 1522*14b24e2bSVaishali Kulkarni 1523*14b24e2bSVaishali Kulkarni if ((crc32_packet == OSAL_NULL) || 1524*14b24e2bSVaishali Kulkarni (crc32_length 
== 0) || 1525*14b24e2bSVaishali Kulkarni ((crc32_length % 8) != 0)) { 1526*14b24e2bSVaishali Kulkarni return crc32_result; 1527*14b24e2bSVaishali Kulkarni } 1528*14b24e2bSVaishali Kulkarni 1529*14b24e2bSVaishali Kulkarni for (byte = 0; byte < crc32_length; byte++) { 1530*14b24e2bSVaishali Kulkarni current_byte = crc32_packet[byte]; 1531*14b24e2bSVaishali Kulkarni for (bit = 0; bit < 8; bit++) { 1532*14b24e2bSVaishali Kulkarni msb = (u8)(crc32_result >> 31); 1533*14b24e2bSVaishali Kulkarni crc32_result = crc32_result << 1; 1534*14b24e2bSVaishali Kulkarni if (msb != (0x1 & (current_byte >> bit))) { 1535*14b24e2bSVaishali Kulkarni crc32_result = crc32_result ^ CRC32_POLY; 1536*14b24e2bSVaishali Kulkarni crc32_result |= 1; /*crc32_result[0] = 1;*/ 1537*14b24e2bSVaishali Kulkarni } 1538*14b24e2bSVaishali Kulkarni } 1539*14b24e2bSVaishali Kulkarni } 1540*14b24e2bSVaishali Kulkarni 1541*14b24e2bSVaishali Kulkarni return crc32_result; 1542*14b24e2bSVaishali Kulkarni } 1543*14b24e2bSVaishali Kulkarni 1544*14b24e2bSVaishali Kulkarni static u32 ecore_crc32c_le(u32 seed, u8 *mac, u32 len) 1545*14b24e2bSVaishali Kulkarni { 1546*14b24e2bSVaishali Kulkarni u32 packet_buf[2] = {0}; 1547*14b24e2bSVaishali Kulkarni 1548*14b24e2bSVaishali Kulkarni OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6); 1549*14b24e2bSVaishali Kulkarni return ecore_calc_crc32c((u8 *)packet_buf, 8, seed, 0); 1550*14b24e2bSVaishali Kulkarni } 1551*14b24e2bSVaishali Kulkarni 1552*14b24e2bSVaishali Kulkarni u8 ecore_mcast_bin_from_mac(u8 *mac) 1553*14b24e2bSVaishali Kulkarni { 1554*14b24e2bSVaishali Kulkarni u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, 1555*14b24e2bSVaishali Kulkarni mac, ETH_ALEN); 1556*14b24e2bSVaishali Kulkarni 1557*14b24e2bSVaishali Kulkarni return crc & 0xff; 1558*14b24e2bSVaishali Kulkarni } 1559*14b24e2bSVaishali Kulkarni 1560*14b24e2bSVaishali Kulkarni static enum _ecore_status_t 1561*14b24e2bSVaishali Kulkarni ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn, 
1562*14b24e2bSVaishali Kulkarni u16 opaque_fid, 1563*14b24e2bSVaishali Kulkarni struct ecore_filter_mcast *p_filter_cmd, 1564*14b24e2bSVaishali Kulkarni enum spq_mode comp_mode, 1565*14b24e2bSVaishali Kulkarni struct ecore_spq_comp_cb *p_comp_data) 1566*14b24e2bSVaishali Kulkarni { 1567*14b24e2bSVaishali Kulkarni unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; 1568*14b24e2bSVaishali Kulkarni struct vport_update_ramrod_data *p_ramrod = OSAL_NULL; 1569*14b24e2bSVaishali Kulkarni struct ecore_spq_entry *p_ent = OSAL_NULL; 1570*14b24e2bSVaishali Kulkarni struct ecore_sp_init_data init_data; 1571*14b24e2bSVaishali Kulkarni u8 abs_vport_id = 0; 1572*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc; 1573*14b24e2bSVaishali Kulkarni int i; 1574*14b24e2bSVaishali Kulkarni 1575*14b24e2bSVaishali Kulkarni if (p_filter_cmd->opcode == ECORE_FILTER_ADD) 1576*14b24e2bSVaishali Kulkarni rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, 1577*14b24e2bSVaishali Kulkarni &abs_vport_id); 1578*14b24e2bSVaishali Kulkarni else 1579*14b24e2bSVaishali Kulkarni rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, 1580*14b24e2bSVaishali Kulkarni &abs_vport_id); 1581*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 1582*14b24e2bSVaishali Kulkarni return rc; 1583*14b24e2bSVaishali Kulkarni 1584*14b24e2bSVaishali Kulkarni /* Get SPQ entry */ 1585*14b24e2bSVaishali Kulkarni OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1586*14b24e2bSVaishali Kulkarni init_data.cid = ecore_spq_get_cid(p_hwfn); 1587*14b24e2bSVaishali Kulkarni init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1588*14b24e2bSVaishali Kulkarni init_data.comp_mode = comp_mode; 1589*14b24e2bSVaishali Kulkarni init_data.p_comp_data = p_comp_data; 1590*14b24e2bSVaishali Kulkarni 1591*14b24e2bSVaishali Kulkarni rc = ecore_sp_init_request(p_hwfn, &p_ent, 1592*14b24e2bSVaishali Kulkarni ETH_RAMROD_VPORT_UPDATE, 1593*14b24e2bSVaishali Kulkarni PROTOCOLID_ETH, &init_data); 1594*14b24e2bSVaishali Kulkarni if (rc != 
ECORE_SUCCESS) { 1595*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc); 1596*14b24e2bSVaishali Kulkarni return rc; 1597*14b24e2bSVaishali Kulkarni } 1598*14b24e2bSVaishali Kulkarni 1599*14b24e2bSVaishali Kulkarni p_ramrod = &p_ent->ramrod.vport_update; 1600*14b24e2bSVaishali Kulkarni p_ramrod->common.update_approx_mcast_flg = 1; 1601*14b24e2bSVaishali Kulkarni 1602*14b24e2bSVaishali Kulkarni /* explicitly clear out the entire vector */ 1603*14b24e2bSVaishali Kulkarni OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 1604*14b24e2bSVaishali Kulkarni 0, sizeof(p_ramrod->approx_mcast.bins)); 1605*14b24e2bSVaishali Kulkarni OSAL_MEMSET(bins, 0, sizeof(unsigned long) * 1606*14b24e2bSVaishali Kulkarni ETH_MULTICAST_MAC_BINS_IN_REGS); 1607*14b24e2bSVaishali Kulkarni /* filter ADD op is explicit set op and it removes 1608*14b24e2bSVaishali Kulkarni * any existing filters for the vport. 1609*14b24e2bSVaishali Kulkarni */ 1610*14b24e2bSVaishali Kulkarni if (p_filter_cmd->opcode == ECORE_FILTER_ADD) { 1611*14b24e2bSVaishali Kulkarni for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { 1612*14b24e2bSVaishali Kulkarni u32 bit; 1613*14b24e2bSVaishali Kulkarni 1614*14b24e2bSVaishali Kulkarni bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]); 1615*14b24e2bSVaishali Kulkarni OSAL_SET_BIT(bit, bins); 1616*14b24e2bSVaishali Kulkarni } 1617*14b24e2bSVaishali Kulkarni 1618*14b24e2bSVaishali Kulkarni /* Convert to correct endianity */ 1619*14b24e2bSVaishali Kulkarni for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { 1620*14b24e2bSVaishali Kulkarni struct vport_update_ramrod_mcast *p_ramrod_bins; 1621*14b24e2bSVaishali Kulkarni u32 *p_bins = (u32 *)bins; 1622*14b24e2bSVaishali Kulkarni 1623*14b24e2bSVaishali Kulkarni p_ramrod_bins = &p_ramrod->approx_mcast; 1624*14b24e2bSVaishali Kulkarni p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]); 1625*14b24e2bSVaishali Kulkarni } 1626*14b24e2bSVaishali Kulkarni } 1627*14b24e2bSVaishali Kulkarni 
1628*14b24e2bSVaishali Kulkarni p_ramrod->common.vport_id = abs_vport_id; 1629*14b24e2bSVaishali Kulkarni 1630*14b24e2bSVaishali Kulkarni rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1631*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 1632*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc); 1633*14b24e2bSVaishali Kulkarni 1634*14b24e2bSVaishali Kulkarni return rc; 1635*14b24e2bSVaishali Kulkarni } 1636*14b24e2bSVaishali Kulkarni 1637*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_filter_mcast_cmd(struct ecore_dev *p_dev, 1638*14b24e2bSVaishali Kulkarni struct ecore_filter_mcast *p_filter_cmd, 1639*14b24e2bSVaishali Kulkarni enum spq_mode comp_mode, 1640*14b24e2bSVaishali Kulkarni struct ecore_spq_comp_cb *p_comp_data) 1641*14b24e2bSVaishali Kulkarni { 1642*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc = ECORE_SUCCESS; 1643*14b24e2bSVaishali Kulkarni int i; 1644*14b24e2bSVaishali Kulkarni 1645*14b24e2bSVaishali Kulkarni /* only ADD and REMOVE operations are supported for multi-cast */ 1646*14b24e2bSVaishali Kulkarni if ((p_filter_cmd->opcode != ECORE_FILTER_ADD && 1647*14b24e2bSVaishali Kulkarni (p_filter_cmd->opcode != ECORE_FILTER_REMOVE)) || 1648*14b24e2bSVaishali Kulkarni (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) { 1649*14b24e2bSVaishali Kulkarni return ECORE_INVAL; 1650*14b24e2bSVaishali Kulkarni } 1651*14b24e2bSVaishali Kulkarni 1652*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, i) { 1653*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1654*14b24e2bSVaishali Kulkarni u16 opaque_fid; 1655*14b24e2bSVaishali Kulkarni 1656*14b24e2bSVaishali Kulkarni if (IS_VF(p_dev)) { 1657*14b24e2bSVaishali Kulkarni ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd); 1658*14b24e2bSVaishali Kulkarni continue; 1659*14b24e2bSVaishali Kulkarni } 1660*14b24e2bSVaishali Kulkarni 1661*14b24e2bSVaishali Kulkarni opaque_fid = p_hwfn->hw_info.opaque_fid; 1662*14b24e2bSVaishali Kulkarni rc = 
ecore_sp_eth_filter_mcast(p_hwfn, 1663*14b24e2bSVaishali Kulkarni opaque_fid, 1664*14b24e2bSVaishali Kulkarni p_filter_cmd, 1665*14b24e2bSVaishali Kulkarni comp_mode, 1666*14b24e2bSVaishali Kulkarni p_comp_data); 1667*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 1668*14b24e2bSVaishali Kulkarni break; 1669*14b24e2bSVaishali Kulkarni } 1670*14b24e2bSVaishali Kulkarni 1671*14b24e2bSVaishali Kulkarni return rc; 1672*14b24e2bSVaishali Kulkarni } 1673*14b24e2bSVaishali Kulkarni 1674*14b24e2bSVaishali Kulkarni enum _ecore_status_t ecore_filter_ucast_cmd(struct ecore_dev *p_dev, 1675*14b24e2bSVaishali Kulkarni struct ecore_filter_ucast *p_filter_cmd, 1676*14b24e2bSVaishali Kulkarni enum spq_mode comp_mode, 1677*14b24e2bSVaishali Kulkarni struct ecore_spq_comp_cb *p_comp_data) 1678*14b24e2bSVaishali Kulkarni { 1679*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc = ECORE_SUCCESS; 1680*14b24e2bSVaishali Kulkarni int i; 1681*14b24e2bSVaishali Kulkarni 1682*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, i) { 1683*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1684*14b24e2bSVaishali Kulkarni u16 opaque_fid; 1685*14b24e2bSVaishali Kulkarni 1686*14b24e2bSVaishali Kulkarni if (IS_VF(p_dev)) { 1687*14b24e2bSVaishali Kulkarni rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd); 1688*14b24e2bSVaishali Kulkarni continue; 1689*14b24e2bSVaishali Kulkarni } 1690*14b24e2bSVaishali Kulkarni 1691*14b24e2bSVaishali Kulkarni opaque_fid = p_hwfn->hw_info.opaque_fid; 1692*14b24e2bSVaishali Kulkarni rc = ecore_sp_eth_filter_ucast(p_hwfn, 1693*14b24e2bSVaishali Kulkarni opaque_fid, 1694*14b24e2bSVaishali Kulkarni p_filter_cmd, 1695*14b24e2bSVaishali Kulkarni comp_mode, 1696*14b24e2bSVaishali Kulkarni p_comp_data); 1697*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 1698*14b24e2bSVaishali Kulkarni break; 1699*14b24e2bSVaishali Kulkarni } 1700*14b24e2bSVaishali Kulkarni 1701*14b24e2bSVaishali Kulkarni return rc; 1702*14b24e2bSVaishali Kulkarni } 
1703*14b24e2bSVaishali Kulkarni 1704*14b24e2bSVaishali Kulkarni /* Statistics related code */ 1705*14b24e2bSVaishali Kulkarni static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn, 1706*14b24e2bSVaishali Kulkarni u32 *p_addr, u32 *p_len, 1707*14b24e2bSVaishali Kulkarni u16 statistics_bin) 1708*14b24e2bSVaishali Kulkarni { 1709*14b24e2bSVaishali Kulkarni if (IS_PF(p_hwfn->p_dev)) { 1710*14b24e2bSVaishali Kulkarni *p_addr = BAR0_MAP_REG_PSDM_RAM + 1711*14b24e2bSVaishali Kulkarni PSTORM_QUEUE_STAT_OFFSET(statistics_bin); 1712*14b24e2bSVaishali Kulkarni *p_len = sizeof(struct eth_pstorm_per_queue_stat); 1713*14b24e2bSVaishali Kulkarni } else { 1714*14b24e2bSVaishali Kulkarni struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1715*14b24e2bSVaishali Kulkarni struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1716*14b24e2bSVaishali Kulkarni 1717*14b24e2bSVaishali Kulkarni *p_addr = p_resp->pfdev_info.stats_info.pstats.address; 1718*14b24e2bSVaishali Kulkarni *p_len = p_resp->pfdev_info.stats_info.pstats.len; 1719*14b24e2bSVaishali Kulkarni } 1720*14b24e2bSVaishali Kulkarni } 1721*14b24e2bSVaishali Kulkarni 1722*14b24e2bSVaishali Kulkarni static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn, 1723*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt, 1724*14b24e2bSVaishali Kulkarni struct ecore_eth_stats *p_stats, 1725*14b24e2bSVaishali Kulkarni u16 statistics_bin) 1726*14b24e2bSVaishali Kulkarni { 1727*14b24e2bSVaishali Kulkarni struct eth_pstorm_per_queue_stat pstats; 1728*14b24e2bSVaishali Kulkarni u32 pstats_addr = 0, pstats_len = 0; 1729*14b24e2bSVaishali Kulkarni 1730*14b24e2bSVaishali Kulkarni __ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len, 1731*14b24e2bSVaishali Kulkarni statistics_bin); 1732*14b24e2bSVaishali Kulkarni 1733*14b24e2bSVaishali Kulkarni OSAL_MEMSET(&pstats, 0, sizeof(pstats)); 1734*14b24e2bSVaishali Kulkarni ecore_memcpy_from(p_hwfn, p_ptt, &pstats, 1735*14b24e2bSVaishali Kulkarni pstats_addr, 
pstats_len); 1736*14b24e2bSVaishali Kulkarni 1737*14b24e2bSVaishali Kulkarni p_stats->common.tx_ucast_bytes += 1738*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(pstats.sent_ucast_bytes); 1739*14b24e2bSVaishali Kulkarni p_stats->common.tx_mcast_bytes += 1740*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(pstats.sent_mcast_bytes); 1741*14b24e2bSVaishali Kulkarni p_stats->common.tx_bcast_bytes += 1742*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(pstats.sent_bcast_bytes); 1743*14b24e2bSVaishali Kulkarni p_stats->common.tx_ucast_pkts += 1744*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(pstats.sent_ucast_pkts); 1745*14b24e2bSVaishali Kulkarni p_stats->common.tx_mcast_pkts += 1746*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(pstats.sent_mcast_pkts); 1747*14b24e2bSVaishali Kulkarni p_stats->common.tx_bcast_pkts += 1748*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(pstats.sent_bcast_pkts); 1749*14b24e2bSVaishali Kulkarni p_stats->common.tx_err_drop_pkts += 1750*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(pstats.error_drop_pkts); 1751*14b24e2bSVaishali Kulkarni } 1752*14b24e2bSVaishali Kulkarni 1753*14b24e2bSVaishali Kulkarni static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn, 1754*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt, 1755*14b24e2bSVaishali Kulkarni struct ecore_eth_stats *p_stats, 1756*14b24e2bSVaishali Kulkarni u16 statistics_bin) 1757*14b24e2bSVaishali Kulkarni { 1758*14b24e2bSVaishali Kulkarni struct tstorm_per_port_stat tstats; 1759*14b24e2bSVaishali Kulkarni u32 tstats_addr, tstats_len; 1760*14b24e2bSVaishali Kulkarni 1761*14b24e2bSVaishali Kulkarni if (IS_PF(p_hwfn->p_dev)) { 1762*14b24e2bSVaishali Kulkarni tstats_addr = BAR0_MAP_REG_TSDM_RAM + 1763*14b24e2bSVaishali Kulkarni TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); 1764*14b24e2bSVaishali Kulkarni tstats_len = sizeof(struct tstorm_per_port_stat); 1765*14b24e2bSVaishali Kulkarni } else { 1766*14b24e2bSVaishali Kulkarni struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1767*14b24e2bSVaishali Kulkarni struct 
pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1768*14b24e2bSVaishali Kulkarni 1769*14b24e2bSVaishali Kulkarni tstats_addr = p_resp->pfdev_info.stats_info.tstats.address; 1770*14b24e2bSVaishali Kulkarni tstats_len = p_resp->pfdev_info.stats_info.tstats.len; 1771*14b24e2bSVaishali Kulkarni } 1772*14b24e2bSVaishali Kulkarni 1773*14b24e2bSVaishali Kulkarni OSAL_MEMSET(&tstats, 0, sizeof(tstats)); 1774*14b24e2bSVaishali Kulkarni ecore_memcpy_from(p_hwfn, p_ptt, &tstats, 1775*14b24e2bSVaishali Kulkarni tstats_addr, tstats_len); 1776*14b24e2bSVaishali Kulkarni 1777*14b24e2bSVaishali Kulkarni p_stats->common.mftag_filter_discards += 1778*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(tstats.mftag_filter_discard); 1779*14b24e2bSVaishali Kulkarni p_stats->common.mac_filter_discards += 1780*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(tstats.eth_mac_filter_discard); 1781*14b24e2bSVaishali Kulkarni } 1782*14b24e2bSVaishali Kulkarni 1783*14b24e2bSVaishali Kulkarni static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn, 1784*14b24e2bSVaishali Kulkarni u32 *p_addr, u32 *p_len, 1785*14b24e2bSVaishali Kulkarni u16 statistics_bin) 1786*14b24e2bSVaishali Kulkarni { 1787*14b24e2bSVaishali Kulkarni if (IS_PF(p_hwfn->p_dev)) { 1788*14b24e2bSVaishali Kulkarni *p_addr = BAR0_MAP_REG_USDM_RAM + 1789*14b24e2bSVaishali Kulkarni USTORM_QUEUE_STAT_OFFSET(statistics_bin); 1790*14b24e2bSVaishali Kulkarni *p_len = sizeof(struct eth_ustorm_per_queue_stat); 1791*14b24e2bSVaishali Kulkarni } else { 1792*14b24e2bSVaishali Kulkarni struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1793*14b24e2bSVaishali Kulkarni struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1794*14b24e2bSVaishali Kulkarni 1795*14b24e2bSVaishali Kulkarni *p_addr = p_resp->pfdev_info.stats_info.ustats.address; 1796*14b24e2bSVaishali Kulkarni *p_len = p_resp->pfdev_info.stats_info.ustats.len; 1797*14b24e2bSVaishali Kulkarni } 1798*14b24e2bSVaishali Kulkarni } 1799*14b24e2bSVaishali Kulkarni 
1800*14b24e2bSVaishali Kulkarni static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn, 1801*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt, 1802*14b24e2bSVaishali Kulkarni struct ecore_eth_stats *p_stats, 1803*14b24e2bSVaishali Kulkarni u16 statistics_bin) 1804*14b24e2bSVaishali Kulkarni { 1805*14b24e2bSVaishali Kulkarni struct eth_ustorm_per_queue_stat ustats; 1806*14b24e2bSVaishali Kulkarni u32 ustats_addr = 0, ustats_len = 0; 1807*14b24e2bSVaishali Kulkarni 1808*14b24e2bSVaishali Kulkarni __ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len, 1809*14b24e2bSVaishali Kulkarni statistics_bin); 1810*14b24e2bSVaishali Kulkarni 1811*14b24e2bSVaishali Kulkarni OSAL_MEMSET(&ustats, 0, sizeof(ustats)); 1812*14b24e2bSVaishali Kulkarni ecore_memcpy_from(p_hwfn, p_ptt, &ustats, 1813*14b24e2bSVaishali Kulkarni ustats_addr, ustats_len); 1814*14b24e2bSVaishali Kulkarni 1815*14b24e2bSVaishali Kulkarni p_stats->common.rx_ucast_bytes += 1816*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(ustats.rcv_ucast_bytes); 1817*14b24e2bSVaishali Kulkarni p_stats->common.rx_mcast_bytes += 1818*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(ustats.rcv_mcast_bytes); 1819*14b24e2bSVaishali Kulkarni p_stats->common.rx_bcast_bytes += 1820*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(ustats.rcv_bcast_bytes); 1821*14b24e2bSVaishali Kulkarni p_stats->common.rx_ucast_pkts += 1822*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(ustats.rcv_ucast_pkts); 1823*14b24e2bSVaishali Kulkarni p_stats->common.rx_mcast_pkts += 1824*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(ustats.rcv_mcast_pkts); 1825*14b24e2bSVaishali Kulkarni p_stats->common.rx_bcast_pkts += 1826*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(ustats.rcv_bcast_pkts); 1827*14b24e2bSVaishali Kulkarni } 1828*14b24e2bSVaishali Kulkarni 1829*14b24e2bSVaishali Kulkarni static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn, 1830*14b24e2bSVaishali Kulkarni u32 *p_addr, u32 *p_len, 1831*14b24e2bSVaishali Kulkarni u16 
statistics_bin) 1832*14b24e2bSVaishali Kulkarni { 1833*14b24e2bSVaishali Kulkarni if (IS_PF(p_hwfn->p_dev)) { 1834*14b24e2bSVaishali Kulkarni *p_addr = BAR0_MAP_REG_MSDM_RAM + 1835*14b24e2bSVaishali Kulkarni MSTORM_QUEUE_STAT_OFFSET(statistics_bin); 1836*14b24e2bSVaishali Kulkarni *p_len = sizeof(struct eth_mstorm_per_queue_stat); 1837*14b24e2bSVaishali Kulkarni } else { 1838*14b24e2bSVaishali Kulkarni struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1839*14b24e2bSVaishali Kulkarni struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; 1840*14b24e2bSVaishali Kulkarni 1841*14b24e2bSVaishali Kulkarni *p_addr = p_resp->pfdev_info.stats_info.mstats.address; 1842*14b24e2bSVaishali Kulkarni *p_len = p_resp->pfdev_info.stats_info.mstats.len; 1843*14b24e2bSVaishali Kulkarni } 1844*14b24e2bSVaishali Kulkarni } 1845*14b24e2bSVaishali Kulkarni 1846*14b24e2bSVaishali Kulkarni static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn, 1847*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt, 1848*14b24e2bSVaishali Kulkarni struct ecore_eth_stats *p_stats, 1849*14b24e2bSVaishali Kulkarni u16 statistics_bin) 1850*14b24e2bSVaishali Kulkarni { 1851*14b24e2bSVaishali Kulkarni struct eth_mstorm_per_queue_stat mstats; 1852*14b24e2bSVaishali Kulkarni u32 mstats_addr = 0, mstats_len = 0; 1853*14b24e2bSVaishali Kulkarni 1854*14b24e2bSVaishali Kulkarni __ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len, 1855*14b24e2bSVaishali Kulkarni statistics_bin); 1856*14b24e2bSVaishali Kulkarni 1857*14b24e2bSVaishali Kulkarni OSAL_MEMSET(&mstats, 0, sizeof(mstats)); 1858*14b24e2bSVaishali Kulkarni ecore_memcpy_from(p_hwfn, p_ptt, &mstats, 1859*14b24e2bSVaishali Kulkarni mstats_addr, mstats_len); 1860*14b24e2bSVaishali Kulkarni 1861*14b24e2bSVaishali Kulkarni p_stats->common.no_buff_discards += 1862*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(mstats.no_buff_discard); 1863*14b24e2bSVaishali Kulkarni p_stats->common.packet_too_big_discard += 1864*14b24e2bSVaishali Kulkarni 
HILO_64_REGPAIR(mstats.packet_too_big_discard); 1865*14b24e2bSVaishali Kulkarni p_stats->common.ttl0_discard += 1866*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(mstats.ttl0_discard); 1867*14b24e2bSVaishali Kulkarni p_stats->common.tpa_coalesced_pkts += 1868*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); 1869*14b24e2bSVaishali Kulkarni p_stats->common.tpa_coalesced_events += 1870*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(mstats.tpa_coalesced_events); 1871*14b24e2bSVaishali Kulkarni p_stats->common.tpa_aborts_num += 1872*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(mstats.tpa_aborts_num); 1873*14b24e2bSVaishali Kulkarni p_stats->common.tpa_coalesced_bytes += 1874*14b24e2bSVaishali Kulkarni HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); 1875*14b24e2bSVaishali Kulkarni } 1876*14b24e2bSVaishali Kulkarni 1877*14b24e2bSVaishali Kulkarni static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn, 1878*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt, 1879*14b24e2bSVaishali Kulkarni struct ecore_eth_stats *p_stats) 1880*14b24e2bSVaishali Kulkarni { 1881*14b24e2bSVaishali Kulkarni struct ecore_eth_stats_common *p_common = &p_stats->common; 1882*14b24e2bSVaishali Kulkarni struct port_stats port_stats; 1883*14b24e2bSVaishali Kulkarni int j; 1884*14b24e2bSVaishali Kulkarni 1885*14b24e2bSVaishali Kulkarni OSAL_MEMSET(&port_stats, 0, sizeof(port_stats)); 1886*14b24e2bSVaishali Kulkarni 1887*14b24e2bSVaishali Kulkarni ecore_memcpy_from(p_hwfn, p_ptt, &port_stats, 1888*14b24e2bSVaishali Kulkarni p_hwfn->mcp_info->port_addr + 1889*14b24e2bSVaishali Kulkarni OFFSETOF(struct public_port, stats), 1890*14b24e2bSVaishali Kulkarni sizeof(port_stats)); 1891*14b24e2bSVaishali Kulkarni 1892*14b24e2bSVaishali Kulkarni p_common->rx_64_byte_packets += port_stats.eth.r64; 1893*14b24e2bSVaishali Kulkarni p_common->rx_65_to_127_byte_packets += port_stats.eth.r127; 1894*14b24e2bSVaishali Kulkarni p_common->rx_128_to_255_byte_packets += port_stats.eth.r255; 
1895*14b24e2bSVaishali Kulkarni p_common->rx_256_to_511_byte_packets += port_stats.eth.r511; 1896*14b24e2bSVaishali Kulkarni p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023; 1897*14b24e2bSVaishali Kulkarni p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518; 1898*14b24e2bSVaishali Kulkarni p_common->rx_crc_errors += port_stats.eth.rfcs; 1899*14b24e2bSVaishali Kulkarni p_common->rx_mac_crtl_frames += port_stats.eth.rxcf; 1900*14b24e2bSVaishali Kulkarni p_common->rx_pause_frames += port_stats.eth.rxpf; 1901*14b24e2bSVaishali Kulkarni p_common->rx_pfc_frames += port_stats.eth.rxpp; 1902*14b24e2bSVaishali Kulkarni p_common->rx_align_errors += port_stats.eth.raln; 1903*14b24e2bSVaishali Kulkarni p_common->rx_carrier_errors += port_stats.eth.rfcr; 1904*14b24e2bSVaishali Kulkarni p_common->rx_oversize_packets += port_stats.eth.rovr; 1905*14b24e2bSVaishali Kulkarni p_common->rx_jabbers += port_stats.eth.rjbr; 1906*14b24e2bSVaishali Kulkarni p_common->rx_undersize_packets += port_stats.eth.rund; 1907*14b24e2bSVaishali Kulkarni p_common->rx_fragments += port_stats.eth.rfrg; 1908*14b24e2bSVaishali Kulkarni p_common->tx_64_byte_packets += port_stats.eth.t64; 1909*14b24e2bSVaishali Kulkarni p_common->tx_65_to_127_byte_packets += port_stats.eth.t127; 1910*14b24e2bSVaishali Kulkarni p_common->tx_128_to_255_byte_packets += port_stats.eth.t255; 1911*14b24e2bSVaishali Kulkarni p_common->tx_256_to_511_byte_packets += port_stats.eth.t511; 1912*14b24e2bSVaishali Kulkarni p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023; 1913*14b24e2bSVaishali Kulkarni p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518; 1914*14b24e2bSVaishali Kulkarni p_common->tx_pause_frames += port_stats.eth.txpf; 1915*14b24e2bSVaishali Kulkarni p_common->tx_pfc_frames += port_stats.eth.txpp; 1916*14b24e2bSVaishali Kulkarni p_common->rx_mac_bytes += port_stats.eth.rbyte; 1917*14b24e2bSVaishali Kulkarni p_common->rx_mac_uc_packets += port_stats.eth.rxuca; 
1918*14b24e2bSVaishali Kulkarni p_common->rx_mac_mc_packets += port_stats.eth.rxmca; 1919*14b24e2bSVaishali Kulkarni p_common->rx_mac_bc_packets += port_stats.eth.rxbca; 1920*14b24e2bSVaishali Kulkarni p_common->rx_mac_frames_ok += port_stats.eth.rxpok; 1921*14b24e2bSVaishali Kulkarni p_common->tx_mac_bytes += port_stats.eth.tbyte; 1922*14b24e2bSVaishali Kulkarni p_common->tx_mac_uc_packets += port_stats.eth.txuca; 1923*14b24e2bSVaishali Kulkarni p_common->tx_mac_mc_packets += port_stats.eth.txmca; 1924*14b24e2bSVaishali Kulkarni p_common->tx_mac_bc_packets += port_stats.eth.txbca; 1925*14b24e2bSVaishali Kulkarni p_common->tx_mac_ctrl_frames += port_stats.eth.txcf; 1926*14b24e2bSVaishali Kulkarni for (j = 0; j < 8; j++) { 1927*14b24e2bSVaishali Kulkarni p_common->brb_truncates += port_stats.brb.brb_truncate[j]; 1928*14b24e2bSVaishali Kulkarni p_common->brb_discards += port_stats.brb.brb_discard[j]; 1929*14b24e2bSVaishali Kulkarni } 1930*14b24e2bSVaishali Kulkarni 1931*14b24e2bSVaishali Kulkarni if (ECORE_IS_BB(p_hwfn->p_dev)) { 1932*14b24e2bSVaishali Kulkarni struct ecore_eth_stats_bb *p_bb = &p_stats->bb; 1933*14b24e2bSVaishali Kulkarni 1934*14b24e2bSVaishali Kulkarni p_bb->rx_1519_to_1522_byte_packets += 1935*14b24e2bSVaishali Kulkarni port_stats.eth.u0.bb0.r1522; 1936*14b24e2bSVaishali Kulkarni p_bb->rx_1519_to_2047_byte_packets += 1937*14b24e2bSVaishali Kulkarni port_stats.eth.u0.bb0.r2047; 1938*14b24e2bSVaishali Kulkarni p_bb->rx_2048_to_4095_byte_packets += 1939*14b24e2bSVaishali Kulkarni port_stats.eth.u0.bb0.r4095; 1940*14b24e2bSVaishali Kulkarni p_bb->rx_4096_to_9216_byte_packets += 1941*14b24e2bSVaishali Kulkarni port_stats.eth.u0.bb0.r9216; 1942*14b24e2bSVaishali Kulkarni p_bb->rx_9217_to_16383_byte_packets += 1943*14b24e2bSVaishali Kulkarni port_stats.eth.u0.bb0.r16383; 1944*14b24e2bSVaishali Kulkarni p_bb->tx_1519_to_2047_byte_packets += 1945*14b24e2bSVaishali Kulkarni port_stats.eth.u1.bb1.t2047; 1946*14b24e2bSVaishali Kulkarni 
p_bb->tx_2048_to_4095_byte_packets += 1947*14b24e2bSVaishali Kulkarni port_stats.eth.u1.bb1.t4095; 1948*14b24e2bSVaishali Kulkarni p_bb->tx_4096_to_9216_byte_packets += 1949*14b24e2bSVaishali Kulkarni port_stats.eth.u1.bb1.t9216; 1950*14b24e2bSVaishali Kulkarni p_bb->tx_9217_to_16383_byte_packets += 1951*14b24e2bSVaishali Kulkarni port_stats.eth.u1.bb1.t16383; 1952*14b24e2bSVaishali Kulkarni p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec; 1953*14b24e2bSVaishali Kulkarni p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl; 1954*14b24e2bSVaishali Kulkarni } else { 1955*14b24e2bSVaishali Kulkarni struct ecore_eth_stats_ah *p_ah = &p_stats->ah; 1956*14b24e2bSVaishali Kulkarni 1957*14b24e2bSVaishali Kulkarni p_ah->rx_1519_to_max_byte_packets += 1958*14b24e2bSVaishali Kulkarni port_stats.eth.u0.ah0.r1519_to_max; 1959*14b24e2bSVaishali Kulkarni p_ah->tx_1519_to_max_byte_packets = 1960*14b24e2bSVaishali Kulkarni port_stats.eth.u1.ah1.t1519_to_max; 1961*14b24e2bSVaishali Kulkarni } 1962*14b24e2bSVaishali Kulkarni } 1963*14b24e2bSVaishali Kulkarni 1964*14b24e2bSVaishali Kulkarni void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn, 1965*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt, 1966*14b24e2bSVaishali Kulkarni struct ecore_eth_stats *stats, 1967*14b24e2bSVaishali Kulkarni u16 statistics_bin, bool b_get_port_stats) 1968*14b24e2bSVaishali Kulkarni { 1969*14b24e2bSVaishali Kulkarni __ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin); 1970*14b24e2bSVaishali Kulkarni __ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin); 1971*14b24e2bSVaishali Kulkarni __ecore_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin); 1972*14b24e2bSVaishali Kulkarni __ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin); 1973*14b24e2bSVaishali Kulkarni 1974*14b24e2bSVaishali Kulkarni #ifndef ASIC_ONLY 1975*14b24e2bSVaishali Kulkarni /* Avoid getting PORT stats for emulation.*/ 1976*14b24e2bSVaishali Kulkarni if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) 
1977*14b24e2bSVaishali Kulkarni return; 1978*14b24e2bSVaishali Kulkarni #endif 1979*14b24e2bSVaishali Kulkarni 1980*14b24e2bSVaishali Kulkarni if (b_get_port_stats && p_hwfn->mcp_info) 1981*14b24e2bSVaishali Kulkarni __ecore_get_vport_port_stats(p_hwfn, p_ptt, stats); 1982*14b24e2bSVaishali Kulkarni } 1983*14b24e2bSVaishali Kulkarni 1984*14b24e2bSVaishali Kulkarni static void _ecore_get_vport_stats(struct ecore_dev *p_dev, 1985*14b24e2bSVaishali Kulkarni struct ecore_eth_stats *stats) 1986*14b24e2bSVaishali Kulkarni { 1987*14b24e2bSVaishali Kulkarni u8 fw_vport = 0; 1988*14b24e2bSVaishali Kulkarni int i; 1989*14b24e2bSVaishali Kulkarni 1990*14b24e2bSVaishali Kulkarni OSAL_MEMSET(stats, 0, sizeof(*stats)); 1991*14b24e2bSVaishali Kulkarni 1992*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, i) { 1993*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 1994*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt = IS_PF(p_dev) ? 1995*14b24e2bSVaishali Kulkarni ecore_ptt_acquire(p_hwfn) : OSAL_NULL; 1996*14b24e2bSVaishali Kulkarni 1997*14b24e2bSVaishali Kulkarni if (IS_PF(p_dev)) { 1998*14b24e2bSVaishali Kulkarni /* The main vport index is relative first */ 1999*14b24e2bSVaishali Kulkarni if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) { 2000*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "No vport available!\n"); 2001*14b24e2bSVaishali Kulkarni goto out; 2002*14b24e2bSVaishali Kulkarni } 2003*14b24e2bSVaishali Kulkarni } 2004*14b24e2bSVaishali Kulkarni 2005*14b24e2bSVaishali Kulkarni if (IS_PF(p_dev) && !p_ptt) { 2006*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "Failed to acquire ptt\n"); 2007*14b24e2bSVaishali Kulkarni continue; 2008*14b24e2bSVaishali Kulkarni } 2009*14b24e2bSVaishali Kulkarni 2010*14b24e2bSVaishali Kulkarni __ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport, 2011*14b24e2bSVaishali Kulkarni IS_PF(p_dev) ? 
true : false); 2012*14b24e2bSVaishali Kulkarni 2013*14b24e2bSVaishali Kulkarni out: 2014*14b24e2bSVaishali Kulkarni if (IS_PF(p_dev) && p_ptt) 2015*14b24e2bSVaishali Kulkarni ecore_ptt_release(p_hwfn, p_ptt); 2016*14b24e2bSVaishali Kulkarni } 2017*14b24e2bSVaishali Kulkarni } 2018*14b24e2bSVaishali Kulkarni 2019*14b24e2bSVaishali Kulkarni void ecore_get_vport_stats(struct ecore_dev *p_dev, 2020*14b24e2bSVaishali Kulkarni struct ecore_eth_stats *stats) 2021*14b24e2bSVaishali Kulkarni { 2022*14b24e2bSVaishali Kulkarni u32 i; 2023*14b24e2bSVaishali Kulkarni 2024*14b24e2bSVaishali Kulkarni if (!p_dev) { 2025*14b24e2bSVaishali Kulkarni OSAL_MEMSET(stats, 0, sizeof(*stats)); 2026*14b24e2bSVaishali Kulkarni return; 2027*14b24e2bSVaishali Kulkarni } 2028*14b24e2bSVaishali Kulkarni 2029*14b24e2bSVaishali Kulkarni _ecore_get_vport_stats(p_dev, stats); 2030*14b24e2bSVaishali Kulkarni 2031*14b24e2bSVaishali Kulkarni if (!p_dev->reset_stats) 2032*14b24e2bSVaishali Kulkarni return; 2033*14b24e2bSVaishali Kulkarni 2034*14b24e2bSVaishali Kulkarni /* Reduce the statistics baseline */ 2035*14b24e2bSVaishali Kulkarni for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++) 2036*14b24e2bSVaishali Kulkarni ((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i]; 2037*14b24e2bSVaishali Kulkarni } 2038*14b24e2bSVaishali Kulkarni 2039*14b24e2bSVaishali Kulkarni /* zeroes V-PORT specific portion of stats (Port stats remains untouched) */ 2040*14b24e2bSVaishali Kulkarni void ecore_reset_vport_stats(struct ecore_dev *p_dev) 2041*14b24e2bSVaishali Kulkarni { 2042*14b24e2bSVaishali Kulkarni int i; 2043*14b24e2bSVaishali Kulkarni 2044*14b24e2bSVaishali Kulkarni for_each_hwfn(p_dev, i) { 2045*14b24e2bSVaishali Kulkarni struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 2046*14b24e2bSVaishali Kulkarni struct eth_mstorm_per_queue_stat mstats; 2047*14b24e2bSVaishali Kulkarni struct eth_ustorm_per_queue_stat ustats; 2048*14b24e2bSVaishali Kulkarni struct eth_pstorm_per_queue_stat pstats; 
2049*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt = IS_PF(p_dev) ? 2050*14b24e2bSVaishali Kulkarni ecore_ptt_acquire(p_hwfn) : OSAL_NULL; 2051*14b24e2bSVaishali Kulkarni u32 addr = 0, len = 0; 2052*14b24e2bSVaishali Kulkarni 2053*14b24e2bSVaishali Kulkarni if (IS_PF(p_dev) && !p_ptt) { 2054*14b24e2bSVaishali Kulkarni DP_ERR(p_hwfn, "Failed to acquire ptt\n"); 2055*14b24e2bSVaishali Kulkarni continue; 2056*14b24e2bSVaishali Kulkarni } 2057*14b24e2bSVaishali Kulkarni 2058*14b24e2bSVaishali Kulkarni OSAL_MEMSET(&mstats, 0, sizeof(mstats)); 2059*14b24e2bSVaishali Kulkarni __ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0); 2060*14b24e2bSVaishali Kulkarni ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len); 2061*14b24e2bSVaishali Kulkarni 2062*14b24e2bSVaishali Kulkarni OSAL_MEMSET(&ustats, 0, sizeof(ustats)); 2063*14b24e2bSVaishali Kulkarni __ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0); 2064*14b24e2bSVaishali Kulkarni ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len); 2065*14b24e2bSVaishali Kulkarni 2066*14b24e2bSVaishali Kulkarni OSAL_MEMSET(&pstats, 0, sizeof(pstats)); 2067*14b24e2bSVaishali Kulkarni __ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0); 2068*14b24e2bSVaishali Kulkarni ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len); 2069*14b24e2bSVaishali Kulkarni 2070*14b24e2bSVaishali Kulkarni if (IS_PF(p_dev)) 2071*14b24e2bSVaishali Kulkarni ecore_ptt_release(p_hwfn, p_ptt); 2072*14b24e2bSVaishali Kulkarni } 2073*14b24e2bSVaishali Kulkarni 2074*14b24e2bSVaishali Kulkarni /* PORT statistics are not necessarily reset, so we need to 2075*14b24e2bSVaishali Kulkarni * read and create a baseline for future statistics. 
2076*14b24e2bSVaishali Kulkarni */ 2077*14b24e2bSVaishali Kulkarni if (!p_dev->reset_stats) 2078*14b24e2bSVaishali Kulkarni DP_INFO(p_dev, "Reset stats not allocated\n"); 2079*14b24e2bSVaishali Kulkarni else 2080*14b24e2bSVaishali Kulkarni _ecore_get_vport_stats(p_dev, p_dev->reset_stats); 2081*14b24e2bSVaishali Kulkarni } 2082*14b24e2bSVaishali Kulkarni 2083*14b24e2bSVaishali Kulkarni void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn, 2084*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt, 2085*14b24e2bSVaishali Kulkarni struct ecore_arfs_config_params *p_cfg_params) 2086*14b24e2bSVaishali Kulkarni { 2087*14b24e2bSVaishali Kulkarni if (p_cfg_params->arfs_enable) { 2088*14b24e2bSVaishali Kulkarni ecore_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id, 2089*14b24e2bSVaishali Kulkarni p_cfg_params->tcp, 2090*14b24e2bSVaishali Kulkarni p_cfg_params->udp, 2091*14b24e2bSVaishali Kulkarni p_cfg_params->ipv4, 2092*14b24e2bSVaishali Kulkarni p_cfg_params->ipv6); 2093*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 2094*14b24e2bSVaishali Kulkarni "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n", 2095*14b24e2bSVaishali Kulkarni p_cfg_params->tcp ? "Enable" : "Disable", 2096*14b24e2bSVaishali Kulkarni p_cfg_params->udp ? "Enable" : "Disable", 2097*14b24e2bSVaishali Kulkarni p_cfg_params->ipv4 ? "Enable" : "Disable", 2098*14b24e2bSVaishali Kulkarni p_cfg_params->ipv6 ? "Enable" : "Disable"); 2099*14b24e2bSVaishali Kulkarni } else { 2100*14b24e2bSVaishali Kulkarni ecore_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 2101*14b24e2bSVaishali Kulkarni } 2102*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %s\n", 2103*14b24e2bSVaishali Kulkarni p_cfg_params->arfs_enable ? 
"Enable" : "Disable"); 2104*14b24e2bSVaishali Kulkarni } 2105*14b24e2bSVaishali Kulkarni 2106*14b24e2bSVaishali Kulkarni enum _ecore_status_t 2107*14b24e2bSVaishali Kulkarni ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn, 2108*14b24e2bSVaishali Kulkarni struct ecore_ptt *p_ptt, 2109*14b24e2bSVaishali Kulkarni struct ecore_spq_comp_cb *p_cb, 2110*14b24e2bSVaishali Kulkarni dma_addr_t p_addr, u16 length, 2111*14b24e2bSVaishali Kulkarni u16 qid, u8 vport_id, 2112*14b24e2bSVaishali Kulkarni bool b_is_add) 2113*14b24e2bSVaishali Kulkarni { 2114*14b24e2bSVaishali Kulkarni struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL; 2115*14b24e2bSVaishali Kulkarni struct ecore_spq_entry *p_ent = OSAL_NULL; 2116*14b24e2bSVaishali Kulkarni struct ecore_sp_init_data init_data; 2117*14b24e2bSVaishali Kulkarni u16 abs_rx_q_id = 0; 2118*14b24e2bSVaishali Kulkarni u8 abs_vport_id = 0; 2119*14b24e2bSVaishali Kulkarni enum _ecore_status_t rc = ECORE_NOTIMPL; 2120*14b24e2bSVaishali Kulkarni 2121*14b24e2bSVaishali Kulkarni rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id); 2122*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 2123*14b24e2bSVaishali Kulkarni return rc; 2124*14b24e2bSVaishali Kulkarni 2125*14b24e2bSVaishali Kulkarni rc = ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id); 2126*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 2127*14b24e2bSVaishali Kulkarni return rc; 2128*14b24e2bSVaishali Kulkarni 2129*14b24e2bSVaishali Kulkarni /* Get SPQ entry */ 2130*14b24e2bSVaishali Kulkarni OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 2131*14b24e2bSVaishali Kulkarni init_data.cid = ecore_spq_get_cid(p_hwfn); 2132*14b24e2bSVaishali Kulkarni 2133*14b24e2bSVaishali Kulkarni init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 2134*14b24e2bSVaishali Kulkarni 2135*14b24e2bSVaishali Kulkarni if (p_cb) { 2136*14b24e2bSVaishali Kulkarni init_data.comp_mode = ECORE_SPQ_MODE_CB; 2137*14b24e2bSVaishali Kulkarni init_data.p_comp_data = p_cb; 2138*14b24e2bSVaishali Kulkarni } 
else { 2139*14b24e2bSVaishali Kulkarni init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 2140*14b24e2bSVaishali Kulkarni } 2141*14b24e2bSVaishali Kulkarni 2142*14b24e2bSVaishali Kulkarni rc = ecore_sp_init_request(p_hwfn, &p_ent, 2143*14b24e2bSVaishali Kulkarni ETH_RAMROD_GFT_UPDATE_FILTER, 2144*14b24e2bSVaishali Kulkarni PROTOCOLID_ETH, &init_data); 2145*14b24e2bSVaishali Kulkarni if (rc != ECORE_SUCCESS) 2146*14b24e2bSVaishali Kulkarni return rc; 2147*14b24e2bSVaishali Kulkarni 2148*14b24e2bSVaishali Kulkarni p_ramrod = &p_ent->ramrod.rx_update_gft; 2149*14b24e2bSVaishali Kulkarni 2150*14b24e2bSVaishali Kulkarni DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr); 2151*14b24e2bSVaishali Kulkarni p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length); 2152*14b24e2bSVaishali Kulkarni p_ramrod->rx_qid_or_action_icid = OSAL_CPU_TO_LE16(abs_rx_q_id); 2153*14b24e2bSVaishali Kulkarni p_ramrod->vport_id = abs_vport_id; 2154*14b24e2bSVaishali Kulkarni p_ramrod->filter_type = RFS_FILTER_TYPE; 2155*14b24e2bSVaishali Kulkarni p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER 2156*14b24e2bSVaishali Kulkarni : GFT_DELETE_FILTER; 2157*14b24e2bSVaishali Kulkarni 2158*14b24e2bSVaishali Kulkarni DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 2159*14b24e2bSVaishali Kulkarni "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n", 2160*14b24e2bSVaishali Kulkarni abs_vport_id, abs_rx_q_id, 2161*14b24e2bSVaishali Kulkarni b_is_add ? "Adding" : "Removing", 2162*14b24e2bSVaishali Kulkarni (u64)p_addr, length); 2163*14b24e2bSVaishali Kulkarni 2164*14b24e2bSVaishali Kulkarni return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 2165*14b24e2bSVaishali Kulkarni } 2166