/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, v.1,  (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/

/*
* Copyright 2014-2017 Cavium, Inc.
* The contents of this file are subject to the terms of the Common Development
* and Distribution License, v.1,  (the "License").

* You may not use this file except in compliance with the License.

* You can obtain a copy of the License at
* http://opensource.org/licenses/CDDL-1.0

* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_ll2.h"
#include "ecore_fcoe.h"
#include "ecore_iscsi.h"
#include "ecore_ooo.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_roce.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dev_api.h"
#include "ecore_dcbx.h"
#include "pcics_reg_driver.h"
#include "ecore_l2.h"

/* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM
 * registers involved are not split and thus configuration is a race where
 * some of the PFs' configuration might be lost.
 * Eventually, this needs to move into an MFW-covered HW-lock arbitration
 * mechanism, as this doesn't cover some cases [e.g., PDA, or scenarios where
 * there's more than a single compiled ecore component in the system].
 */
static osal_spinlock_t qm_lock;
static bool qm_lock_init = false;

/* Configurable */
#define ECORE_MIN_DPIS		(4)  /* The minimal number of DPIs required to
				      * load the driver. The number was
				      * arbitrarily set.
				      */

/* Derived */
#define ECORE_MIN_PWM_REGION	((ECORE_WID_SIZE) * (ECORE_MIN_DPIS))

enum BAR_ID {
	BAR_ID_0,	/* used for GRC */
	BAR_ID_1	/* Used for doorbells */
};

static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
{
	u32 bar_reg = (bar_id == BAR_ID_0 ?
		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
	u32 val;

	if (IS_VF(p_hwfn->p_dev)) {
		/* TODO - assume each VF hwfn has 64Kb for Bar0; Bar1 can be
		 * read from actual register, but we're currently not using
		 * it for actual doorbelling.
		 */
		return 1 << 17;
	}

	val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
	if (val)
		return 1 << (val + 15);

	/* The above registers were updated in the past only in CMT mode. Since
	 * they were found to be useful, MFW started updating them from 8.7.7.0.
	 * In older MFW versions they are set to 0 which means disabled.
	 */
	if (p_hwfn->p_dev->num_hwfns > 1) {
		DP_NOTICE(p_hwfn, false,
			  "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
		return (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
	} else {
		DP_NOTICE(p_hwfn, false,
			  "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
		return 512 * 1024;
	}
}
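
/* Note on the size encoding read above: the register value is used as a
 * power-of-two exponent, so a non-zero value N yields a BAR size of
 * (1 << (N + 15)) bytes - e.g. N = 1 gives 64kB and N = 4 gives 512kB.
 */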

void ecore_init_dp(struct ecore_dev	*p_dev,
		   u32			dp_module,
		   u8			dp_level,
		   void		 *dp_ctx)
{
	u32 i;

	p_dev->dp_level = dp_level;
	p_dev->dp_module = dp_module;
	p_dev->dp_ctx = dp_ctx;
	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		p_hwfn->dp_level = dp_level;
		p_hwfn->dp_module = dp_module;
		p_hwfn->dp_ctx = dp_ctx;
	}
}

void ecore_init_struct(struct ecore_dev *p_dev)
{
	u8 i;

	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		p_hwfn->p_dev = p_dev;
		p_hwfn->my_id = i;
		p_hwfn->b_active = false;

		OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex);
		OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex);
	}

	/* hwfn 0 is always active */
	p_dev->hwfns[0].b_active = true;

	/* set the default cache alignment to 128 (may be overridden later) */
	p_dev->cache_shift = 7;
}

static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params);
	qm_info->qm_pq_params = OSAL_NULL;
	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params);
	qm_info->qm_vport_params = OSAL_NULL;
	OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params);
	qm_info->qm_port_params = OSAL_NULL;
	OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
	qm_info->wfq_data = OSAL_NULL;
}

void ecore_resc_free(struct ecore_dev *p_dev)
{
	int i;

	if (IS_VF(p_dev)) {
		for_each_hwfn(p_dev, i)
			ecore_l2_free(&p_dev->hwfns[i]);
		return;
	}

	OSAL_FREE(p_dev, p_dev->fw_data);
	p_dev->fw_data = OSAL_NULL;

	OSAL_FREE(p_dev, p_dev->reset_stats);
	p_dev->reset_stats = OSAL_NULL;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		ecore_cxt_mngr_free(p_hwfn);
		ecore_qm_info_free(p_hwfn);
		ecore_spq_free(p_hwfn);
		ecore_eq_free(p_hwfn);
		ecore_consq_free(p_hwfn);
		ecore_int_free(p_hwfn);
#ifdef CONFIG_ECORE_LL2
		ecore_ll2_free(p_hwfn);
#endif
#ifdef CONFIG_ECORE_FCOE
		if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE)
			ecore_fcoe_free(p_hwfn);
#endif
#ifdef CONFIG_ECORE_ISCSI
		if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
			ecore_iscsi_free(p_hwfn);
			ecore_ooo_free(p_hwfn);
		}
#endif
#ifdef CONFIG_ECORE_ROCE
		if (ECORE_IS_RDMA_PERSONALITY(p_hwfn))
			ecore_rdma_info_free(p_hwfn);
#endif
		ecore_iov_free(p_hwfn);
		ecore_l2_free(p_hwfn);
		ecore_dmae_info_free(p_hwfn);
		ecore_dcbx_info_free(p_hwfn);
		/* @@@TBD Flush work-queue ?*/
	}
}

/******************** QM initialization *******************/

/* bitmaps for indicating active traffic classes. Special case for Arrowhead 4 port */
#define ACTIVE_TCS_BMAP 0x9f /* 0..3 actually used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */
#define ACTIVE_TCS_BMAP_4PORT_K2 0xf /* 0..3 actually used, OOO and high priority stuff all use 3 */
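
/* In these bitmaps bit i marks TC i as active: 0x9f sets bits 0-4 and 7
 * (the four physical TCs, the OOO TC and the high-priority TC), while 0xf
 * sets bits 0-3 only, matching the comments above.
 */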

/* determines the physical queue flags for a given PF. */
static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
{
	u32 flags;

	/* common flags */
	flags = PQ_FLAGS_LB;

	/* feature flags */
	if (IS_ECORE_SRIOV(p_hwfn->p_dev))
		flags |= PQ_FLAGS_VFS;
	if (IS_ECORE_DCQCN(p_hwfn))
		flags |= PQ_FLAGS_RLS;

	/* protocol flags */
	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ETH:
		flags |= PQ_FLAGS_MCOS;
		break;
	case ECORE_PCI_FCOE:
		flags |= PQ_FLAGS_OFLD;
		break;
	case ECORE_PCI_ISCSI:
		flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
		break;
	case ECORE_PCI_ETH_ROCE:
		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
		break;
	case ECORE_PCI_ETH_IWARP:
		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
		break;
	default:
		DP_ERR(p_hwfn, "unknown personality %d\n", p_hwfn->hw_info.personality);
		return 0;
	}

	return flags;
}


/* Getters for resource amounts necessary for qm initialization */
u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn)
{
	return p_hwfn->hw_info.num_hw_tc;
}

u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn)
{
	return IS_ECORE_SRIOV(p_hwfn->p_dev) ? p_hwfn->p_dev->p_iov_info->total_vfs : 0;
}

#define NUM_DEFAULT_RLS 1

u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn)
{
	u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);

	/* num RLs can't exceed resource amount of rls or vports or the dcqcn qps */
	num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
				     (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_VPORT),
						     ROCE_DCQCN_RP_MAX_QPS));

	/* make sure after we reserve the default and VF rls we'll have something left */
	if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) {
		if (IS_ECORE_DCQCN(p_hwfn))
			DP_NOTICE(p_hwfn, false, "no rate limiters left for PF rate limiting [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs);
		return 0;
	}

	/* subtract rls necessary for VFs and one default one for the PF */
	num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;

	return num_pf_rls;
}
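
/* For example, hypothetically, with 16 rate limiters in RESC(RL), plenty of
 * vports/QPs, and 8 VFs, the PF would end up with
 * 16 - (8 + NUM_DEFAULT_RLS) = 7 rate limiters left for its own PQs.
 */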

u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn)
{
	u32 pq_flags = ecore_get_pq_flags(p_hwfn);

	/* all pqs share the same vport (hence the 1 below), except for vfs and pf_rl pqs */
	return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) +
	       (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn) + 1;
}
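
/* E.g., hypothetically, an Ethernet PF with SR-IOV, 8 VFs and no DCQCN would
 * need 8 + 1 = 9 vports: one per VF PQ plus the single shared PF vport.
 */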

/* calc amount of PQs according to the requested flags */
u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn)
{
	u32 pq_flags = ecore_get_pq_flags(p_hwfn);

	return (!!(PQ_FLAGS_RLS & pq_flags)) * ecore_init_qm_get_num_pf_rls(p_hwfn) +
	       (!!(PQ_FLAGS_MCOS & pq_flags)) * ecore_init_qm_get_num_tcs(p_hwfn) +
	       (!!(PQ_FLAGS_LB & pq_flags)) +
	       (!!(PQ_FLAGS_OOO & pq_flags)) +
	       (!!(PQ_FLAGS_ACK & pq_flags)) +
	       (!!(PQ_FLAGS_OFLD & pq_flags)) +
	       (!!(PQ_FLAGS_LLT & pq_flags)) +
	       (!!(PQ_FLAGS_VFS & pq_flags)) * ecore_init_qm_get_num_vfs(p_hwfn);
}
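
/* Worked example (hypothetical): an ECORE_PCI_ETH PF with 4 TCs, 8 VFs and
 * DCQCN disabled has pq_flags = LB | MCOS | VFS, so
 * num_pqs = 4 (MCOS) + 1 (LB) + 8 (VFS) = 13.
 */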

/* initialize the top level QM params */
static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	bool four_port;

	/* pq and vport bases for this PF */
	qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
	qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);

	/* rate limiting and weighted fair queueing are always enabled */
	qm_info->vport_rl_en = 1;
	qm_info->vport_wfq_en = 1;

	/* TC config is different for AH 4 port */
	four_port = p_hwfn->p_dev->num_ports_in_engine == MAX_NUM_PORTS_K2;

	/* in AH 4 port we have fewer TCs per port */
	qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : NUM_OF_PHYS_TCS;

	/* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and 4 otherwise */
	if (!qm_info->ooo_tc)
		qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC : DCBX_TCP_OOO_TC;
}

/* initialize qm vport params */
static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	u8 i;

	/* all vports participate in weighted fair queueing */
	for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++)
		qm_info->qm_vport_params[i].vport_wfq = 1;
}

/* initialize qm port params */
static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
{
	/* Initialize qm port parameters */
	u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engine;

	/* indicate how ooo and high pri traffic is dealt with */
	active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
		ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP;

	for (i = 0; i < num_ports; i++) {
		struct init_qm_port_params *p_qm_port =
			&p_hwfn->qm_info.qm_port_params[i];

		p_qm_port->active = 1;
		p_qm_port->active_phys_tcs = active_phys_tcs;
		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
	}
}
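
/* Roughly, the PBF command lines and BTB blocks are simply split evenly across
 * the active ports; e.g., hypothetically, if PBF_MAX_CMD_LINES were 3328 on a
 * 2-port device, each port would be given 1664 command lines.
 */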

/* Reset the params which must be reset for qm init. QM init may be called as
 * a result of flows other than driver load (e.g. dcbx renegotiation). Other
 * params may be affected by the init but would simply recalculate to the same
 * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
 * affected as these amounts stay the same.
 */
static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	qm_info->num_pqs = 0;
	qm_info->num_vports = 0;
	qm_info->num_pf_rls = 0;
	qm_info->num_vf_pqs = 0;
	qm_info->first_vf_pq = 0;
	qm_info->first_mcos_pq = 0;
	qm_info->first_rl_pq = 0;
}

static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	qm_info->num_vports++;

	if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
		DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn));
}

/* initialize a single pq and manage qm_info resources accounting.
 * The pq_init_flags param determines whether the PQ is rate limited (for VF or PF)
 * and whether a new vport is allocated to the pq or not (i.e. vport will be shared)
 */

/* flags for pq init */
#define PQ_INIT_SHARE_VPORT	(1 << 0)
#define PQ_INIT_PF_RL		(1 << 1)
#define PQ_INIT_VF_RL		(1 << 2)

/* defines for pq init */
#define PQ_INIT_DEFAULT_WRR_GROUP	1
#define PQ_INIT_DEFAULT_TC		0
#define PQ_INIT_OFLD_TC			(p_hwfn->hw_info.offload_tc)

static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn,
			     struct ecore_qm_info *qm_info,
			     u8 tc, u32 pq_init_flags)
{
	u16 pq_idx = qm_info->num_pqs, max_pq = ecore_init_qm_get_num_pqs(p_hwfn);

	if (pq_idx > max_pq)
		DP_ERR(p_hwfn, "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);

	/* init pq params */
	qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + qm_info->num_vports;
	qm_info->qm_pq_params[pq_idx].tc_id = tc;
	qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
	qm_info->qm_pq_params[pq_idx].rl_valid =
		(pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL);

	/* qm params accounting */
	qm_info->num_pqs++;
	if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
		qm_info->num_vports++;

	if (pq_init_flags & PQ_INIT_PF_RL)
		qm_info->num_pf_rls++;

	if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
		DP_ERR(p_hwfn, "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", qm_info->num_vports, ecore_init_qm_get_num_vports(p_hwfn));

	if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn))
		DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n", qm_info->num_pf_rls, ecore_init_qm_get_num_pf_rls(p_hwfn));
}

/* get pq index according to PQ_FLAGS */
static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn,
					     u32 pq_flags)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	/* Can't have multiple flags set here */
	if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1)
		goto err;

	switch (pq_flags) {
	case PQ_FLAGS_RLS:
		return &qm_info->first_rl_pq;
	case PQ_FLAGS_MCOS:
		return &qm_info->first_mcos_pq;
	case PQ_FLAGS_LB:
		return &qm_info->pure_lb_pq;
	case PQ_FLAGS_OOO:
		return &qm_info->ooo_pq;
	case PQ_FLAGS_ACK:
		return &qm_info->pure_ack_pq;
	case PQ_FLAGS_OFLD:
		return &qm_info->offload_pq;
	case PQ_FLAGS_LLT:
		return &qm_info->low_latency_pq;
	case PQ_FLAGS_VFS:
		return &qm_info->first_vf_pq;
	default:
		goto err;
	}

err:
	DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
	return OSAL_NULL;
}

/* save pq index in qm info */
static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn,
				  u32 pq_flags, u16 pq_val)
{
	u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);

	*base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
}

/* get tx pq index, with the PQ TX base already set (ready for context init) */
u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags)
{
	u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);

	return *base_pq_idx + CM_TX_PQ_BASE;
}

u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc)
{
	u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn);

	if (tc > max_tc)
		DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);

	return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
}

u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
{
	u16 max_vf = ecore_init_qm_get_num_vfs(p_hwfn);

	if (vf > max_vf)
		DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);

	return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
}

u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl)
{
	u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);

	if (rl > max_rl)
		DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);

	return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
}
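
/* These getters are what the queue-setup paths use to translate a logical
 * queue into a CM PQ id; e.g., a hypothetical L2 Tx-queue start flow would
 * fetch its PQ with something like
 *
 *	pq_id = ecore_get_cm_pq_idx_mcos(p_hwfn, tc);
 *
 * and pass pq_id in the queue-start ramrod parameters.
 */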

/* Functions for creating specific types of pqs */
static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
		return;

	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
	ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
}

static void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
		return;

	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
	ecore_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
}

static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
		return;

	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
	ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
}

static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
		return;

	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
	ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
}

static void ecore_init_qm_low_latency_pq(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT))
		return;

	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
	ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
}

static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	u8 tc_idx;

	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
		return;

	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
	for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++)
		ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
}

static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);

	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
		return;

	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
	qm_info->num_vf_pqs = num_vfs;
	for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
		ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL);
}

static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn)
{
	u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn);
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
		return;

	ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
	for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
		ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL);
}

static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn)
{
	/* rate limited pqs, must come first (FW assumption) */
	ecore_init_qm_rl_pqs(p_hwfn);

	/* pqs for multi cos */
	ecore_init_qm_mcos_pqs(p_hwfn);

	/* pure loopback pq */
	ecore_init_qm_lb_pq(p_hwfn);

	/* out of order pq */
	ecore_init_qm_ooo_pq(p_hwfn);

	/* pure ack pq */
	ecore_init_qm_pure_ack_pq(p_hwfn);

	/* pq for offloaded protocol */
	ecore_init_qm_offload_pq(p_hwfn);

	/* low latency pq */
	ecore_init_qm_low_latency_pq(p_hwfn);

	/* done sharing vports */
	ecore_init_qm_advance_vport(p_hwfn);

	/* pqs for vfs */
	ecore_init_qm_vf_pqs(p_hwfn);
}
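
/* The resulting PQ layout within this PF's range is therefore, in order:
 * [PF rate-limited PQs][per-TC MCOS PQs][LB][OOO][ACK][OFLD][LLT][VF PQs],
 * with any PQ type whose flag is not set simply skipped.
 */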

/* compare values of getters against resources amounts */
static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn)
{
	if (ecore_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, ECORE_VPORT)) {
		DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
		return ECORE_INVAL;
	}

	if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) {
		DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

/*
 * Function for verbose printing of the qm initialization results
 */
static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	struct init_qm_vport_params *vport;
	struct init_qm_port_params *port;
	struct init_qm_pq_params *pq;
	int i, tc;

	/* top level params */
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
		   qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq, qm_info->offload_pq, qm_info->pure_ack_pq);
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n",
		   qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs, qm_info->num_vf_pqs, qm_info->num_vports, qm_info->max_phys_tcs_per_port);
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
		   qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en, qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl, qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn));

	/* port table */
	for (i = 0; i < p_hwfn->p_dev->num_ports_in_engine; i++) {
		port = &(qm_info->qm_port_params[i]);
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n",
			   i, port->active, port->active_phys_tcs, port->num_pbf_cmd_lines, port->num_btb_blocks, port->reserved);
	}

	/* vport table */
	for (i = 0; i < qm_info->num_vports; i++) {
		vport = &(qm_info->qm_vport_params[i]);
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ",
			   qm_info->start_vport + i, vport->vport_rl, vport->vport_wfq);
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ", vport->first_tx_pq_id[tc]);
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n");
	}

	/* pq table */
	for (i = 0; i < qm_info->num_pqs; i++) {
		pq = &(qm_info->qm_pq_params[i]);
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
			   qm_info->start_pq + i, pq->vport_id, pq->tc_id, pq->wrr_group, pq->rl_valid);
	}
}

static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn)
{
	/* reset params required for init run */
	ecore_init_qm_reset_params(p_hwfn);

	/* init QM top level params */
	ecore_init_qm_params(p_hwfn);

	/* init QM port params */
	ecore_init_qm_port_params(p_hwfn);

	/* init QM vport params */
	ecore_init_qm_vport_params(p_hwfn);

	/* init QM physical queue params */
	ecore_init_qm_pq_params(p_hwfn);

	/* display all that init */
	ecore_dp_init_qm_params(p_hwfn);
}

/* This function reconfigures the QM pf on the fly.
 * For this purpose we:
 * 1. reconfigure the QM database
 * 2. set new values to runtime array
 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
 * 4. activate init tool in QM_PF stage
 * 5. send an sdm_qm_cmd through rbc interface to release the QM
 */
enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	bool b_rc;
	enum _ecore_status_t rc;

	/* initialize ecore's qm data structure */
	ecore_init_qm_info(p_hwfn);

	/* stop PF's qm queues */
	OSAL_SPIN_LOCK(&qm_lock);
	b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
				      qm_info->start_pq, qm_info->num_pqs);
	OSAL_SPIN_UNLOCK(&qm_lock);
	if (!b_rc)
		return ECORE_INVAL;

	/* clear the QM_PF runtime phase leftovers from previous init */
	ecore_init_clear_rt_data(p_hwfn);

	/* prepare QM portion of runtime array */
	ecore_qm_init_pf(p_hwfn);

	/* activate init tool on runtime array */
	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
			    p_hwfn->hw_info.hw_mode);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* start PF's qm queues */
	OSAL_SPIN_LOCK(&qm_lock);
	b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
				      qm_info->start_pq, qm_info->num_pqs);
	OSAL_SPIN_UNLOCK(&qm_lock);
	if (!b_rc)
		return ECORE_INVAL;

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn)
{
	struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
	enum _ecore_status_t rc;

	rc = ecore_init_qm_sanity(p_hwfn);
	if (rc != ECORE_SUCCESS)
		goto alloc_err;

	qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					    sizeof(struct init_qm_pq_params) *
					    ecore_init_qm_get_num_pqs(p_hwfn));
	if (!qm_info->qm_pq_params)
		goto alloc_err;

	qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					       sizeof(struct init_qm_vport_params) *
					       ecore_init_qm_get_num_vports(p_hwfn));
	if (!qm_info->qm_vport_params)
		goto alloc_err;

	qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					      sizeof(struct init_qm_port_params) *
					      p_hwfn->p_dev->num_ports_in_engine);
	if (!qm_info->qm_port_params)
		goto alloc_err;

	qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					sizeof(struct ecore_wfq_data) *
					ecore_init_qm_get_num_vports(p_hwfn));
	if (!qm_info->wfq_data)
		goto alloc_err;

	return ECORE_SUCCESS;

alloc_err:
	DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
	ecore_qm_info_free(p_hwfn);
	return ECORE_NOMEM;
}
/******************** End QM initialization ***************/

enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 rdma_tasks, excess_tasks;
	u32 line_count;
	int i;

	if (IS_VF(p_dev)) {
		for_each_hwfn(p_dev, i) {
			rc = ecore_l2_alloc(&p_dev->hwfns[i]);
			if (rc != ECORE_SUCCESS)
				return rc;
		}
		return rc;
	}

	p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
				     sizeof(*p_dev->fw_data));
	if (!p_dev->fw_data)
		return ECORE_NOMEM;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		u32 n_eqes, num_cons;

		/* First allocate the context manager structure */
		rc = ecore_cxt_mngr_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Set the HW cid/tid numbers (in the context manager).
		 * Must be done prior to any further computations.
		 */
		rc = ecore_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS);
		if (rc)
			goto alloc_err;

		rc = ecore_alloc_qm_data(p_hwfn);
		if (rc)
			goto alloc_err;

		/* init qm info */
		ecore_init_qm_info(p_hwfn);

		/* Compute the ILT client partition */
		rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count);
		if (rc) {
			DP_NOTICE(p_hwfn, false, "too many ILT lines; re-computing with less lines\n");
			/* In case there are not enough ILT lines we reduce the
			 * number of RDMA tasks and re-compute.
			 */
			excess_tasks = ecore_cxt_cfg_ilt_compute_excess(
					p_hwfn, line_count);
			if (!excess_tasks)
				goto alloc_err;

			rdma_tasks = RDMA_MAX_TIDS - excess_tasks;
			rc = ecore_cxt_set_pf_params(p_hwfn, rdma_tasks);
			if (rc)
				goto alloc_err;

			rc = ecore_cxt_cfg_ilt_compute(p_hwfn, &line_count);
			if (rc) {
				DP_ERR(p_hwfn, "failed ILT compute. Requested too many lines: %u\n",
				       line_count);

				goto alloc_err;
			}
		}

		/* CID map / ILT shadow table / T2
		 * The table sizes are determined by the computations above
		 */
		rc = ecore_cxt_tables_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SPQ, must follow ILT because initializes SPQ context */
		rc = ecore_spq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SP status block allocation */
		p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
							   RESERVED_PTT_DPC);

		rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto alloc_err;

		rc = ecore_iov_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* EQ */
		n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain);
		if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
			/* Calculate the EQ size
			 * ---------------------
			 * Each ICID may generate up to one event at a time i.e.
			 * the event must be handled/cleared before a new one
			 * can be generated. We calculate the sum of events per
			 * protocol and create an EQ deep enough to handle the
			 * worst case:
			 * - Core - according to SPQ.
			 * - RoCE - per QP there are a couple of ICIDs, one
			 *	  responder and one requester, each can
			 *	  generate an EQE => n_eqes_qp = 2 * n_qp.
			 *	  Each CQ can generate an EQE. There are 2 CQs
			 *	  per QP => n_eqes_cq = 2 * n_qp.
			 *	  Hence the RoCE total is 4 * n_qp or
			 *	  2 * num_cons.