/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, v.1, (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright 2018 Joyent, Inc.
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "nvm_map.h"
#include "nvm_cfg.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
#include "ecore_sp_commands.h"

#define CHIP_MCP_RESP_ITER_US		10
#define EMUL_MCP_RESP_ITER_US		(1000 * 1000)

#define ECORE_DRV_MB_MAX_RETRIES	(500 * 1000) /* Account for 5 sec */
#define ECORE_MCP_RESET_RETRIES		(50 * 1000) /* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
	ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	    _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
	    offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
	    offsetof(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
	DRV_ID_PDA_COMP_VER_SHIFT)

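/*
 * Converting a rate in Mb/s to bytes/s amounts to multiplying by
 * 10^6 / 8 ~= 2^17, hence the shift value below, e.g.
 * bytes_per_sec = mbits_per_sec << MCP_BYTES_PER_MBIT_SHIFT.
 */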
#define MCP_BYTES_PER_MBIT_SHIFT	17

#ifndef ASIC_ONLY
static int loaded;
static int loaded_port[MAX_NUM_PORTS] = { 0 };
#endif

bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

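/*
 * Copy the MFW mailbox into the driver's shadow copy. The first dword of the
 * mailbox section holds its length (read during ecore_load_mcp_offsets()),
 * so the message data is read starting at offset sizeof(u32), one
 * big-endian dword at a time.
 */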
void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	OSAL_BE32 tmp;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
		return;
#endif

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			OSAL_BE32_TO_CPU(tmp);
	}
}

enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
#ifdef CONFIG_ECORE_LOCK_ALLOC
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
#endif
	}
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
	p_hwfn->mcp_info = OSAL_NULL;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
		p_info->public_base = 0;
		return ECORE_INVAL;
	}
#endif

	p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return ECORE_INVAL;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
				  SECTION_OFFSIZE_ADDR(p_info->public_base,
						       PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
					      p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
					 MISCS_REG_GENERIC_POR_0);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(*p_hwfn->mcp_info));
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
		/* Do not free mcp_info here, since a zero public_base
		 * indicates that the MCP is not initialized
		 */
		return ECORE_SUCCESS;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	/* Initialize the MFW spinlock */
#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
#endif
	OSAL_SPIN_LOCK_INIT(&p_info->lock);
	OSAL_SPIN_LOCK_INIT(&p_info->link_lock);

	return ECORE_SUCCESS;

err:
	DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
	ecore_mcp_free(p_hwfn);
	return ECORE_NOMEM;
}

/* Locks the MFW mailbox of a PF to ensure a single access.
 * The lock is achieved in most cases by holding a spinlock, causing other
 * threads to wait till a previous access is done.
 * In some cases (currently when the [UN]LOAD_REQ commands are sent), the
 * single access is achieved by setting a blocking flag, which causes the
 * mailbox commands of other competing contexts to fail.
 */
static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
					      u32 cmd)
{
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);

	/* The spinlock shouldn't be acquired when the mailbox command is
	 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
	 * pending [UN]LOAD_REQ command of another PF together with a spinlock
	 * (i.e. interrupts are disabled) - can lead to a deadlock.
	 * It is assumed that for a single PF, no other mailbox commands can be
	 * sent from another context while sending LOAD_REQ, and that any
	 * parallel commands to UNLOAD_REQ can be cancelled.
	 */
	if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
		p_hwfn->mcp_info->block_mb_sending = false;

	/* There's at least a single command that is sent by ecore during the
	 * load sequence [expectation of MFW].
	 */
	if ((p_hwfn->mcp_info->block_mb_sending) &&
	    (cmd != DRV_MSG_CODE_FEATURE_SUPPORT)) {
		DP_NOTICE(p_hwfn, false,
			  "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
			  cmd);
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
		return ECORE_BUSY;
	}

	if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
		p_hwfn->mcp_info->block_mb_sending = true;
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
	}

	return ECORE_SUCCESS;
}

static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
{
	if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
		OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
}

enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
	u32 delay = CHIP_MCP_RESP_ITER_US;
	u32 org_mcp_reset_seq, cnt = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
#endif

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Set drv command along with the updated sequence */
	org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		/* Give the FW up to 500 msec (50*1000 iterations of 10 usec) */
	} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
						MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < ECORE_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = ECORE_AGAIN;
	}

	ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);

	return rc;
}

void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;

	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
	cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
	cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

	DP_NOTICE(p_hwfn, false,
		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}

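/*
 * The mailbox handshake implemented below works as follows: the driver writes
 * the command parameter, then writes the command code OR'd with a fresh
 * sequence number into the driver mailbox header, and polls the firmware
 * mailbox header until the same sequence number is echoed back or the retry
 * budget is exhausted.
 */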
static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u32 cmd, u32 param,
					     u32 *o_mcp_resp, u32 *o_mcp_param)
{
	u32 delay = CHIP_MCP_RESP_ITER_US;
	u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
	u32 seq, cnt = 1, actual_mb_seq __unused;
	enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay = EMUL_MCP_RESP_ITER_US;
	/* There is a built-in delay of 100usec in each MFW response read */
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		max_retries /= 10;
#endif

	/* Get actual driver mailbox sequence */
	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			DRV_MSG_SEQ_NUMBER_MASK;

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist !=
	    ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
		ecore_load_mcp_offsets(p_hwfn, p_ptt);
		ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
	seq = ++p_hwfn->mcp_info->drv_mb_seq;

	/* Set drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

	/* Set drv command along with the updated sequence */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "wrote command (%x) to MFW MB param 0x%08x\n",
		   (cmd | seq), param);

	do {
		/* Wait for MFW response */
		OSAL_UDELAY(delay);
		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

		/* Give the FW up to 5 seconds (500*1000 iterations of 10 usec) */
	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
		 (cnt++ < max_retries));

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "[after %d usec] read (%x) seq is (%x) from FW MB\n",
		   cnt * delay, *o_mcp_resp, seq);

	/* Is this a reply to our command? */
	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
		*o_mcp_resp &= FW_MSG_CODE_MASK;
		/* Get the MCP param */
		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
	} else {
		/* FW BUG! */
		DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
		       cmd, param);
		ecore_mcp_print_cpu_info(p_hwfn, p_ptt);
		*o_mcp_resp = 0;
		rc = ECORE_AGAIN;
		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
	}
	return rc;
}

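/*
 * Send a mailbox command whose extended argument/result is passed through the
 * union_data area in shmem: stage the source buffer into the union, issue the
 * command, and then copy the union back into the destination buffer.
 */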
static enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt,
						    struct ecore_mcp_mb_params *p_mb_params)
{
	union drv_union_data union_data;
	u32 union_data_addr;
	enum _ecore_status_t rc;

	/* MCP not initialized */
	if (!ecore_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
		return ECORE_BUSY;
	}

	if (p_mb_params->data_src_size > sizeof(union_data) ||
	    p_mb_params->data_dst_size > sizeof(union_data)) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size, p_mb_params->data_dst_size,
		       sizeof(union_data));
		return ECORE_INVAL;
	}

	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  offsetof(struct public_drv_mb, union_data);

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
	if (rc != ECORE_SUCCESS)
		return rc;

	OSAL_MEM_ZERO(&union_data, sizeof(union_data));
	if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
		OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
			    p_mb_params->data_src_size);
	ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
			sizeof(union_data));

	rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
			      p_mb_params->param, &p_mb_params->mcp_resp,
			      &p_mb_params->mcp_param);

	if (p_mb_params->p_data_dst != OSAL_NULL &&
	    p_mb_params->data_dst_size)
		ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				  union_data_addr, p_mb_params->data_dst_size);

	ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);

	return rc;
}

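/*
 * A minimal usage sketch for a simple (no extended data) mailbox command,
 * mirroring the callers later in this file:
 *
 *	u32 resp = 0, param = 0;
 *
 *	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0,
 *	    &resp, &param);
 */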
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
				   u32 *o_mcp_resp, u32 *o_mcp_param)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
			loaded--;
			loaded_port[p_hwfn->port_id]--;
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
				   loaded);
		}
		return ECORE_SUCCESS;
	}
#endif

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 i_txn_size,
					  u32 *i_buf)
{
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 cmd,
					  u32 param,
					  u32 *o_mcp_resp,
					  u32 *o_mcp_param,
					  u32 *o_txn_size,
					  u32 *o_buf)
{
	struct ecore_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	OSAL_MEMCPY(o_buf, raw_data, *o_txn_size);

	return ECORE_SUCCESS;
}

#ifndef ASIC_ONLY
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
				    u32 *p_load_code)
{
	static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	if (!loaded) {
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
	} else if (!loaded_port[p_hwfn->port_id]) {
		load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
	} else {
		load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	/* In CMT mode, always report an engine load */
	if (p_hwfn->p_dev->num_hwfns > 1)
		load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;

	*p_load_code = load_phase;
	loaded++;
	loaded_port[p_hwfn->port_id]++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
		   *p_load_code, loaded, p_hwfn->port_id,
		   loaded_port[p_hwfn->port_id]);
}
#endif

static bool
ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
			 enum ecore_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}

static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
						      struct ecore_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			   &resp, &param);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}

#define CONFIG_ECORE_L2_BITMAP_IDX	(0x1 << 0)
#define CONFIG_ECORE_SRIOV_BITMAP_IDX	(0x1 << 1)
#define CONFIG_ECORE_ROCE_BITMAP_IDX	(0x1 << 2)
#define CONFIG_ECORE_IWARP_BITMAP_IDX	(0x1 << 3)
#define CONFIG_ECORE_FCOE_BITMAP_IDX	(0x1 << 4)
#define CONFIG_ECORE_ISCSI_BITMAP_IDX	(0x1 << 5)
#define CONFIG_ECORE_LL2_BITMAP_IDX	(0x1 << 6)

static u32 ecore_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

#ifdef CONFIG_ECORE_L2
	config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_SRIOV
	config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ROCE
	config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_IWARP
	config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_FCOE
	config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_ISCSI
	config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
#endif
#ifdef CONFIG_ECORE_LL2
	config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
#endif

	return config_bitmap;
}

struct ecore_load_req_in_params {
	u8 hsi_ver;
#define ECORE_LOAD_REQ_HSI_VER_DEFAULT	0
#define ECORE_LOAD_REQ_HSI_VER_1	1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct ecore_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};

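/*
 * Send a single LOAD_REQ to the MFW. With the current HSI, the extended
 * request/response details travel in struct load_req_stc/load_rsp_stc through
 * the union data; with HSI version 1, only the mailbox param/resp carry
 * information.
 */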
static enum _ecore_status_t
__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     struct ecore_load_req_in_params *p_in_params,
		     struct ecore_load_req_out_params *p_out_params)
{
	struct ecore_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&load_req, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE,
			    p_in_params->drv_role);
	ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
			    p_in_params->timeout_val);
	ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
			    p_in_params->force_cmd);
	ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
			    p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0, load_req.drv_ver_1,
			   load_req.fw_ver, load_req.misc0,
			   ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   ECORE_MFW_GET_FIELD(load_req.misc0,
					       LOAD_REQ_LOCK_TO),
			   ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   ECORE_MFW_GET_FIELD(load_req.misc0,
					       LOAD_REQ_FLAGS0));

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0, load_rsp.drv_ver_1,
			   load_rsp.fw_ver, load_rsp.misc0,
			   ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   ECORE_MFW_GET_FIELD(load_rsp.misc0,
					       LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
			ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
			ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
			ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
			LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
						   enum ecore_drv_role drv_role,
						   u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case ECORE_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case ECORE_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

enum ecore_load_req_force {
	ECORE_LOAD_REQ_FORCE_NONE,
	ECORE_LOAD_REQ_FORCE_PF,
	ECORE_LOAD_REQ_FORCE_ALL,
};

static enum _ecore_status_t
ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
			enum ecore_load_req_force force_cmd,
			u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case ECORE_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case ECORE_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case ECORE_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

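/*
 * Top-level load request flow: send a LOAD_REQ using the default HSI, and if
 * the MFW demands the old interface or a force load, resend the request
 * accordingly before interpreting the final load code.
 */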
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_load_req_params *p_params)
{
	struct ecore_load_req_out_params out_params;
	struct ecore_load_req_in_params in_params;
	u8 mfw_drv_role, mfw_force_cmd;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
		ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
		return ECORE_SUCCESS;
	}
#endif

	OSAL_MEM_ZERO(&in_params, sizeof(in_params));
	in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = ECORE_VERSION;
	in_params.drv_ver_1 = ecore_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	rc = ecore_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
	if (rc != ECORE_SUCCESS)
		return rc;

	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
				     &mfw_force_cmd);
	if (rc != ECORE_SUCCESS)
		return rc;

	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	OSAL_MEM_ZERO(&out_params, sizeof(out_params));
	rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");

		/* The previous load request set the mailbox blocking */
		p_hwfn->mcp_info->block_mb_sending = false;

		in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
		OSAL_MEM_ZERO(&out_params, sizeof(out_params));
		rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
					  &out_params);
		if (rc != ECORE_SUCCESS)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		/* The previous load request set the mailbox blocking */
		p_hwfn->mcp_info->block_mb_sending = false;

		if (ecore_mcp_can_force_load(in_params.drv_role,
					     out_params.exist_drv_role,
					     p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_1, in_params.drv_ver_0,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_1,
				out_params.exist_drv_ver_0);
			DP_INFO(p_hwfn, "Sending a force load request\n");

			rc = ecore_get_mfw_force_cmd(p_hwfn,
						     ECORE_LOAD_REQ_FORCE_ALL,
						     &mfw_force_cmd);
			if (rc != ECORE_SUCCESS)
				return rc;

			in_params.force_cmd = mfw_force_cmd;
			OSAL_MEM_ZERO(&out_params, sizeof(out_params));
			rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
						  &out_params);
			if (rc != ECORE_SUCCESS)
				return rc;
		} else {
			DP_NOTICE(p_hwfn, false,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);
			DP_NOTICE(p_hwfn, false,
				  "Avoid sending a force load request to prevent disruption of active PFs\n");

			ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
			return ECORE_BUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 * This is unexpected since a quasi-FLR request was
			 * previously sent as part of ecore_hw_prepare().
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
			return ECORE_INVAL;
		}
		break;
	case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
	case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
		DP_NOTICE(p_hwfn, false,
			  "MFW refused a load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	default:
		DP_NOTICE(p_hwfn, false,
			  "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return ECORE_BUSY;
	}

	p_params->load_code = out_params.load_code;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	u32 wol_param, mcp_resp, mcp_param;

	switch (p_hwfn->p_dev->wol_config) {
	case ECORE_OV_WOL_DISABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
		break;
	case ECORE_OV_WOL_ENABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
		break;
	default:
		DP_NOTICE(p_hwfn, true,
			  "Unknown WoL configuration %02x\n",
			  p_hwfn->p_dev->wol_config);
		/* Fallthrough */
	case ECORE_OV_WOL_DEFAULT:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
	}

	return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
			     &mcp_resp, &mcp_param);
}

enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_mb_params mb_params;
	struct mcp_mac wol_mac;

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	/* Set the primary MAC if WoL is enabled */
	if (p_hwfn->p_dev->wol_config == ECORE_OV_WOL_ENABLED) {
		u8 *p_mac = p_hwfn->p_dev->wol_mac;

		OSAL_MEM_ZERO(&wol_mac, sizeof(wol_mac));
		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
				    p_mac[4] << 8 | p_mac[5];

		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFDOWN),
			   "Setting WoL MAC: %02x:%02x:%02x:%02x:%02x:%02x --> [%08x,%08x]\n",
			   p_mac[0], p_mac[1], p_mac[2], p_mac[3], p_mac[4],
			   p_mac[5], wol_mac.mac_upper, wol_mac.mac_lower);

		mb_params.p_data_src = &wol_mac;
		mb_params.data_src_size = sizeof(wol_mac);
	}

	return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     ECORE_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
					   path_addr +
					   offsetof(struct public_path,
						    mcp_vf_disabled) +
					   sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		OSAL_VF_FLR_UPDATE(p_hwfn);
}

enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct ecore_mcp_mb_params mb_params;
	enum _ecore_status_t rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to pass ACK for VF flr to MFW\n");
		return ECORE_TIMEOUT;
	}

	/* TMP - clear the ACK bits; should be done by MFW */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		ecore_wr(p_hwfn, p_ptt,
			 func_addr +
			 offsetof(struct public_func, drv_ack_vf_disabled) +
			 i * sizeof(u32), 0);

	return rc;
}

static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = ecore_rd(p_hwfn, p_ptt,
				     p_hwfn->mcp_info->port_addr +
				     offsetof(struct public_port,
					      transceiver_data));

	DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
					    offsetof(struct public_port,
						     transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
}

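/*
 * Derive the EEE state and the local/link-partner advertised capabilities
 * from the eee_status word in the port section of shmem.
 */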
static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			      offsetof(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_SHIFT;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_SHIFT;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
}

static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 bool b_reset)
{
	struct ecore_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = ecore_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status, (u32)(p_hwfn->mcp_info->port_addr +
					 offsetof(struct public_port, link_status)));
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* Preserve the full line speed here, since p_link->speed itself is
	 * adjusted below according to bandwidth allocation.
	 */
	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
					      p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		ECORE_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	OSAL_LINK_UPDATE(p_hwfn);
out:
	OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
}

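/*
 * Translate the cached link parameters into an eth_phy_cfg structure and hand
 * it to the MFW via INIT_PHY (link up) or LINK_RESET (link down). A
 * link-change handling pass is run afterwards, since the MFW is not
 * guaranteed to raise an attention.
 */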
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					bool b_up)
{
	struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct ecore_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cmd;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	/* Set the shmem configuration according to params */
	OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;
	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
		if (params->eee.enable)
			phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_SHIFT) &
				   EEE_TX_TIMER_USEC_MASK;
	}

	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
			   phy_cfg.loopback_mode);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
	 * - On initialization, older MFWs might not indicate link change
	 *   during LFA, so we'll never get an UP indication.
	 */
	ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return rc;
}

u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	/* TODO - Add support for VFs */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));

	proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
				 path_addr +
				 offsetof(struct public_path, process_kill)) &
			PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}

static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and until its load phase, during which they will be re-enabled.
	 */
	ecore_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
		return;

	if (p_dev->recov_in_prog) {
		DP_NOTICE(p_hwfn, false,
			  "Ignoring the indication since a recovery process is already in progress\n");
		return;
	}

	p_dev->recov_in_prog = true;

	proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);

	OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
}

static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  enum MFW_DRV_MSG_TYPE type)
{
	enum ecore_mcp_protocol_type stats_type __unused;
	union ecore_mcp_protocol_stats stats;
	struct ecore_mcp_mb_params mb_params;
	u32 hsi_param;
	enum _ecore_status_t rc;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = ECORE_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		stats_type = ECORE_MCP_FCOE_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
		break;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		stats_type = ECORE_MCP_ISCSI_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
		break;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		stats_type = ECORE_MCP_RDMA_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
		return;
	}

	OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);

	OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
}

static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
				    struct public_func *p_shmem_info)
{
	struct ecore_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	/* TODO - bandwidth min/max should have valid values of 1-100,
	 * as well as some indication that the feature is disabled.
	 * Until MFW/qlediag enforce those limitations, assume that a limit is
	 * always present, and clamp out-of-range values to min `1' and
	 * max `100'.
	 */
	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

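/*
 * Read a PF's public_func section from shmem into p_data, one dword at a
 * time, copying no more than the smaller of the local structure and the
 * section size advertised by the MFW.
 */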
static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct public_func *p_data,
				    int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	OSAL_MEM_ZERO(p_data, sizeof(*p_data));

	size = OSAL_MIN_T(u32, sizeof(*p_data),
			  SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
					      func_addr + (i << 2));

	return size;
}
#if 0
/* This was introduced with FW 8.10.5.0; Hopefully this is only temp. */
enum _ecore_status_t ecore_hw_init_first_eth(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u8 *p_pf)
{
	struct public_func shmem_info;
	int i;

	/* Find first Ethernet interface in port */
	for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->p_dev);
	     i += p_hwfn->p_dev->num_ports_in_engine) {
		ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
					 MCP_PF_ID_BY_REL(p_hwfn, i));

		if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
		    FUNC_MF_CFG_PROTOCOL_ETHERNET) {
			*p_pf = (u8)i;
			return ECORE_SUCCESS;
		}
	}

	/* This might actually be valid somewhere in the future but for now
	 * it's highly unlikely.
	 */
	DP_NOTICE(p_hwfn, false,
		  "Failed to find on port an ethernet interface in MF_SI mode\n");

	return ECORE_INVAL;
}
#endif
static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
				 MCP_PF_ID(p_hwfn));

	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);

	ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		      &param);
}

static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
				 MCP_PF_ID(p_hwfn));

	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
					    FUNC_MF_CFG_OV_STAG_MASK;
	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
	if ((p_hwfn->hw_info.hw_mode & (1 << MODE_MF_SD)) &&
	    (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET)) {
		ecore_wr(p_hwfn, p_ptt,
			 NIG_REG_LLH_FUNC_TAG_VALUE,
			 p_hwfn->hw_info.ovlan);
		ecore_sp_pf_update_stag(p_hwfn);
	}

	OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);

	/* Acknowledge the MFW */
	ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
		      &resp, &param);
}

ecore_mcp_handle_fan_failure(struct ecore_hwfn * p_hwfn,struct ecore_ptt * p_ptt)1619 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
1620 struct ecore_ptt *p_ptt)
1621 {
1622 /* A single notification should be sent to the upper driver in CMT mode */
1623 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1624 return;
1625
1626 DP_NOTICE(p_hwfn, false,
1627 "Fan failure was detected on the network interface card and it's going to be shut down.\n");
1628
1629 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
1630 }
1631
1632 struct ecore_mdump_cmd_params {
1633 u32 cmd;
1634 void *p_data_src;
1635 u8 data_src_size;
1636 void *p_data_dst;
1637 u8 data_dst_size;
1638 u32 mcp_resp;
1639 };
1640
1641 static enum _ecore_status_t
1642 ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1643 struct ecore_mdump_cmd_params *p_mdump_cmd_params)
1644 {
1645 struct ecore_mcp_mb_params mb_params;
1646 enum _ecore_status_t rc;
1647
1648 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
1649 mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1650 mb_params.param = p_mdump_cmd_params->cmd;
1651 mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1652 mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1653 mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1654 mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1655 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1656 if (rc != ECORE_SUCCESS)
1657 return rc;
1658
1659 p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1660
1661 if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1662 DP_INFO(p_hwfn,
1663 "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
1664 p_mdump_cmd_params->cmd);
1665 rc = ECORE_NOTIMPL;
1666 } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1667 DP_INFO(p_hwfn,
1668 "The mdump command is not supported by the MFW\n");
1669 rc = ECORE_NOTIMPL;
1670 }
1671
1672 return rc;
1673 }
1674
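/*
 * All of the ecore_mcp_mdump_*() helpers below are thin wrappers around
 * ecore_mcp_mdump_cmd(): the mdump sub-command travels in the mailbox
 * param field, while the optional src/dst buffers are passed through to
 * the union data area of the mailbox.
 */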
1675 static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
1676 struct ecore_ptt *p_ptt)
1677 {
1678 struct ecore_mdump_cmd_params mdump_cmd_params;
1679
1680 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1681 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1682
1683 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1684 }
1685
1686 enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
1687 struct ecore_ptt *p_ptt,
1688 u32 epoch)
1689 {
1690 struct ecore_mdump_cmd_params mdump_cmd_params;
1691
1692 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1693 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
1694 mdump_cmd_params.p_data_src = &epoch;
1695 mdump_cmd_params.data_src_size = sizeof(epoch);
1696
1697 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1698 }
1699
1700 enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
1701 struct ecore_ptt *p_ptt)
1702 {
1703 struct ecore_mdump_cmd_params mdump_cmd_params;
1704
1705 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1706 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
1707
1708 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1709 }
1710
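/*
 * A minimal (hypothetical) flow for arming and firing a crash dump with
 * the helpers above - error handling elided for brevity:
 *
 *	if (ecore_mcp_mdump_set_values(p_hwfn, p_ptt, epoch) ==
 *	    ECORE_SUCCESS)
 *		(void) ecore_mcp_mdump_trigger(p_hwfn, p_ptt);
 */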
1711 static enum _ecore_status_t
1712 ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1713 struct mdump_config_stc *p_mdump_config)
1714 {
1715 struct ecore_mdump_cmd_params mdump_cmd_params;
1716 enum _ecore_status_t rc;
1717
1718 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1719 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
1720 mdump_cmd_params.p_data_dst = p_mdump_config;
1721 mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
1722
1723 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1724 if (rc != ECORE_SUCCESS)
1725 return rc;
1726
1727 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1728 DP_INFO(p_hwfn,
1729 "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
1730 mdump_cmd_params.mcp_resp);
1731 rc = ECORE_UNKNOWN_ERROR;
1732 }
1733
1734 return rc;
1735 }
1736
1737 enum _ecore_status_t
1738 ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1739 struct ecore_mdump_info *p_mdump_info)
1740 {
1741 u32 addr, global_offsize, global_addr;
1742 struct mdump_config_stc mdump_config;
1743 enum _ecore_status_t rc;
1744
1745 OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
1746
1747 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1748 PUBLIC_GLOBAL);
1749 global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
1750 global_addr = SECTION_ADDR(global_offsize, 0);
1751 p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
1752 global_addr +
1753 offsetof(struct public_global,
1754 mdump_reason));
1755
1756 if (p_mdump_info->reason) {
1757 rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
1758 if (rc != ECORE_SUCCESS)
1759 return rc;
1760
1761 p_mdump_info->version = mdump_config.version;
1762 p_mdump_info->config = mdump_config.config;
1763 p_mdump_info->epoch = mdump_config.epoc;
1764 p_mdump_info->num_of_logs = mdump_config.num_of_logs;
1765 p_mdump_info->valid_logs = mdump_config.valid_logs;
1766
1767 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1768 "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
1769 p_mdump_info->reason, p_mdump_info->version,
1770 p_mdump_info->config, p_mdump_info->epoch,
1771 p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
1772 } else {
1773 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1774 "MFW mdump info: reason %d\n", p_mdump_info->reason);
1775 }
1776
1777 return ECORE_SUCCESS;
1778 }
1779
1780 enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
1781 struct ecore_ptt *p_ptt)
1782 {
1783 struct ecore_mdump_cmd_params mdump_cmd_params;
1784
1785 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1786 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
1787
1788 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1789 }
1790
1791 enum _ecore_status_t
1792 ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1793 struct ecore_mdump_retain_data *p_mdump_retain)
1794 {
1795 struct ecore_mdump_cmd_params mdump_cmd_params;
1796 struct mdump_retain_data_stc mfw_mdump_retain;
1797 enum _ecore_status_t rc;
1798
1799 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1800 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
1801 mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
1802 mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);
1803
1804 rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1805 if (rc != ECORE_SUCCESS)
1806 return rc;
1807
1808 if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1809 DP_INFO(p_hwfn,
1810 "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
1811 mdump_cmd_params.mcp_resp);
1812 return ECORE_UNKNOWN_ERROR;
1813 }
1814
1815 p_mdump_retain->valid = mfw_mdump_retain.valid;
1816 p_mdump_retain->epoch = mfw_mdump_retain.epoch;
1817 p_mdump_retain->pf = mfw_mdump_retain.pf;
1818 p_mdump_retain->status = mfw_mdump_retain.status;
1819
1820 return ECORE_SUCCESS;
1821 }
1822
1823 enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
1824 struct ecore_ptt *p_ptt)
1825 {
1826 struct ecore_mdump_cmd_params mdump_cmd_params;
1827
1828 OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
1829 mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
1830
1831 return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1832 }
1833
1834 static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
1835 struct ecore_ptt *p_ptt)
1836 {
1837 struct ecore_mdump_retain_data mdump_retain;
1838 enum _ecore_status_t rc;
1839
1840 /* In CMT mode - no need for more than a single acknowledgement to the
1841 * MFW, and no more than a single notification to the upper driver.
1842 */
1843 if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
1844 return;
1845
1846 rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
1847 if (rc == ECORE_SUCCESS && mdump_retain.valid) {
1848 DP_NOTICE(p_hwfn, false,
1849 "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
1850 mdump_retain.epoch, mdump_retain.pf,
1851 mdump_retain.status);
1852 } else {
1853 DP_NOTICE(p_hwfn, false,
1854 "The MFW notified that a critical error occurred in the device\n");
1855 }
1856
1857 if (p_hwfn->p_dev->allow_mdump) {
1858 DP_NOTICE(p_hwfn, false,
1859 "Not acknowledging the notification to allow the MFW crash dump\n");
1860 return;
1861 }
1862
1863 DP_NOTICE(p_hwfn, false,
1864 "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
1865 ecore_mcp_mdump_ack(p_hwfn, p_ptt);
1866 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
1867 }
1868
1869 enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
1870 struct ecore_ptt *p_ptt)
1871 {
1872 struct ecore_mcp_info *info = p_hwfn->mcp_info;
1873 enum _ecore_status_t rc = ECORE_SUCCESS;
1874 bool found = false;
1875 u16 i;
1876
1877 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
1878
1879 /* Read Messages from MFW */
1880 ecore_mcp_read_mb(p_hwfn, p_ptt);
1881
1882 /* Compare current messages to old ones */
1883 for (i = 0; i < info->mfw_mb_length; i++) {
1884 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1885 continue;
1886
1887 found = true;
1888
1889 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
1890 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1891 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
1892
1893 switch (i) {
1894 case MFW_DRV_MSG_LINK_CHANGE:
1895 ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
1896 break;
1897 case MFW_DRV_MSG_VF_DISABLED:
1898 ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
1899 break;
1900 case MFW_DRV_MSG_LLDP_DATA_UPDATED:
1901 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1902 ECORE_DCBX_REMOTE_LLDP_MIB);
1903 break;
1904 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
1905 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1906 ECORE_DCBX_REMOTE_MIB);
1907 break;
1908 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
1909 ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
1910 ECORE_DCBX_OPERATIONAL_MIB);
1911 break;
1912 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
1913 ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
1914 break;
1915 case MFW_DRV_MSG_ERROR_RECOVERY:
1916 ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
1917 break;
1918 case MFW_DRV_MSG_GET_LAN_STATS:
1919 case MFW_DRV_MSG_GET_FCOE_STATS:
1920 case MFW_DRV_MSG_GET_ISCSI_STATS:
1921 case MFW_DRV_MSG_GET_RDMA_STATS:
1922 ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
1923 break;
1924 case MFW_DRV_MSG_BW_UPDATE:
1925 ecore_mcp_update_bw(p_hwfn, p_ptt);
1926 break;
1927 case MFW_DRV_MSG_S_TAG_UPDATE:
1928 ecore_mcp_update_stag(p_hwfn, p_ptt);
1929 break;
1930 case MFW_DRV_MSG_FAILURE_DETECTED:
1931 ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
1932 break;
1933 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
1934 ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
1935 break;
1936 case MFW_DRV_MSG_GET_TLV_REQ:
1937 OSAL_MFW_TLV_REQ(p_hwfn);
1938 break;
1939 default:
1940 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
1941 rc = ECORE_INVAL;
1942 }
1943 }
1944
1945 /* ACK everything */
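/* The mailbox is laid out as a length dword, followed by the current
 * message area and then an equally sized ACK area; hence the address
 * arithmetic below - skip the length dword and the current messages to
 * land on the i-th ACK slot.
 */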
1946 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
1947 OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);
1948
1949 /* The MFW expects the answer in BE, so force the write in that format */
1950 ecore_wr(p_hwfn, p_ptt,
1951 info->mfw_mb_addr + sizeof(u32) +
1952 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
1953 sizeof(u32) + i * sizeof(u32), val);
1954 }
1955
1956 if (!found) {
1957 DP_NOTICE(p_hwfn, false,
1958 "Received an MFW message indication but no new message!\n");
1959 rc = ECORE_INVAL;
1960 }
1961
1962 /* Copy the new mfw messages into the shadow */
1963 OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
1964
1965 return rc;
1966 }
1967
1968 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
1969 struct ecore_ptt *p_ptt,
1970 u32 *p_mfw_ver,
1971 u32 *p_running_bundle_id)
1972 {
1973 u32 global_offsize;
1974
1975 #ifndef ASIC_ONLY
1976 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1977 DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
1978 return ECORE_SUCCESS;
1979 }
1980 #endif
1981
1982 if (IS_VF(p_hwfn->p_dev)) {
1983 if (p_hwfn->vf_iov_info) {
1984 struct pfvf_acquire_resp_tlv *p_resp;
1985
1986 p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1987 *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1988 return ECORE_SUCCESS;
1989 } else {
1990 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1991 "VF requested MFW version prior to ACQUIRE\n");
1992 return ECORE_INVAL;
1993 }
1994 }
1995
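/* The MFW version is published as a single dword in the public_global
 * section; callers typically unpack it one byte per version component
 * (e.g. major.minor.rev.eng), though that decoding is left to them.
 */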
1996 global_offsize = ecore_rd(p_hwfn, p_ptt,
1997 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1998 PUBLIC_GLOBAL));
1999 *p_mfw_ver = ecore_rd(p_hwfn, p_ptt,
2000 SECTION_ADDR(global_offsize, 0) +
2001 offsetof(struct public_global, mfw_ver));
2002
2003 if (p_running_bundle_id != OSAL_NULL) {
2004 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
2005 SECTION_ADDR(global_offsize, 0) +
2006 offsetof(struct public_global,
2007 running_bundle_id));
2008 }
2009
2010 return ECORE_SUCCESS;
2011 }
2012
2013 enum _ecore_status_t ecore_mcp_get_mbi_ver(struct ecore_hwfn *p_hwfn,
2014 struct ecore_ptt *p_ptt,
2015 u32 *p_mbi_ver)
2016 {
2017 u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
2018
2019 #ifndef ASIC_ONLY
2020 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2021 DP_NOTICE(p_hwfn, false, "Emulation - can't get MBI version\n");
2022 return ECORE_SUCCESS;
2023 }
2024 #endif
2025
2026 if (IS_VF(p_hwfn->p_dev))
2027 return ECORE_INVAL;
2028
2029 /* Read the address of the nvm_cfg */
2030 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2031 if (!nvm_cfg_addr) {
2032 DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
2033 return ECORE_INVAL;
2034 }
2035
2036 /* Read the offset of nvm_cfg1 */
2037 nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2038
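/* The MBI version lives in the glob section of nvm_cfg1, which is
 * itself mapped into the MCP scratchpad; the mask below keeps only the
 * three MBI version sub-fields of the dword.
 */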
2039 mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2040 offsetof(struct nvm_cfg1, glob) +
2041 offsetof(struct nvm_cfg1_glob, mbi_version);
2042 *p_mbi_ver = ecore_rd(p_hwfn, p_ptt, mbi_ver_addr) &
2043 (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
2044 NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
2045 NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
2046
2047 return ECORE_SUCCESS;
2048 }
2049
2050 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
2051 u32 *p_media_type)
2052 {
2053 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
2054 struct ecore_ptt *p_ptt;
2055
2056 /* TODO - Add support for VFs */
2057 if (IS_VF(p_dev))
2058 return ECORE_INVAL;
2059
2060 if (!ecore_mcp_is_init(p_hwfn)) {
2061 DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
2062 return ECORE_BUSY;
2063 }
2064
2065 *p_media_type = MEDIA_UNSPECIFIED;
2066
2067 p_ptt = ecore_ptt_acquire(p_hwfn);
2068 if (!p_ptt)
2069 return ECORE_BUSY;
2070
2071 *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2072 offsetof(struct public_port, media_type));
2073
2074 ecore_ptt_release(p_hwfn, p_ptt);
2075
2076 return ECORE_SUCCESS;
2077 }
2078
2079 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2080 static void
2081 ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
2082 enum ecore_pci_personality *p_proto)
2083 {
2084 /* There wasn't ever a legacy MFW that published iwarp.
2085 * So at this point, this is either plain l2 or RoCE.
2086 */
2087 if (OSAL_TEST_BIT(ECORE_DEV_CAP_ROCE,
2088 &p_hwfn->hw_info.device_capabilities))
2089 *p_proto = ECORE_PCI_ETH_ROCE;
2090 else
2091 *p_proto = ECORE_PCI_ETH;
2092
2093 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2094 "According to Legacy capabilities, L2 personality is %08x\n",
2095 (u32) *p_proto);
2096 }
2097
2098 static enum _ecore_status_t
2099 ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
2100 struct ecore_ptt *p_ptt,
2101 enum ecore_pci_personality *p_proto)
2102 {
2103 u32 resp = 0, param = 0;
2104 enum _ecore_status_t rc;
2105
2106 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2107 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
2108 if (rc != ECORE_SUCCESS)
2109 return rc;
2110 if (resp != FW_MSG_CODE_OK) {
2111 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2112 "MFW lacks support for command; Returns %08x\n",
2113 resp);
2114 return ECORE_INVAL;
2115 }
2116
2117 switch (param) {
2118 case FW_MB_PARAM_GET_PF_RDMA_NONE:
2119 *p_proto = ECORE_PCI_ETH;
2120 break;
2121 case FW_MB_PARAM_GET_PF_RDMA_ROCE:
2122 *p_proto = ECORE_PCI_ETH_ROCE;
2123 break;
2124 case FW_MB_PARAM_GET_PF_RDMA_IWARP:
2125 *p_proto = ECORE_PCI_ETH_IWARP;
2126 break;
2127 case FW_MB_PARAM_GET_PF_RDMA_BOTH:
2128 *p_proto = ECORE_PCI_ETH_RDMA;
2129 break;
2130 default:
2131 DP_NOTICE(p_hwfn, true,
2132 "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
2133 param);
2134 return ECORE_INVAL;
2135 }
2136
2137 DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
2138 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2139 (u32) *p_proto, resp, param);
2140 return ECORE_SUCCESS;
2141 }
2142
2143 static enum _ecore_status_t
2144 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
2145 struct public_func *p_info,
2146 struct ecore_ptt *p_ptt,
2147 enum ecore_pci_personality *p_proto)
2148 {
2149 enum _ecore_status_t rc = ECORE_SUCCESS;
2150
2151 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2152 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2153 if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
2154 ECORE_SUCCESS)
2155 ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2156 break;
2157 case FUNC_MF_CFG_PROTOCOL_ISCSI:
2158 *p_proto = ECORE_PCI_ISCSI;
2159 break;
2160 case FUNC_MF_CFG_PROTOCOL_FCOE:
2161 *p_proto = ECORE_PCI_FCOE;
2162 break;
2163 case FUNC_MF_CFG_PROTOCOL_ROCE:
2164 DP_NOTICE(p_hwfn, true, "RoCE personality is not a valid value!\n");
2165 rc = ECORE_INVAL;
2166 break;
2167 default:
2168 rc = ECORE_INVAL;
2169 }
2170
2171 return rc;
2172 }
2173
2174 enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
2175 struct ecore_ptt *p_ptt)
2176 {
2177 struct ecore_mcp_function_info *info;
2178 struct public_func shmem_info;
2179
2180 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2181 MCP_PF_ID(p_hwfn));
2182 info = &p_hwfn->mcp_info->func_info;
2183
2184 info->pause_on_host = (shmem_info.config &
2185 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2186
2187 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2188 &info->protocol)) {
2189 DP_ERR(p_hwfn, "Unknown personality %08x\n",
2190 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2191 return ECORE_INVAL;
2192 }
2193
2194 ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
2195
2196 if (shmem_info.mac_upper || shmem_info.mac_lower) {
2197 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2198 info->mac[1] = (u8)(shmem_info.mac_upper);
2199 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2200 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2201 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2202 info->mac[5] = (u8)(shmem_info.mac_lower);
2203
2204 /* Store primary MAC for later possible WoL */
2205 OSAL_MEMCPY(&p_hwfn->p_dev->wol_mac, info->mac, ETH_ALEN);
2206
2207 } else {
2208 /* TODO - are there protocols for which there's no MAC? */
2209 DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
2210 }
2211
2212 /* TODO - are these calculations correct for a BE machine? */
2213 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
2214 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
2215 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
2216 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
2217
2218 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2219
2220 info->mtu = (u16)shmem_info.mtu_size;
2221
2222 p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_NONE;
2223 if (ecore_mcp_is_init(p_hwfn)) {
2224 u32 resp = 0, param = 0;
2225 enum _ecore_status_t rc;
2226
2227 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2228 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
2229 if (rc != ECORE_SUCCESS)
2230 return rc;
2231 if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
2232 p_hwfn->hw_info.b_wol_support = ECORE_WOL_SUPPORT_PME;
2233 }
2234 p_hwfn->p_dev->wol_config = (u8)ECORE_OV_WOL_DEFAULT;
2235
2236 DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
2237 "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
2238 info->pause_on_host, info->protocol,
2239 info->bandwidth_min, info->bandwidth_max,
2240 info->mac[0], info->mac[1], info->mac[2],
2241 info->mac[3], info->mac[4], info->mac[5],
2242 info->wwn_port, info->wwn_node, info->ovlan,
2243 (u8)p_hwfn->hw_info.b_wol_support);
2244
2245 return ECORE_SUCCESS;
2246 }
2247
2248 struct ecore_mcp_link_params
2249 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
2250 {
2251 if (!p_hwfn || !p_hwfn->mcp_info)
2252 return OSAL_NULL;
2253 return &p_hwfn->mcp_info->link_input;
2254 }
2255
2256 struct ecore_mcp_link_state
2257 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
2258 {
2259 if (!p_hwfn || !p_hwfn->mcp_info)
2260 return OSAL_NULL;
2261
2262 #ifndef ASIC_ONLY
2263 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
2264 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
2265 p_hwfn->mcp_info->link_output.link_up = true;
2266 }
2267 #endif
2268
2269 return &p_hwfn->mcp_info->link_output;
2270 }
2271
2272 struct ecore_mcp_link_capabilities
2273 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
2274 {
2275 if (!p_hwfn || !p_hwfn->mcp_info)
2276 return OSAL_NULL;
2277 return &p_hwfn->mcp_info->link_capabilities;
2278 }
2279
2280 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
2281 struct ecore_ptt *p_ptt)
2282 {
2283 u32 resp = 0, param = 0;
2284 enum _ecore_status_t rc;
2285
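/* The command parameter is presumably the requested drain period (in
 * msec); the sleep below pads it slightly so the drain has certainly
 * expired by the time we return.
 */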
2286 rc = ecore_mcp_cmd(p_hwfn, p_ptt,
2287 DRV_MSG_CODE_NIG_DRAIN, 1000,
2288 &resp, &param);
2289
2290 /* Wait for the drain to complete before returning */
2291 OSAL_MSLEEP(1020);
2292
2293 return rc;
2294 }
2295
2296 #ifndef LINUX_REMOVE
2297 const struct ecore_mcp_function_info
2298 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
2299 {
2300 if (!p_hwfn || !p_hwfn->mcp_info)
2301 return OSAL_NULL;
2302 return &p_hwfn->mcp_info->func_info;
2303 }
2304 #endif
2305
2306 enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
2307 struct ecore_ptt *p_ptt,
2308 struct ecore_mcp_nvm_params *params)
2309 {
2310 enum _ecore_status_t rc;
2311
2312 switch (params->type) {
2313 case ECORE_MCP_NVM_RD:
2314 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
2315 params->nvm_common.offset,
2316 &params->nvm_common.resp,
2317 &params->nvm_common.param,
2318 params->nvm_rd.buf_size,
2319 params->nvm_rd.buf);
2320 break;
2321 case ECORE_MCP_CMD:
2322 rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
2323 params->nvm_common.offset,
2324 &params->nvm_common.resp,
2325 &params->nvm_common.param);
2326 break;
2327 case ECORE_MCP_NVM_WR:
2328 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
2329 params->nvm_common.offset,
2330 &params->nvm_common.resp,
2331 &params->nvm_common.param,
2332 params->nvm_wr.buf_size,
2333 params->nvm_wr.buf);
2334 break;
2335 default:
2336 rc = ECORE_NOTIMPL;
2337 break;
2338 }
2339 return rc;
2340 }
2341
2342 #ifndef LINUX_REMOVE
2343 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
2344 struct ecore_ptt *p_ptt,
2345 u32 personalities)
2346 {
2347 enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
2348 struct public_func shmem_info;
2349 int i, count = 0, num_pfs;
2350
2351 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
2352
2353 for (i = 0; i < num_pfs; i++) {
2354 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
2355 MCP_PF_ID_BY_REL(p_hwfn, i));
2356 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
2357 continue;
2358
2359 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2360 &protocol) !=
2361 ECORE_SUCCESS)
2362 continue;
2363
2364 if ((1 << ((u32)protocol)) & personalities)
2365 count++;
2366 }
2367
2368 return count;
2369 }
2370 #endif
2371
2372 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
2373 struct ecore_ptt *p_ptt,
2374 u32 *p_flash_size)
2375 {
2376 u32 flash_size;
2377
2378 #ifndef ASIC_ONLY
2379 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
2380 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
2381 return ECORE_INVAL;
2382 }
2383 #endif
2384
2385 if (IS_VF(p_hwfn->p_dev))
2386 return ECORE_INVAL;
2387
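/* The CFG4 field holds log2 of the flash size in Mbit; shifting by
 * MCP_BYTES_PER_MBIT_SHIFT (17) converts that to bytes, since
 * 1 Mbit = 2^20 bits = 2^17 bytes.
 */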
2388 flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2389 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2390 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2391 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
2392
2393 *p_flash_size = flash_size;
2394
2395 return ECORE_SUCCESS;
2396 }
2397
2398 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
2399 struct ecore_ptt *p_ptt)
2400 {
2401 struct ecore_dev *p_dev = p_hwfn->p_dev;
2402
2403 if (p_dev->recov_in_prog) {
2404 DP_NOTICE(p_hwfn, false,
2405 "Avoid triggering a recovery since such a process is already in progress\n");
2406 return ECORE_AGAIN;
2407 }
2408
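/* Writing 0x1 asserts general attention #35, which the MFW is expected
 * to turn into the MFW_DRV_MSG_ERROR_RECOVERY notification (handled
 * above) on the PFs sharing the device.
 */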
2409 DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
2410 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2411
2412 return ECORE_SUCCESS;
2413 }
2414
2415 static enum _ecore_status_t
2416 ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
2417 struct ecore_ptt *p_ptt,
2418 u8 vf_id, u8 num)
2419 {
2420 u32 resp = 0, param = 0, rc_param = 0;
2421 enum _ecore_status_t rc;
2422
2423 /* Only Leader can configure MSIX, and need to take CMT into account */
2424 if (!IS_LEAD_HWFN(p_hwfn))
2425 return ECORE_SUCCESS;
2426 num *= p_hwfn->p_dev->num_hwfns;
2427
2428 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
2429 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2430 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
2431 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2432
2433 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2434 &resp, &rc_param);
2435
2436 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2437 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
2438 vf_id);
2439 rc = ECORE_INVAL;
2440 } else {
2441 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2442 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2443 num, vf_id);
2444 }
2445
2446 return rc;
2447 }
2448
2449 static enum _ecore_status_t
2450 ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
2451 struct ecore_ptt *p_ptt,
2452 u8 num)
2453 {
2454 u32 resp = 0, param = num, rc_param = 0;
2455 enum _ecore_status_t rc;
2456
2457 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2458 param, &resp, &rc_param);
2459
2460 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2461 DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
2462 rc = ECORE_INVAL;
2463 } else {
2464 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2465 "Requested 0x%02x MSI-x interrupts for VFs\n",
2466 num);
2467 }
2468
2469 return rc;
2470 }
2471
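/* On BB the MFW configures MSI-X per VF (and the leading hw-function
 * must scale the count by the number of hw-functions in CMT); on AH it
 * takes only a single count for the PF's VFs, so vf_id is unused.
 */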
2472 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
2473 struct ecore_ptt *p_ptt,
2474 u8 vf_id, u8 num)
2475 {
2476 if (ECORE_IS_BB(p_hwfn->p_dev))
2477 return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2478 else
2479 return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2480 }
2481
2482 enum _ecore_status_t
2483 ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2484 struct ecore_mcp_drv_version *p_ver)
2485 {
2486 struct ecore_mcp_mb_params mb_params;
2487 struct drv_version_stc drv_version;
2488 u32 num_words, i;
2489 void *p_name;
2490 OSAL_BE32 val;
2491 enum _ecore_status_t rc;
2492
2493 #ifndef ASIC_ONLY
2494 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
2495 return ECORE_SUCCESS;
2496 #endif
2497
2498 OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
2499 drv_version.version = p_ver->version;
2500 num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
2501 for (i = 0; i < num_words; i++) {
2502 /* The driver name is expected to be in a big-endian format */
2503 p_name = &p_ver->name[i * sizeof(u32)];
2504 val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
2505 *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
2506 }
2507
2508 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2509 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2510 mb_params.p_data_src = &drv_version;
2511 mb_params.data_src_size = sizeof(drv_version);
2512 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2513 if (rc != ECORE_SUCCESS)
2514 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2515
2516 return rc;
2517 }
2518
2519 /* Wait up to 100 msec in total (10 x 10 msec retries) for the MCP to halt */
2520 #define ECORE_MCP_HALT_SLEEP_MS 10
2521 #define ECORE_MCP_HALT_MAX_RETRIES 10
2522
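/*
 * ecore_mcp_halt() and ecore_mcp_resume() below are meant to be used as
 * a pair. A minimal (hypothetical) usage sketch, e.g. around an
 * operation that must not race with the management FW:
 *
 *	if (ecore_mcp_halt(p_hwfn, p_ptt) == ECORE_SUCCESS) {
 *		... touch the shared resource ...
 *		(void) ecore_mcp_resume(p_hwfn, p_ptt);
 *	}
 */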
2523 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
2524 struct ecore_ptt *p_ptt)
2525 {
2526 u32 resp = 0, param = 0, cpu_state, cnt = 0;
2527 enum _ecore_status_t rc;
2528
2529 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2530 &param);
2531 if (rc != ECORE_SUCCESS) {
2532 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2533 return rc;
2534 }
2535
2536 do {
2537 OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
2538 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2539 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2540 break;
2541 } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
2542
2543 if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
2544 DP_NOTICE(p_hwfn, false,
2545 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2546 ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2547 return ECORE_BUSY;
2548 }
2549
2550 return ECORE_SUCCESS;
2551 }
2552
2553 #define ECORE_MCP_RESUME_SLEEP_MS 10
2554
2555 enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
2556 struct ecore_ptt *p_ptt)
2557 {
2558 u32 cpu_mode, cpu_state;
2559
2560 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2561
2562 cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2563 cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2564 ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2565
2566 OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
2567 cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2568
2569 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2570 DP_NOTICE(p_hwfn, false,
2571 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2572 cpu_mode, cpu_state);
2573 return ECORE_BUSY;
2574 }
2575
2576 return ECORE_SUCCESS;
2577 }
2578
2579 enum _ecore_status_t
2580 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
2581 struct ecore_ptt *p_ptt,
2582 enum ecore_ov_client client)
2583 {
2584 enum _ecore_status_t rc;
2585 u32 resp = 0, param = 0;
2586 u32 drv_mb_param;
2587
2588 switch (client) {
2589 case ECORE_OV_CLIENT_DRV:
2590 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2591 break;
2592 case ECORE_OV_CLIENT_USER:
2593 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2594 break;
2595 case ECORE_OV_CLIENT_VENDOR_SPEC:
2596 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2597 break;
2598 default:
2599 DP_NOTICE(p_hwfn, true,
2600 "Invalid client type %d\n", client);
2601 return ECORE_INVAL;
2602 }
2603
2604 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2605 drv_mb_param, &resp, &param);
2606 if (rc != ECORE_SUCCESS)
2607 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2608
2609 return rc;
2610 }
2611
2612 enum _ecore_status_t
2613 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
2614 struct ecore_ptt *p_ptt,
2615 enum ecore_ov_driver_state drv_state)
2616 {
2617 enum _ecore_status_t rc;
2618 u32 resp = 0, param = 0;
2619 u32 drv_mb_param;
2620
2621 switch (drv_state) {
2622 case ECORE_OV_DRIVER_STATE_NOT_LOADED:
2623 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2624 break;
2625 case ECORE_OV_DRIVER_STATE_DISABLED:
2626 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2627 break;
2628 case ECORE_OV_DRIVER_STATE_ACTIVE:
2629 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2630 break;
2631 default:
2632 DP_NOTICE(p_hwfn, true,
2633 "Invalid driver state %d\n", drv_state);
2634 return ECORE_INVAL;
2635 }
2636
2637 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2638 drv_mb_param, &resp, &param);
2639 if (rc != ECORE_SUCCESS)
2640 DP_ERR(p_hwfn, "Failed to send driver state\n");
2641
2642 return rc;
2643 }
2644
2645 enum _ecore_status_t
2646 ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2647 struct ecore_fc_npiv_tbl *p_table)
2648 {
2649 enum _ecore_status_t rc = ECORE_SUCCESS;
2650 struct dci_fc_npiv_tbl *p_npiv_table;
2651 u8 *p_buf = OSAL_NULL;
2652 u32 addr, size, i;
2653
2654 p_table->num_wwpn = 0;
2655 p_table->num_wwnn = 0;
2656 addr = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2657 offsetof(struct public_port, fc_npiv_nvram_tbl_addr));
2658 if (addr == NPIV_TBL_INVALID_ADDR) {
2659 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table doesn't exist\n");
2660 return rc;
2661 }
2662
2663 size = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
2664 offsetof(struct public_port, fc_npiv_nvram_tbl_size));
2665 if (!size) {
2666 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "NPIV table is empty\n");
2667 return rc;
2668 }
2669
2670 p_buf = OSAL_VZALLOC(p_hwfn->p_dev, size);
2671 if (!p_buf) {
2672 DP_ERR(p_hwfn, "Buffer allocation failed\n");
2673 return ECORE_NOMEM;
2674 }
2675
2676 rc = ecore_mcp_nvm_read(p_hwfn->p_dev, addr, p_buf, size);
2677 if (rc != ECORE_SUCCESS) {
2678 OSAL_VFREE(p_hwfn->p_dev, p_buf);
2679 return rc;
2680 }
2681
2682 p_npiv_table = (struct dci_fc_npiv_tbl *)p_buf;
2683 p_table->num_wwpn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv;
2684 p_table->num_wwnn = (u16)p_npiv_table->fc_npiv_cfg.num_of_npiv;
2685 for (i = 0; i < p_table->num_wwpn; i++) {
2686 OSAL_MEMCPY(p_table->wwpn, p_npiv_table->settings[i].npiv_wwpn,
2687 ECORE_WWN_SIZE);
2688 OSAL_MEMCPY(p_table->wwnn, p_npiv_table->settings[i].npiv_wwnn,
2689 ECORE_WWN_SIZE);
2690 }
2691
2692 OSAL_VFREE(p_hwfn->p_dev, p_buf);
2693
2694 return ECORE_SUCCESS;
2695 }
2696
2697 enum _ecore_status_t
2698 ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2699 u16 mtu)
2700 {
2701 enum _ecore_status_t rc;
2702 u32 resp = 0, param = 0;
2703 u32 drv_mb_param;
2704
2705 drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
2706 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
2707 drv_mb_param, &resp, &param);
2708 if (rc != ECORE_SUCCESS)
2709 DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
2710
2711 return rc;
2712 }
2713
2714 enum _ecore_status_t
2715 ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2716 u8 *mac)
2717 {
2718 struct ecore_mcp_mb_params mb_params;
2719 enum _ecore_status_t rc;
2720 u32 mfw_mac[2];
2721
2722 OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
2723 mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
2724 mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
2725 DRV_MSG_CODE_VMAC_TYPE_SHIFT;
2726 mb_params.param |= MCP_PF_ID(p_hwfn);
2727
2728 /* The MCP is big-endian, and on LE platforms PCI swaps SHMEM accesses
2729 * at 32-bit granularity.
2730 * The MAC therefore has to be written in native order [and not byte
2731 * order]; otherwise the MFW would read it incorrectly after the swap.
2732 */
2733 mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
2734 mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
2735
2736 mb_params.p_data_src = (u8 *)mfw_mac;
2737 mb_params.data_src_size = 8;
2738 rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2739 if (rc != ECORE_SUCCESS)
2740 DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
2741
2742 /* Store primary MAC for later possible WoL */
2743 OSAL_MEMCPY(p_hwfn->p_dev->wol_mac, mac, ETH_ALEN);
2744
2745 return rc;
2746 }
2747
2748 enum _ecore_status_t
2749 ecore_mcp_ov_update_wol(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2750 enum ecore_ov_wol wol)
2751 {
2752 enum _ecore_status_t rc;
2753 u32 resp = 0, param = 0;
2754 u32 drv_mb_param;
2755
2756 if (p_hwfn->hw_info.b_wol_support == ECORE_WOL_SUPPORT_NONE) {
2757 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2758 "Can't change WoL configuration when WoL isn't supported\n");
2759 return ECORE_INVAL;
2760 }
2761
2762 switch (wol) {
2763 case ECORE_OV_WOL_DEFAULT:
2764 drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
2765 break;
2766 case ECORE_OV_WOL_DISABLED:
2767 drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
2768 break;
2769 case ECORE_OV_WOL_ENABLED:
2770 drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
2771 break;
2772 default:
2773 DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
2774 return ECORE_INVAL;
2775 }
2776
2777 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
2778 drv_mb_param, &resp, &param);
2779 if (rc != ECORE_SUCCESS)
2780 DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
2781
2782 /* Store the WoL update for a future unload */
2783 p_hwfn->p_dev->wol_config = (u8)wol;
2784
2785 return rc;
2786 }
2787
2788 enum _ecore_status_t
2789 ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
2790 enum ecore_ov_eswitch eswitch)
2791 {
2792 enum _ecore_status_t rc;
2793 u32 resp = 0, param = 0;
2794 u32 drv_mb_param;
2795
2796 switch (eswitch) {
2797 case ECORE_OV_ESWITCH_NONE:
2798 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
2799 break;
2800 case ECORE_OV_ESWITCH_VEB:
2801 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
2802 break;
2803 case ECORE_OV_ESWITCH_VEPA:
2804 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
2805 break;
2806 default:
2807 DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
2808 return ECORE_INVAL;
2809 }
2810
2811 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
2812 drv_mb_param, &resp, &param);
2813 if (rc != ECORE_SUCCESS)
2814 DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
2815
2816 return rc;
2817 }
2818
2819 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
2820 struct ecore_ptt *p_ptt,
2821 enum ecore_led_mode mode)
2822 {
2823 u32 resp = 0, param = 0, drv_mb_param;
2824 enum _ecore_status_t rc;
2825
2826 switch (mode) {
2827 case ECORE_LED_MODE_ON:
2828 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2829 break;
2830 case ECORE_LED_MODE_OFF:
2831 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2832 break;
2833 case ECORE_LED_MODE_RESTORE:
2834 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2835 break;
2836 default:
2837 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
2838 return ECORE_INVAL;
2839 }
2840
2841 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2842 drv_mb_param, &resp, &param);
2843 if (rc != ECORE_SUCCESS)
2844 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2845
2846 return rc;
2847 }
2848
2849 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
2850 struct ecore_ptt *p_ptt,
2851 u32 mask_parities)
2852 {
2853 enum _ecore_status_t rc;
2854 u32 resp = 0, param = 0;
2855
2856 rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2857 mask_parities, &resp, &param);
2858
2859 if (rc != ECORE_SUCCESS) {
2860 DP_ERR(p_hwfn, "MCP response failure for mask parities, aborting\n");
2861 } else if (resp != FW_MSG_CODE_OK) {
2862 DP_ERR(p_hwfn, "MCP did not acknowledge mask parity request. Old MFW?\n");
2863 rc = ECORE_INVAL;
2864 }
2865
2866 return rc;
2867 }
2868
2869 enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
2870 u8 *p_buf, u32 len)
2871 {
2872 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2873 u32 bytes_left, offset, bytes_to_copy, buf_size;
2874 struct ecore_mcp_nvm_params params;
2875 struct ecore_ptt *p_ptt;
2876 enum _ecore_status_t rc = ECORE_SUCCESS;
2877
2878 p_ptt = ecore_ptt_acquire(p_hwfn);
2879 if (!p_ptt)
2880 return ECORE_BUSY;
2881
2882 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2883 bytes_left = len;
2884 offset = 0;
2885 params.type = ECORE_MCP_NVM_RD;
2886 params.nvm_rd.buf_size = &buf_size;
2887 params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
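/* Reads are issued in MCP_DRV_NVM_BUF_LEN sized chunks; each chunk
 * packs both the flash address and the requested length into the
 * single mailbox offset parameter, with the length in the upper bits
 * (DRV_MB_PARAM_NVM_LEN_SHIFT).
 */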
2888 while (bytes_left > 0) {
2889 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
2890 MCP_DRV_NVM_BUF_LEN);
2891 params.nvm_common.offset = (addr + offset) |
2892 (bytes_to_copy <<
2893 DRV_MB_PARAM_NVM_LEN_SHIFT);
2894 params.nvm_rd.buf = (u32 *)(p_buf + offset);
2895 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2896 if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
2897 FW_MSG_CODE_NVM_OK)) {
2898 DP_NOTICE(p_dev, false, "MCP command rc = %d\n",
2899 rc);
2900 break;
2901 }
2902
2903 /* This can be a lengthy process, and it's possible the scheduler
2904 * isn't preemptible. Sleep a bit to prevent CPU hogging.
2905 */
2906 if (bytes_left % 0x1000 <
2907 (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
2908 OSAL_MSLEEP(1);
2909
2910 offset += *params.nvm_rd.buf_size;
2911 bytes_left -= *params.nvm_rd.buf_size;
2912 }
2913
2914 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2915 ecore_ptt_release(p_hwfn, p_ptt);
2916
2917 return rc;
2918 }
2919
2920 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
2921 u32 addr, u8 *p_buf, u32 len)
2922 {
2923 struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2924 struct ecore_mcp_nvm_params params;
2925 struct ecore_ptt *p_ptt;
2926 enum _ecore_status_t rc;
2927
2928 p_ptt = ecore_ptt_acquire(p_hwfn);
2929 if (!p_ptt)
2930 return ECORE_BUSY;
2931
2932 OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
2933 params.type = ECORE_MCP_NVM_RD;
2934 params.nvm_rd.buf_size = &len;
2935 params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
2936 DRV_MSG_CODE_PHY_CORE_READ :
2937 DRV_MSG_CODE_PHY_RAW_READ;
2938 params.nvm_common.offset = addr;
2939 params.nvm_rd.buf = (u32 *)p_buf;
2940 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
2941 if (rc != ECORE_SUCCESS)
2942 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
2943
2944 p_dev->mcp_nvm_resp = params.nvm_common.resp;
2945 ecore_ptt_release(p_hwfn, p_ptt);
2946
2947 return rc;
2948 }
2949
2950 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)