/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, v.1, (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License available
 * at http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* include the precompiled configuration values - only once */
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_status.h"
#include "ecore_rt_defs.h"
#include "ecore_init_fw_funcs.h"

#ifndef CONFIG_ECORE_BINARY_FW
#ifdef CONFIG_ECORE_ZIPPED_FW
#include "ecore_init_values_zipped.h"
#else
#include "ecore_init_values.h"
#endif
#endif

#include "ecore_iro_values.h"
#include "ecore_sriov.h"
#include "ecore_gtt_values.h"
#include "reg_addr.h"
#include "ecore_init_ops.h"

#define ECORE_INIT_MAX_POLL_COUNT	100
#define ECORE_INIT_POLL_PERIOD_US	500

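/* Publish the compiled-in IRO (internal RAM offsets) array on the device */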
void ecore_init_iro_array(struct ecore_dev *p_dev)
{
	p_dev->iro_arr = iro_arr;
}

/* Runtime configuration helpers */
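/* Invalidate all runtime-array entries so that stale values are skipped
 * when the runtime init is applied.
 */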
void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
		p_hwfn->rt_data.b_valid[i] = false;
}

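/* Stage a single runtime register value and mark its entry as valid */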
void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn,
			     u32 rt_offset, u32 val)
{
	p_hwfn->rt_data.init_val[rt_offset] = val;
	p_hwfn->rt_data.b_valid[rt_offset] = true;
}

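/* Stage an aggregation of runtime values starting at rt_offset; size is
 * given in bytes and consumed in u32-sized chunks.
 */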
void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
			     u32 rt_offset, u32 *p_val,
			     osal_size_t size)
{
	osal_size_t i;

	for (i = 0; i < size / sizeof(u32); i++) {
		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
		p_hwfn->rt_data.b_valid[rt_offset + i] = true;
	}
}

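/* Apply the staged runtime entries in [rt_offset, rt_offset + size) to the
 * chip. When DMAE is mandated (wide-bus), consecutive valid entries are
 * coalesced so each segment goes out as a single host-to-GRC DMAE
 * transaction; otherwise plain register writes are used.
 */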
static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 addr,
					  u16 rt_offset,
					  u16 size,
					  bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2),
				 p_init_val[i]);
			continue;
		}

		/* Start of a new segment */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_init_val + i),
					 addr + (i << 2), segment, 0);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Jump over the entire segment, including invalid entry */
		i += segment;
	}

	return rc;
}

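/* Allocate the shadow runtime arrays (validity flags + values). VFs don't
 * run the init tool, so they need no runtime data.
 */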
enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_rt_data *rt_data = &p_hwfn->rt_data;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(bool) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->b_valid)
		return ECORE_NOMEM;

	rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					sizeof(u32) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->init_val) {
		OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
		rt_data->b_valid = OSAL_NULL;
		return ECORE_NOMEM;
	}

	return ECORE_SUCCESS;
}

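/* Release the runtime arrays allocated by ecore_init_alloc() */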
void ecore_init_free(struct ecore_hwfn *p_hwfn)
{
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
	p_hwfn->rt_data.init_val = OSAL_NULL;
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
	p_hwfn->rt_data.b_valid = OSAL_NULL;
}

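/* Write an init-values array to the chip. Register-by-register writes are
 * used when DMAE is not allowed, or for short (< 16 dword) sections that
 * do not mandate it; anything else goes out as one DMAE transaction.
 */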
static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt,
				  u32 addr, u32 dmae_data_offset,
				  u32 size, const u32 *p_buf,
				  bool b_must_dmae, bool b_can_dmae)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
#ifndef ASIC_ONLY
	if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
	    !b_can_dmae || (!b_must_dmae && (size < 16))) {
#else
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
#endif
		const u32 *data = p_buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_buf +
							  dmae_data_offset),
					 addr, size, 0);
	}

	return rc;
}

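/* Zero-fill a GRC region via DMAE, replicating a single zeroed host buffer
 * across fill_count dwords (ECORE_DMAE_FLAG_RW_REPL_SRC). Note that the
 * fill argument is unused here; only a zero pattern is produced.
 */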
static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u32 addr, u32 fill,
						 u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];

	OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	return ecore_dmae_host2grc(p_hwfn, p_ptt,
				   (osal_uintptr_t)(&(zero_buffer[0])),
				   addr, fill_count,
				   ECORE_DMAE_FLAG_RW_REPL_SRC);
}

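/* Fill a GRC region with a value using one register write per dword */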
static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u32 addr, u32 fill, u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		ecore_wr(p_hwfn, p_ptt, addr, fill);
}

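/* Execute an INIT_SRC_ARRAY write command. The array header selects the
 * flavor: zipped (decompressed into the per-hwfn unzip buffer first),
 * pattern (a block replicated 'repetitions' times), or standard.
 */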
static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 struct init_write_op *cmd,
						 bool b_must_dmae,
						 bool b_can_dmae)
{
	u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
	u32 data = OSAL_LE32_TO_CPU(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
#ifdef CONFIG_ECORE_ZIPPED_FW
	u32 offset, output_len, input_len, max_size;
#endif
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 size;

	array_data = p_dev->fw_data->arr_data;

	hdr = (union init_array_hdr *)(array_data +
				       dmae_array_offset);
	data = OSAL_LE32_TO_CPU(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
#ifdef CONFIG_ECORE_ZIPPED_FW
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data,
				      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);

		output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
					     (u8 *)&array_data[offset],
					     max_size, (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						   output_len,
						   p_hwfn->unzip_buf,
						   b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, true,
				  "Failed to unzip dmae data\n");
			rc = ECORE_INVAL;
		}
#else
		DP_NOTICE(p_hwfn, true,
			  "Using zipped firmware without config enabled\n");
		rc = ECORE_INVAL;
#endif
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data,
				 INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
						   dmae_array_offset + 1,
						   size, array_data,
						   b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data,
				 INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
					   dmae_array_offset + 1,
					   size, array_data,
					   b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}

/* init_ops write command */
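/* Dispatch a write op by its INIT_WRITE_OP_SOURCE field: inline immediate,
 * zero fill, data array, or runtime array. Wide-bus destinations must be
 * written through DMAE.
 */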
static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_write_op *p_cmd,
					      bool b_can_dmae)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn, true,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return ECORE_INVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
		ecore_wr(p_hwfn, p_ptt, addr, data);
		break;
	case INIT_SRC_ZEROS:
		data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
		if (b_must_dmae || (b_can_dmae && (data >= 64)))
			rc = ecore_init_fill_dmae(p_hwfn, p_ptt,
						  addr, 0, data);
		else
			ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
		break;
	case INIT_SRC_ARRAY:
		rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
					  b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		ecore_init_rt(p_hwfn, p_ptt, addr,
			      OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
			      OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
			      b_must_dmae);
		break;
	}

	return rc;
}

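/* Comparison callbacks used by the read/poll commands below */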
static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
{
	return (val == expected_val);
}

static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
{
	return (val & expected_val) == expected_val;
}

static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
{
	return (val | expected_val) > 0;
}

/* init_ops read/poll commands */
static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      struct init_read_op *cmd)
{
	bool (*comp_check)(u32 val, u32 expected_val);
	u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
	u32 data, addr, poll;
	int i;

	data = OSAL_LE32_TO_CPU(cmd->op_data);
	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay *= 100;
#endif

	val = ecore_rd(p_hwfn, p_ptt, addr);

	if (poll == INIT_POLL_NONE)
		return;

	switch (poll) {
	case INIT_POLL_EQ:
		comp_check = comp_eq;
		break;
	case INIT_POLL_OR:
		comp_check = comp_or;
		break;
	case INIT_POLL_AND:
		comp_check = comp_and;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
		       cmd->op_data);
		return;
	}

	data = OSAL_LE32_TO_CPU(cmd->expected_val);
	for (i = 0;
	     i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data);
	     i++) {
		OSAL_UDELAY(delay);
		val = ecore_rd(p_hwfn, p_ptt, addr);
	}

	if (i == ECORE_INIT_MAX_POLL_COUNT)
		DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
		       addr,
		       OSAL_LE32_TO_CPU(cmd->expected_val), val,
		       OSAL_LE32_TO_CPU(cmd->op_data));
}


/* init_ops callbacks entry point */
static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      struct init_callback_op *p_cmd)
{
	DP_NOTICE(p_hwfn, true, "Currently init values have no need of callbacks\n");
}

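/* Recursively evaluate the modes expression tree: each node read from the
 * buffer (advancing *p_offset) is either a NOT/OR/AND operator or a leaf
 * encoding a single mode bit to test against 'modes'.
 */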
static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
				    u16 *p_offset, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	const u8 *modes_tree_buf;
	u8 arg1, arg2, tree_val;

	modes_tree_buf = p_dev->fw_data->modes_tree_buf;
	tree_val = modes_tree_buf[(*p_offset)++];
	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 & arg2;
	default:
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & (1 << tree_val)) ? 1 : 0;
	}
}

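/* For an IF_MODE op, return how many commands to skip when the mode
 * expression does not match; 0 means continue executing in place.
 */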
static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
			       struct init_if_mode_op *p_cmd, int modes)
{
	u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);

	if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
		return 0;
	else
		return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
				 INIT_IF_MODE_OP_CMD_OFFSET);
}

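/* For an IF_PHASE op, return how many commands to skip when the op's
 * phase/phase-id does not match the one being run; 0 means continue.
 */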
static u32 ecore_init_cmd_phase(struct ecore_hwfn *p_hwfn,
				struct init_if_phase_op *p_cmd,
				u32 phase, u32 phase_id)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);

	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
		return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
				 INIT_IF_PHASE_OP_CMD_OFFSET);
	else
		return 0;
}

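/* Init tool entry point: walk the init_ops command array and execute the
 * commands selected for the given phase, phase_id and modes. Per-phase
 * DMAE usage is gated by INIT_IF_PHASE_OP_DMAE_ENABLE.
 */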
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    int phase,
				    int phase_id,
				    int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	bool b_dmae = false;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	num_init_ops = p_dev->fw_data->init_ops_size;
	init_ops = p_dev->fw_data->init_ops;

#ifdef CONFIG_ECORE_ZIPPED_FW
	p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
					MAX_ZIPPED_SIZE * 4);
	if (!p_hwfn->unzip_buf) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
		return ECORE_NOMEM;
	}
#endif

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					       b_dmae);
			break;

		case INIT_OP_READ:
			ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;

		case INIT_OP_IF_MODE:
			cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
						       modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += ecore_init_cmd_phase(p_hwfn, &cmd->if_phase,
							phase, phase_id);
			b_dmae = GET_FIELD(data,
					   INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;
		case INIT_OP_DELAY:
			/* ecore_init_run is always invoked from
			 * a sleepable context
			 */
			OSAL_UDELAY(cmd->delay.delay);
			break;

		case INIT_OP_CALLBACK:
			ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		if (rc)
			break;
	}
#ifdef CONFIG_ECORE_ZIPPED_FW
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
	p_hwfn->unzip_buf = OSAL_NULL;
#endif
	return rc;
}

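/* Program the fixed GTT windows. On emulation/FPGA this first triggers and
 * polls the one-time PTT/GTT initialization that the MFW performs on ASIC.
 */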
void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
{
	u32 gtt_base;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		/* This is done by MFW on ASIC; regardless, this should only
		 * be done once per chip [i.e., common]. Implementation is
		 * not too bright, but it should work on the simple FPGA/EMUL
		 * scenarios.
		 */
		static bool initialized = false;
		int poll_cnt = 500;
		u32 val;

		/* initialize PTT/GTT (poll for completion) */
		if (!initialized) {
			ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
				 PGLUE_B_REG_START_INIT_PTT_GTT, 1);
			initialized = true;
		}

		do {
			/* ptt might be overridden by HW until this is done */
			OSAL_UDELAY(10);
			ecore_ptt_invalidate(p_hwfn);
			val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
				       PGLUE_B_REG_INIT_DONE_PTT_GTT);
		} while ((val != 1) && --poll_cnt);

		if (!poll_cnt)
			DP_ERR(p_hwfn, "PGLUE_B_REG_INIT_DONE didn't complete\n");
	}
#endif

	/* Set the global windows */
	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

	for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
		if (pxp_global_win[i])
			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
			       pxp_global_win[i]);
}

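/* Bind the firmware data tables: parse the binary-firmware buffer headers
 * when CONFIG_ECORE_BINARY_FW is set, otherwise point at the statically
 * compiled init values.
 */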
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
					const u8 *data)
{
	struct ecore_fw_data *fw = p_dev->fw_data;

#ifdef CONFIG_ECORE_BINARY_FW
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!data) {
		DP_NOTICE(p_dev, true, "Invalid fw data\n");
		return ECORE_INVAL;
	}

	buf_hdr = (struct bin_buffer_hdr *)data;

	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
	fw->fw_ver_info = (struct fw_ver_info *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)(data + offset);

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)(data + offset);
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);
#else
	fw->init_ops = (union init_op *)init_ops;
	fw->arr_data = (u32 *)init_val;
	fw->modes_tree_buf = (u8 *)modes_tree_buf;
	fw->init_ops_size = init_ops_size;
#endif

	return ECORE_SUCCESS;
}