1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source. A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * This file is part of the Chelsio T4/T5/T6 Ethernet driver.
14  *
15  * Copyright (C) 2003-2019 Chelsio Communications.  All rights reserved.
16  *
17  * This program is distributed in the hope that it will be useful, but WITHOUT
18  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19  * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
20  * release for licensing terms and conditions.
21  */
22 
23 #include "common.h"
24 #include "t4_regs.h"
25 #include "t4_regs_values.h"
26 #include "t4fw_interface.h"
27 
28 /**
29  *	t4_wait_op_done_val - wait until an operation is completed
30  *	@adapter: the adapter performing the operation
31  *	@reg: the register to check for completion
32  *	@mask: a single-bit field within @reg that indicates completion
33  *	@polarity: the value of the field when the operation is completed
34  *	@attempts: number of check iterations
35  *	@delay: delay in usecs between iterations
36  *	@valp: where to store the value of the register at completion time
37  *
38  *	Wait until an operation is completed by checking a bit in a register
39  *	up to @attempts times.  If @valp is not NULL the value of the register
40  *	at the time it indicated completion is stored there.  Returns 0 if the
41  *	operation completes and	-EAGAIN	otherwise.
42  */
43 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
44 			       int polarity, int attempts, int delay, u32 *valp)
45 {
46 	while (1) {
47 		u32 val = t4_read_reg(adapter, reg);
48 
49 		if (!!(val & mask) == polarity) {
50 			if (valp)
51 				*valp = val;
52 			return 0;
53 		}
54 		if (--attempts == 0)
55 			return -EAGAIN;
56 		if (delay)
57 			udelay(delay);
58 	}
59 }
60 
/*
 * Convenience wrapper around t4_wait_op_done_val() for callers that don't
 * need the register value observed at completion time.
 */
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
67 
68 /**
69  *	t4_set_reg_field - set a register field to a value
70  *	@adapter: the adapter to program
71  *	@addr: the register address
72  *	@mask: specifies the portion of the register to modify
73  *	@val: the new value for the register field
74  *
75  *	Sets a register field specified by the supplied mask to the
76  *	given value.
77  */
78 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
79 		      u32 val)
80 {
81 	u32 v = t4_read_reg(adapter, addr) & ~mask;
82 
83 	t4_write_reg(adapter, addr, v | val);
84 	(void) t4_read_reg(adapter, addr);      /* flush */
85 }
86 
87 /**
88  *	t4_read_indirect - read indirectly addressed registers
89  *	@adap: the adapter
90  *	@addr_reg: register holding the indirect address
91  *	@data_reg: register holding the value of the indirect register
92  *	@vals: where the read register values are stored
93  *	@nregs: how many indirect registers to read
94  *	@start_idx: index of first indirect register to read
95  *
96  *	Reads registers that are accessed indirectly through an address/data
97  *	register pair.
98  */
99 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
100 			     unsigned int data_reg, u32 *vals,
101 			     unsigned int nregs, unsigned int start_idx)
102 {
103 	while (nregs--) {
104 		t4_write_reg(adap, addr_reg, start_idx);
105 		*vals++ = t4_read_reg(adap, data_reg);
106 		start_idx++;
107 	}
108 }
109 
110 /**
111  *	t4_write_indirect - write indirectly addressed registers
112  *	@adap: the adapter
113  *	@addr_reg: register holding the indirect addresses
114  *	@data_reg: register holding the value for the indirect registers
115  *	@vals: values to write
116  *	@nregs: how many indirect registers to write
117  *	@start_idx: address of first indirect register to write
118  *
119  *	Writes a sequential block of registers that are accessed indirectly
120  *	through an address/data register pair.
121  */
122 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
123 		       unsigned int data_reg, const u32 *vals,
124 		       unsigned int nregs, unsigned int start_idx)
125 {
126 	while (nregs--) {
127 		t4_write_reg(adap, addr_reg, start_idx++);
128 		t4_write_reg(adap, data_reg, *vals++);
129 	}
130 }
131 
132 /*
133  * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
134  * mechanism.  This guarantees that we get the real value even if we're
135  * operating within a Virtual Machine and the Hypervisor is trapping our
136  * Configuration Space accesses.
137  *
138  * N.B. This routine should only be used as a last resort: the firmware uses
139  *      the backdoor registers on a regular basis and we can end up
140  *      conflicting with it's uses!
141  */
142 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
143 {
144 	u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
145 
146 	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
147 		req |= F_ENABLE;
148 	else
149 		req |= F_T6_ENABLE;
150 
151 	if (is_t4(adap->params.chip))
152 		req |= F_LOCALCFG;
153 
154 	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
155 	*val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
156 
157 	/* Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
158 	 * Configuration Space read.  (None of the other fields matter when
159 	 * F_ENABLE is 0 so a simple register write is easier than a
160 	 * read-modify-write via t4_set_reg_field().)
161 	 */
162 	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
163 }
164 
165 /*
166  * t4_report_fw_error - report firmware error
167  * @adap: the adapter
168  *
169  * The adapter firmware can indicate error conditions to the host.
170  * If the firmware has indicated an error, print out the reason for
171  * the firmware error.
172  */
173 static void t4_report_fw_error(struct adapter *adap)
174 {
175 	static const char *const reason[] = {
176 		"Crash",			/* PCIE_FW_EVAL_CRASH */
177 		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
178 		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
179 		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
180 		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
181 		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
182 		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
183 		"Reserved",			/* reserved */
184 	};
185 	u32 pcie_fw;
186 
187 	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
188 	if (pcie_fw & F_PCIE_FW_ERR) {
189 		CH_ERR(adap, "Firmware reports adapter error: %s\n",
190 			reason[G_PCIE_FW_EVAL(pcie_fw)]);
191 		adap->flags &= ~FW_OK;
192 	}
193 }
194 
195 /*
196  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
197  */
198 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
199 			 u32 mbox_addr)
200 {
201 	for ( ; nflit; nflit--, mbox_addr += 8)
202 		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
203 }
204 
205 /*
206  * Handle a FW assertion reported in a mailbox.
207  */
208 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
209 {
210 	CH_ALERT(adap,
211 		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
212 		  asrt->u.assert.filename_0_7,
213 		  be32_to_cpu(asrt->u.assert.line),
214 		  be32_to_cpu(asrt->u.assert.x),
215 		  be32_to_cpu(asrt->u.assert.y));
216 }
217 
218 #define X_CIM_PF_NOACCESS 0xeeeeeeee
219 
220 /*
 * If the Host OS Driver needs locking around accesses to the mailbox, this
222  * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
223  */
224 /* makes single-statement usage a bit cleaner ... */
225 #ifdef T4_OS_NEEDS_MBOX_LOCKING
226 #define T4_OS_MBOX_LOCKING(x) x
227 #else
228 #define T4_OS_MBOX_LOCKING(x) do {} while (0)
229 #endif
230 
231 /*
232  * If the OS Driver wants busy waits to keep a watchdog happy, tap it during
233  * busy loops which don't sleep.
234  */
235 #ifdef T4_OS_NEEDS_TOUCH_NMI_WATCHDOG
236 #define T4_OS_TOUCH_NMI_WATCHDOG()	t4_os_touch_nmi_watchdog()
237 #else
238 #define T4_OS_TOUCH_NMI_WATCHDOG()
239 #endif
240 
241 #ifdef T4_OS_LOG_MBOX_CMDS
242 /**
243  *	t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
244  *	@adapter: the adapter
245  *	@cmd: the Firmware Mailbox Command or Reply
246  *	@size: command length in bytes
247  *	@access: the time (ms) needed to access the Firmware Mailbox
248  *	@execute: the time (ms) the command spent being executed
249  */
250 static void t4_record_mbox(struct adapter *adapter,
251 			   const __be64 *cmd, unsigned int size,
252 			   int access, int execute)
253 {
254 	struct mbox_cmd_log *log = adapter->mbox_log;
255 	struct mbox_cmd *entry;
256 	int i;
257 
258 	entry = mbox_cmd_log_entry(log, log->cursor++);
259 	if (log->cursor == log->size)
260 		log->cursor = 0;
261 
262 	for (i = 0; i < size/8; i++)
263 		entry->cmd[i] = be64_to_cpu(cmd[i]);
264 	while (i < MBOX_LEN/8)
265 		entry->cmd[i++] = 0;
266 	entry->timestamp = t4_os_timestamp();
267 	entry->seqno = log->seqno++;
268 	entry->access = access;
269 	entry->execute = execute;
270 }
271 
272 #define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \
273 	t4_record_mbox(__adapter, __cmd, __size, __access, __execute)
274 
275 #else /* !T4_OS_LOG_MBOX_CMDS */
276 
277 #define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \
278 	/* nothing */
279 
280 #endif /* !T4_OS_LOG_MBOX_CMDS */
281 
282 /**
283  *	t4_record_mbox_marker - record a marker in the mailbox log
284  *	@adapter: the adapter
285  *	@marker: byte array marker
286  *	@size: marker size in bytes
287  *
288  *	We inject a "fake mailbox command" into the Firmware Mailbox Log
289  *	using a known command token and then the bytes of the specified
290  *	marker.  This lets debugging code inject markers into the log to
291  *	help identify which commands are in response to higher level code.
292  */
293 void t4_record_mbox_marker(struct adapter *adapter,
294 			   const void *marker, unsigned int size)
295 {
296 #ifdef T4_OS_LOG_MBOX_CMDS
297 	__be64 marker_cmd[MBOX_LEN/8];
298 	const unsigned int max_marker = sizeof marker_cmd - sizeof (__be64);
299 	unsigned int marker_cmd_size;
300 
301 	if (size > max_marker)
302 		size = max_marker;
303 
304 	marker_cmd[0] = cpu_to_be64(~0LLU);
305 	memcpy(&marker_cmd[1], marker, size);
306 	memset((unsigned char *)&marker_cmd[1] + size, 0, max_marker - size);
307 	marker_cmd_size = sizeof (__be64) + roundup(size, sizeof (__be64));
308 
309 	t4_record_mbox(adapter, marker_cmd, marker_cmd_size, 0, 0);
310 #endif /* T4_OS_LOG_MBOX_CMDS */
311 }
312 
313 /*
314  * Delay time in microseconds to wait for mailbox access/fw reply
315  * to mailbox command
316  */
317 #define MIN_MBOX_CMD_DELAY 900
318 #define MBOX_CMD_DELAY 1000
319 
320 /**
321  *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
322  *	@adap: the adapter
323  *	@mbox: index of the mailbox to use
324  *	@cmd: the command to write
325  *	@size: command length in bytes
326  *	@rpl: where to optionally store the reply
327  *	@sleep_ok: if true we may sleep while awaiting command completion
328  *	@timeout: time to wait for command to finish before timing out
329  *		(negative implies @sleep_ok=false)
330  *
331  *	Sends the given command to FW through the selected mailbox and waits
332  *	for the FW to execute the command.  If @rpl is not %NULL it is used to
333  *	store the FW's reply to the command.  The command and its optional
334  *	reply are of the same length.  Some FW commands like RESET and
335  *	INITIALIZE can take a considerable amount of time to execute.
336  *	@sleep_ok determines whether we may sleep while awaiting the response.
337  *	If sleeping is allowed we use progressive backoff otherwise we spin.
338  *	Note that passing in a negative @timeout is an alternate mechanism
339  *	for specifying @sleep_ok=false.  This is useful when a higher level
340  *	interface allows for specification of @timeout but not @sleep_ok ...
341  *
342  *	The return value is 0 on success or a negative errno on failure.  A
343  *	failure can happen either because we are not able to execute the
344  *	command or FW executes it but signals an error.  In the latter case
345  *	the return value is the error code indicated by FW (negated).
346  */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
#ifdef T4_OS_NEEDS_MBOX_LOCKING
	u16 access = 0;		/* iterations spent waiting for mailbox access */
#endif
	u32 v;
	u64 res;
	int i, ret;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	T4_OS_MBOX_LOCKING(t4_os_list_t entry);
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

#ifdef T4_OS_NEEDS_MBOX_LOCKING
	/*
	 * Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);

	for (i = 0; ; i++) {
		/*
		 * If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rarely
		 * contend on access to the mailbox ...  Also check for a
		 * firmware error which we'll report as a device error.
		 */
		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		if (i > 4*timeout || (pcie_fw & F_PCIE_FW_ERR)) {
			t4_os_atomic_list_del(&entry, &adap->mbox_lock);
			t4_report_fw_error(adap);
			ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
			T4_RECORD_MBOX(adap, cmd, size, ret, 0);
			return ret;
		}

		/*
		 * If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (t4_os_list_first_entry(&adap->mbox_list) == &entry)
			break;

		/*
		 * Delay for a bit before checking again ...
		 */
		if (sleep_ok) {
			usleep_range(MIN_MBOX_CMD_DELAY, MBOX_CMD_DELAY);
		} else {
			T4_OS_TOUCH_NMI_WATCHDOG();
			udelay(MBOX_CMD_DELAY);
		}
	}
	/* Remember how long access took; recorded in the mailbox log below. */
	access = i;
#endif /* T4_OS_NEEDS_MBOX_LOCKING */

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, dequeue ourselves from the
	 * mailbox atomic access list and report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
							 &adap->mbox_lock));
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		T4_RECORD_MBOX(adap, cmd, size, access, ret);
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		       (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	T4_RECORD_MBOX(adap, cmd, size, access, 0);
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	/*
	 * XXX It's not clear that we need this anymore now
	 * XXX that we have mailbox logging ...
	 */
	CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

	/* Hand the mailbox over to the firmware to kick off execution. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	(void) t4_read_reg(adap, ctl_reg);	/* flush write */

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.  Each iteration waits ~MBOX_CMD_DELAY usecs
	 * (about 1 ms), so @timeout is effectively in milliseconds.
	 */
	for (i = 0;
	     !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
	     i < timeout;
	     i++) {
		if (sleep_ok) {
			usleep_range(MIN_MBOX_CMD_DELAY, MBOX_CMD_DELAY);
		} else {
			T4_OS_TOUCH_NMI_WATCHDOG();
			udelay(MBOX_CMD_DELAY);
		}

		v = t4_read_reg(adap, ctl_reg);
		/* X_CIM_PF_NOACCESS (all 0xe's) means the read didn't take;
		 * just keep polling.
		 */
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				/* We own the mailbox but there's no reply
				 * yet: release ownership and keep waiting.
				 */
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
								 &adap->mbox_lock));

			T4_RECORD_MBOX(adap, cmd_rpl, size, access, i + 1);

			/*
			 * XXX It's not clear that we need this anymore now
			 * XXX that we have mailbox logging ...
			 */
			CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);
			CH_MSG(adap, INFO, HW,
			       "command completed in %d ms (%ssleeping)\n",
			       i + 1, sleep_ok ? "" : "non-");

			/* First flit of the reply carries the FW opcode and
			 * return value.  A FW_DEBUG_CMD reply here is an
			 * in-band firmware assertion rather than our reply.
			 */
			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			/* FW reports a positive errno in RETVAL; negate it. */
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry, &adap->mbox_lock));

	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	T4_RECORD_MBOX(adap, cmd, size, access, ret);
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}
550 
551 #ifdef CONFIG_CUDBG
552 /*
553  * The maximum number of times to iterate for FW reply before
554  * issuing a mailbox timeout
555  */
556 #define FW_REPLY_WAIT_LOOP 6000000
557 
558 /**
559  *	t4_wr_mbox_meat_timeout_panic - send a command to FW through the given
560  *	mailbox. This function is a minimal version of t4_wr_mbox_meat_timeout()
561  *	and is only invoked during a kernel crash. Since this function is
 *	called through an atomic notifier chain, we cannot sleep awaiting a
563  *	response from FW, hence repeatedly loop until we get a reply.
564  *
565  *	@adap: the adapter
566  *	@mbox: index of the mailbox to use
567  *	@cmd: the command to write
568  *	@size: command length in bytes
569  *	@rpl: where to optionally store the reply
570  */
571 
static int t4_wr_mbox_meat_timeout_panic(struct adapter *adap, int mbox,
			    const void *cmd, int size, void *rpl)
{
	u32 v;
	u64 res;
	int i, ret;
	u64 cnt;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * Check for a firmware error which we'll report as a
	 * device error.
	 */
	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR) {
		t4_report_fw_error(adap);
		/* F_PCIE_FW_ERR is known set here, so this is always -ENXIO */
		ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
		return ret;
	}

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		       (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

	/* Hand the mailbox over to the firmware to kick off execution. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);	/* flush write */

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.  Note that there is no delay in this loop: we
	 * busy-spin because we cannot sleep in panic context.  The loop is
	 * bounded by FW_REPLY_WAIT_LOOP iterations instead of wall time.
	 */
	for (cnt = 0;
	    !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
	    cnt < FW_REPLY_WAIT_LOOP;
	    cnt++) {
		v = t4_read_reg(adap, ctl_reg);
		/* X_CIM_PF_NOACCESS (all 0xe's) means the read didn't take;
		 * just keep polling.
		 */
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				/* We own the mailbox but there's no reply
				 * yet: release ownership and keep waiting.
				 */
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));

			CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

			/* First flit of the reply carries the FW opcode and
			 * return value.  A FW_DEBUG_CMD reply here is an
			 * in-band firmware assertion rather than our reply.
			 */
			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			/* FW reports a positive errno in RETVAL; negate it. */
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}
699 #endif
700 
701 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
702 		    void *rpl, bool sleep_ok)
703 {
704 #ifdef CONFIG_CUDBG
705 	if (adap->flags & K_CRASH)
706 		return t4_wr_mbox_meat_timeout_panic(adap, mbox, cmd, size,
707 						     rpl);
708 	else
709 #endif
710 		return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
711 					       sleep_ok, FW_CMD_MAX_TIMEOUT);
712 
713 }
714 
715 static int t4_edc_err_read(struct adapter *adap, int idx)
716 {
717 	u32 edc_ecc_err_addr_reg;
718 	u32 edc_bist_status_rdata_reg;
719 
720 	if (is_t4(adap->params.chip)) {
721 		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
722 		return 0;
723 	}
724 	if (idx != MEM_EDC0 && idx != MEM_EDC1) {
725 		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
726 		return 0;
727 	}
728 
729 	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
730 	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
731 
732 	CH_WARN(adap,
733 		"edc%d err addr 0x%x: 0x%x.\n",
734 		idx, edc_ecc_err_addr_reg,
735 		t4_read_reg(adap, edc_ecc_err_addr_reg));
736 	CH_WARN(adap,
737 	 	"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
738 		edc_bist_status_rdata_reg,
739 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
740 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
741 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
742 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
743 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
744 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
745 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
746 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
747 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
748 
749 	return 0;
750 }
751 
752 /**
753  *	t4_memory_rw_addr - read/write adapter memory via PCIE memory window
754  *	@adap: the adapter
755  *	@win: PCI-E Memory Window to use
756  *	@addr: address within adapter memory
757  *	@len: amount of memory to transfer
758  *	@hbuf: host memory buffer
759  *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
760  *
761  *	Reads/writes an [almost] arbitrary memory region in the firmware: the
762  *	firmware memory address and host buffer must be aligned on 32-bit
763  *	boudaries; the length may be arbitrary.
764  *
765  *	NOTES:
766  *	 1. The memory is transferred as a raw byte sequence from/to the
767  *	    firmware's memory.  If this memory contains data structures which
768  *	    contain multi-byte integers, it's the caller's responsibility to
769  *	    perform appropriate byte order conversions.
770  *
771  *	 2. It is the Caller's responsibility to ensure that no other code
772  *	    uses the specified PCI-E Memory Window while this routine is
773  *	    using it.  This is typically done via the use of OS-specific
774  *	    locks, etc.
775  */
776 int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr,
777 		      u32 len, void *hbuf, int dir)
778 {
779 	u32 pos, offset, resid;
780 	u32 win_pf, mem_reg, mem_aperture, mem_base;
781 	u32 *buf;
782 
783 	/* Argument sanity checks ...
784 	 */
785 	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
786 		return -EINVAL;
787 	buf = (u32 *)hbuf;
788 
789 	/* It's convenient to be able to handle lengths which aren't a
790 	 * multiple of 32-bits because we often end up transferring files to
791 	 * the firmware.  So we'll handle that by normalizing the length here
792 	 * and then handling any residual transfer at the end.
793 	 */
794 	resid = len & 0x3;
795 	len -= resid;
796 
797 	/* Each PCI-E Memory Window is programmed with a window size -- or
798 	 * "aperture" -- which controls the granularity of its mapping onto
799 	 * adapter memory.  We need to grab that aperture in order to know
800 	 * how to use the specified window.  The window is also programmed
801 	 * with the base address of the Memory Window in BAR0's address
802 	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
803 	 * the address is relative to BAR0.
804 	 */
805 	mem_reg = t4_read_reg(adap,
806 			      PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
807 						  win));
808 
809 	/* a dead adapter will return 0xffffffff for PIO reads */
810 	if (mem_reg == 0xffffffff) {
811 		CH_WARN(adap, "Unable to read PCI-E Memory Window Base[%d]\n",
812 			win);
813 		return -ENXIO;
814 	}
815 
816 	mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT);
817 	mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT;
818 	if (is_t4(adap->params.chip))
819 		mem_base -= adap->t4_bar0;
820 	win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf);
821 
822 	/* Calculate our initial PCI-E Memory Window Position and Offset into
823 	 * that Window.
824 	 */
825 	pos = addr & ~(mem_aperture-1);
826 	offset = addr - pos;
827 
828 	/* Set up initial PCI-E Memory Window to cover the start of our
829 	 * transfer.  (Read it back to ensure that changes propagate before we
830 	 * attempt to use the new value.)
831 	 */
832 	t4_write_reg(adap,
833 		     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win),
834 		     pos | win_pf);
835 	t4_read_reg(adap,
836 		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win));
837 
838 	/* Transfer data to/from the adapter as long as there's an integral
839 	 * number of 32-bit transfers to complete.
840 	 *
841 	 * A note on Endianness issues:
842 	 *
843 	 * The "register" reads and writes below from/to the PCI-E Memory
844 	 * Window invoke the standard adapter Big-Endian to PCI-E Link
845 	 * Little-Endian "swizzel."  As a result, if we have the following
846 	 * data in adapter memory:
847 	 *
848 	 *     Memory:  ... | b0 | b1 | b2 | b3 | ...
849 	 *     Address:      i+0  i+1  i+2  i+3
850 	 *
851 	 * Then a read of the adapter memory via the PCI-E Memory Window
852 	 * will yield:
853 	 *
854 	 *     x = readl(i)
855 	 *	   31                  0
856 	 *         [ b3 | b2 | b1 | b0 ]
857 	 *
858 	 * If this value is stored into local memory on a Little-Endian system
859 	 * it will show up correctly in local memory as:
860 	 *
861 	 *     ( ..., b0, b1, b2, b3, ... )
862 	 *
863 	 * But on a Big-Endian system, the store will show up in memory
864 	 * incorrectly swizzled as:
865 	 *
866 	 *     ( ..., b3, b2, b1, b0, ... )
867 	 *
868 	 * So we need to account for this in the reads and writes to the
869 	 * PCI-E Memory Window below by undoing the register read/write
870 	 * swizzels.
871 	 */
872 	while (len > 0) {
873 		if (dir == T4_MEMORY_READ)
874 			*buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
875 						mem_base + offset));
876 		else
877 			t4_write_reg(adap, mem_base + offset,
878 				     (__force u32)cpu_to_le32(*buf++));
879 		offset += sizeof(__be32);
880 		len -= sizeof(__be32);
881 
882 		/* If we've reached the end of our current window aperture,
883 		 * move the PCI-E Memory Window on to the next.  Note that
884 		 * doing this here after "len" may be 0 allows us to set up
885 		 * the PCI-E Memory Window for a possible final residual
886 		 * transfer below ...
887 		 */
888 		if (offset == mem_aperture) {
889 			pos += mem_aperture;
890 			offset = 0;
891 			t4_write_reg(adap,
892 				PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
893 						    win), pos | win_pf);
894 			t4_read_reg(adap,
895 				PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
896 						    win));
897 		}
898 	}
899 
900 	/* If the original transfer had a length which wasn't a multiple of
901 	 * 32-bits, now's where we need to finish off the transfer of the
902 	 * residual amount.  The PCI-E Memory Window has already been moved
903 	 * above (if necessary) to cover this final transfer.
904 	 */
905 	if (resid) {
906 		union {
907 			u32 word;
908 			char byte[4];
909 		} last;
910 		unsigned char *bp;
911 		int i;
912 
913 		if (dir == T4_MEMORY_READ) {
914 			last.word = le32_to_cpu(
915 					(__force __le32)t4_read_reg(adap,
916 						mem_base + offset));
917 			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
918 				bp[i] = last.byte[i];
919 		} else {
920 			last.word = *buf;
921 			for (i = resid; i < 4; i++)
922 				last.byte[i] = 0;
923 			t4_write_reg(adap, mem_base + offset,
924 				     (__force u32)cpu_to_le32(last.word));
925 		}
926 	}
927 
928 	return 0;
929 }
930 
931 /**
932  *	t4_memory_rw_mtype - read/write EDC 0, EDC 1 or MC via PCIE memory window
933  *	@adap: the adapter
934  *	@win: PCI-E Memory Window to use
935  *	@mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_HMA or MEM_MC
936  *	@maddr: address within indicated memory type
937  *	@len: amount of memory to transfer
938  *	@hbuf: host memory buffer
939  *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
940  *
941  *	Reads/writes adapter memory using t4_memory_rw_addr().  This routine
942  *	provides an (memory type, address withing memory type) interface.
943  */
944 int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
945 		       u32 len, void *hbuf, int dir)
946 {
947 	u32 mtype_offset;
948 	u32 edc_size, mc_size;
949 
950 	/* Offset into the region of memory which is being accessed
951 	 * MEM_EDC0 = 0
952 	 * MEM_EDC1 = 1
953 	 * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
954 	 * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
955 	 * MEM_HMA  = 4
956 	 */
957 	edc_size  = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR));
958 	if (mtype == MEM_HMA) {
959 		mtype_offset = 2 * (edc_size * 1024 * 1024);
960 	} else if (mtype != MEM_MC1)
961 		mtype_offset = (mtype * (edc_size * 1024 * 1024));
962 	else {
963 		mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap,
964 						      A_MA_EXT_MEMORY0_BAR));
965 		mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
966 	}
967 
968 	return t4_memory_rw_addr(adap, win,
969 				 mtype_offset + maddr, len,
970 				 hbuf, dir);
971 }
972 
973 /*
974  * Return the specified PCI-E Configuration Space register from our Physical
975  * Function.  We try first via a Firmware LDST Command (if fw_attach != 0)
976  * since we prefer to let the firmware own all of these registers, but if that
977  * fails we go for it directly ourselves.
978  */
979 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
980 {
981 	u32 val;
982 
983 	/*
984 	 * If fw_attach != 0, construct and send the Firmware LDST Command to
985 	 * retrieve the specified PCI-E Configuration Space register.
986 	 */
987 	if (drv_fw_attach != 0) {
988 		struct fw_ldst_cmd ldst_cmd;
989 		int ret;
990 
991 		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
992 		ldst_cmd.op_to_addrspace =
993 			cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
994 				    F_FW_CMD_REQUEST |
995 				    F_FW_CMD_READ |
996 				    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
997 		ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
998 		ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
999 		ldst_cmd.u.pcie.ctrl_to_fn =
1000 			(F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
1001 		ldst_cmd.u.pcie.r = reg;
1002 
1003 		/*
1004 		 * If the LDST Command succeeds, return the result, otherwise
1005 		 * fall through to reading it directly ourselves ...
1006 		 */
1007 		ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
1008 				 &ldst_cmd);
1009 		if (ret == 0)
1010 			return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
1011 
1012 		CH_WARN(adap, "Firmware failed to return "
1013 			"Configuration Space register %d, err = %d\n",
1014 			reg, -ret);
1015 	}
1016 
1017 	/*
1018 	 * Read the desired Configuration Space register via the PCI-E
1019 	 * Backdoor mechanism.
1020 	 */
1021 	t4_hw_pci_read_cfg4(adap, reg, &val);
1022 	return val;
1023 }
1024 
1025 /*
1026  * Get the window based on base passed to it.
1027  * Window aperture is currently unhandled, but there is no use case for it
1028  * right now
1029  */
1030 static int t4_get_window(struct adapter *adap, u64 pci_base, u64 pci_mask, u64 memwin_base, int drv_fw_attach)
1031 {
1032 	if (is_t4(adap->params.chip)) {
1033 		u32 bar0;
1034 
1035 		/*
1036 		 * Truncation intentional: we only read the bottom 32-bits of
1037 		 * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
1038 		 * mechanism to read BAR0 instead of using
1039 		 * pci_resource_start() because we could be operating from
1040 		 * within a Virtual Machine which is trapping our accesses to
1041 		 * our Configuration Space and we need to set up the PCI-E
1042 		 * Memory Window decoders with the actual addresses which will
1043 		 * be coming across the PCI-E link.
1044 		 */
1045 		bar0 = t4_read_pcie_cfg4(adap, pci_base, drv_fw_attach);
1046 		bar0 &= pci_mask;
1047 		adap->t4_bar0 = bar0;
1048 
1049 		return bar0 + memwin_base;
1050 	} else {
1051 		/* For T5, only relative offset inside the PCIe BAR is passed */
1052 		return memwin_base;
1053 	}
1054 }
1055 
1056 /* Get the default utility window (win0) used by everyone */
1057 int t4_get_util_window(struct adapter *adap, int drv_fw_attach)
1058 {
1059 	return t4_get_window(adap, PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE, drv_fw_attach);
1060 }
1061 
1062 /*
1063  * Set up memory window for accessing adapter memory ranges.  (Read
1064  * back MA register to ensure that changes propagate before we attempt
1065  * to use the new values.)
1066  */
1067 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
1068 {
1069 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window),
1070 		     memwin_base | V_BIR(0) |
1071 		     V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
1072 	t4_read_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window));
1073 }
1074 
1075 /**
1076  *	t4_get_regs_len - return the size of the chips register set
1077  *	@adapter: the adapter
1078  *
1079  *	Returns the size of the chip's BAR0 register space.
1080  */
1081 unsigned int t4_get_regs_len(struct adapter *adapter)
1082 {
1083 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
1084 
1085 	switch (chip_version) {
1086 	case CHELSIO_T4:
1087 		return T4_REGMAP_SIZE;
1088 
1089 	case CHELSIO_T5:
1090 	case CHELSIO_T6:
1091 		return T5_REGMAP_SIZE;
1092 	}
1093 
1094 	CH_ERR(adapter,
1095 		"Unsupported chip version %d\n", chip_version);
1096 	return 0;
1097 }
1098 
1099 /**
1100  *	t4_get_regs - read chip registers into provided buffer
1101  *	@adap: the adapter
1102  *	@buf: register buffer
1103  *	@buf_size: size (in bytes) of register buffer
1104  *
1105  *	If the provided register buffer isn't large enough for the chip's
1106  *	full register range, the register dump will be truncated to the
1107  *	register buffer's size.
1108  */
1109 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
1110 {
1111 	static const unsigned int t4_reg_ranges[] = {
1112 		0x1008, 0x1108,
1113 		0x1180, 0x1184,
1114 		0x1190, 0x1194,
1115 		0x11a0, 0x11a4,
1116 		0x11b0, 0x11b4,
1117 		0x11fc, 0x123c,
1118 		0x1300, 0x173c,
1119 		0x1800, 0x18fc,
1120 		0x3000, 0x30d8,
1121 		0x30e0, 0x30e4,
1122 		0x30ec, 0x5910,
1123 		0x5920, 0x5924,
1124 		0x5960, 0x5960,
1125 		0x5968, 0x5968,
1126 		0x5970, 0x5970,
1127 		0x5978, 0x5978,
1128 		0x5980, 0x5980,
1129 		0x5988, 0x5988,
1130 		0x5990, 0x5990,
1131 		0x5998, 0x5998,
1132 		0x59a0, 0x59d4,
1133 		0x5a00, 0x5ae0,
1134 		0x5ae8, 0x5ae8,
1135 		0x5af0, 0x5af0,
1136 		0x5af8, 0x5af8,
1137 		0x6000, 0x6098,
1138 		0x6100, 0x6150,
1139 		0x6200, 0x6208,
1140 		0x6240, 0x6248,
1141 		0x6280, 0x62b0,
1142 		0x62c0, 0x6338,
1143 		0x6370, 0x638c,
1144 		0x6400, 0x643c,
1145 		0x6500, 0x6524,
1146 		0x6a00, 0x6a04,
1147 		0x6a14, 0x6a38,
1148 		0x6a60, 0x6a70,
1149 		0x6a78, 0x6a78,
1150 		0x6b00, 0x6b0c,
1151 		0x6b1c, 0x6b84,
1152 		0x6bf0, 0x6bf8,
1153 		0x6c00, 0x6c0c,
1154 		0x6c1c, 0x6c84,
1155 		0x6cf0, 0x6cf8,
1156 		0x6d00, 0x6d0c,
1157 		0x6d1c, 0x6d84,
1158 		0x6df0, 0x6df8,
1159 		0x6e00, 0x6e0c,
1160 		0x6e1c, 0x6e84,
1161 		0x6ef0, 0x6ef8,
1162 		0x6f00, 0x6f0c,
1163 		0x6f1c, 0x6f84,
1164 		0x6ff0, 0x6ff8,
1165 		0x7000, 0x700c,
1166 		0x701c, 0x7084,
1167 		0x70f0, 0x70f8,
1168 		0x7100, 0x710c,
1169 		0x711c, 0x7184,
1170 		0x71f0, 0x71f8,
1171 		0x7200, 0x720c,
1172 		0x721c, 0x7284,
1173 		0x72f0, 0x72f8,
1174 		0x7300, 0x730c,
1175 		0x731c, 0x7384,
1176 		0x73f0, 0x73f8,
1177 		0x7400, 0x7450,
1178 		0x7500, 0x7530,
1179 		0x7600, 0x760c,
1180 		0x7614, 0x761c,
1181 		0x7680, 0x76cc,
1182 		0x7700, 0x7798,
1183 		0x77c0, 0x77fc,
1184 		0x7900, 0x79fc,
1185 		0x7b00, 0x7b58,
1186 		0x7b60, 0x7b84,
1187 		0x7b8c, 0x7c38,
1188 		0x7d00, 0x7d38,
1189 		0x7d40, 0x7d80,
1190 		0x7d8c, 0x7ddc,
1191 		0x7de4, 0x7e04,
1192 		0x7e10, 0x7e1c,
1193 		0x7e24, 0x7e38,
1194 		0x7e40, 0x7e44,
1195 		0x7e4c, 0x7e78,
1196 		0x7e80, 0x7ea4,
1197 		0x7eac, 0x7edc,
1198 		0x7ee8, 0x7efc,
1199 		0x8dc0, 0x8e04,
1200 		0x8e10, 0x8e1c,
1201 		0x8e30, 0x8e78,
1202 		0x8ea0, 0x8eb8,
1203 		0x8ec0, 0x8f6c,
1204 		0x8fc0, 0x9008,
1205 		0x9010, 0x9058,
1206 		0x9060, 0x9060,
1207 		0x9068, 0x9074,
1208 		0x90fc, 0x90fc,
1209 		0x9400, 0x9408,
1210 		0x9410, 0x9458,
1211 		0x9600, 0x9600,
1212 		0x9608, 0x9638,
1213 		0x9640, 0x96bc,
1214 		0x9800, 0x9808,
1215 		0x9820, 0x983c,
1216 		0x9850, 0x9864,
1217 		0x9c00, 0x9c6c,
1218 		0x9c80, 0x9cec,
1219 		0x9d00, 0x9d6c,
1220 		0x9d80, 0x9dec,
1221 		0x9e00, 0x9e6c,
1222 		0x9e80, 0x9eec,
1223 		0x9f00, 0x9f6c,
1224 		0x9f80, 0x9fec,
1225 		0xd004, 0xd004,
1226 		0xd010, 0xd03c,
1227 		0xdfc0, 0xdfe0,
1228 		0xe000, 0xea7c,
1229 		0xf000, 0x11110,
1230 		0x11118, 0x11190,
1231 		0x19040, 0x1906c,
1232 		0x19078, 0x19080,
1233 		0x1908c, 0x190e4,
1234 		0x190f0, 0x190f8,
1235 		0x19100, 0x19110,
1236 		0x19120, 0x19124,
1237 		0x19150, 0x19194,
1238 		0x1919c, 0x191b0,
1239 		0x191d0, 0x191e8,
1240 		0x19238, 0x1924c,
1241 		0x193f8, 0x1943c,
1242 		0x1944c, 0x19474,
1243 		0x19490, 0x194e0,
1244 		0x194f0, 0x194f8,
1245 		0x19800, 0x19c08,
1246 		0x19c10, 0x19c90,
1247 		0x19ca0, 0x19ce4,
1248 		0x19cf0, 0x19d40,
1249 		0x19d50, 0x19d94,
1250 		0x19da0, 0x19de8,
1251 		0x19df0, 0x19e40,
1252 		0x19e50, 0x19e90,
1253 		0x19ea0, 0x19f4c,
1254 		0x1a000, 0x1a004,
1255 		0x1a010, 0x1a06c,
1256 		0x1a0b0, 0x1a0e4,
1257 		0x1a0ec, 0x1a0f4,
1258 		0x1a100, 0x1a108,
1259 		0x1a114, 0x1a120,
1260 		0x1a128, 0x1a130,
1261 		0x1a138, 0x1a138,
1262 		0x1a190, 0x1a1c4,
1263 		0x1a1fc, 0x1a1fc,
1264 		0x1e040, 0x1e04c,
1265 		0x1e284, 0x1e28c,
1266 		0x1e2c0, 0x1e2c0,
1267 		0x1e2e0, 0x1e2e0,
1268 		0x1e300, 0x1e384,
1269 		0x1e3c0, 0x1e3c8,
1270 		0x1e440, 0x1e44c,
1271 		0x1e684, 0x1e68c,
1272 		0x1e6c0, 0x1e6c0,
1273 		0x1e6e0, 0x1e6e0,
1274 		0x1e700, 0x1e784,
1275 		0x1e7c0, 0x1e7c8,
1276 		0x1e840, 0x1e84c,
1277 		0x1ea84, 0x1ea8c,
1278 		0x1eac0, 0x1eac0,
1279 		0x1eae0, 0x1eae0,
1280 		0x1eb00, 0x1eb84,
1281 		0x1ebc0, 0x1ebc8,
1282 		0x1ec40, 0x1ec4c,
1283 		0x1ee84, 0x1ee8c,
1284 		0x1eec0, 0x1eec0,
1285 		0x1eee0, 0x1eee0,
1286 		0x1ef00, 0x1ef84,
1287 		0x1efc0, 0x1efc8,
1288 		0x1f040, 0x1f04c,
1289 		0x1f284, 0x1f28c,
1290 		0x1f2c0, 0x1f2c0,
1291 		0x1f2e0, 0x1f2e0,
1292 		0x1f300, 0x1f384,
1293 		0x1f3c0, 0x1f3c8,
1294 		0x1f440, 0x1f44c,
1295 		0x1f684, 0x1f68c,
1296 		0x1f6c0, 0x1f6c0,
1297 		0x1f6e0, 0x1f6e0,
1298 		0x1f700, 0x1f784,
1299 		0x1f7c0, 0x1f7c8,
1300 		0x1f840, 0x1f84c,
1301 		0x1fa84, 0x1fa8c,
1302 		0x1fac0, 0x1fac0,
1303 		0x1fae0, 0x1fae0,
1304 		0x1fb00, 0x1fb84,
1305 		0x1fbc0, 0x1fbc8,
1306 		0x1fc40, 0x1fc4c,
1307 		0x1fe84, 0x1fe8c,
1308 		0x1fec0, 0x1fec0,
1309 		0x1fee0, 0x1fee0,
1310 		0x1ff00, 0x1ff84,
1311 		0x1ffc0, 0x1ffc8,
1312 		0x20000, 0x2002c,
1313 		0x20100, 0x2013c,
1314 		0x20190, 0x201a0,
1315 		0x201a8, 0x201b8,
1316 		0x201c4, 0x201c8,
1317 		0x20200, 0x20318,
1318 		0x20400, 0x204b4,
1319 		0x204c0, 0x20528,
1320 		0x20540, 0x20614,
1321 		0x21000, 0x21040,
1322 		0x2104c, 0x21060,
1323 		0x210c0, 0x210ec,
1324 		0x21200, 0x21268,
1325 		0x21270, 0x21284,
1326 		0x212fc, 0x21388,
1327 		0x21400, 0x21404,
1328 		0x21500, 0x21500,
1329 		0x21510, 0x21518,
1330 		0x2152c, 0x21530,
1331 		0x2153c, 0x2153c,
1332 		0x21550, 0x21554,
1333 		0x21600, 0x21600,
1334 		0x21608, 0x2161c,
1335 		0x21624, 0x21628,
1336 		0x21630, 0x21634,
1337 		0x2163c, 0x2163c,
1338 		0x21700, 0x2171c,
1339 		0x21780, 0x2178c,
1340 		0x21800, 0x21818,
1341 		0x21820, 0x21828,
1342 		0x21830, 0x21848,
1343 		0x21850, 0x21854,
1344 		0x21860, 0x21868,
1345 		0x21870, 0x21870,
1346 		0x21878, 0x21898,
1347 		0x218a0, 0x218a8,
1348 		0x218b0, 0x218c8,
1349 		0x218d0, 0x218d4,
1350 		0x218e0, 0x218e8,
1351 		0x218f0, 0x218f0,
1352 		0x218f8, 0x21a18,
1353 		0x21a20, 0x21a28,
1354 		0x21a30, 0x21a48,
1355 		0x21a50, 0x21a54,
1356 		0x21a60, 0x21a68,
1357 		0x21a70, 0x21a70,
1358 		0x21a78, 0x21a98,
1359 		0x21aa0, 0x21aa8,
1360 		0x21ab0, 0x21ac8,
1361 		0x21ad0, 0x21ad4,
1362 		0x21ae0, 0x21ae8,
1363 		0x21af0, 0x21af0,
1364 		0x21af8, 0x21c18,
1365 		0x21c20, 0x21c20,
1366 		0x21c28, 0x21c30,
1367 		0x21c38, 0x21c38,
1368 		0x21c80, 0x21c98,
1369 		0x21ca0, 0x21ca8,
1370 		0x21cb0, 0x21cc8,
1371 		0x21cd0, 0x21cd4,
1372 		0x21ce0, 0x21ce8,
1373 		0x21cf0, 0x21cf0,
1374 		0x21cf8, 0x21d7c,
1375 		0x21e00, 0x21e04,
1376 		0x22000, 0x2202c,
1377 		0x22100, 0x2213c,
1378 		0x22190, 0x221a0,
1379 		0x221a8, 0x221b8,
1380 		0x221c4, 0x221c8,
1381 		0x22200, 0x22318,
1382 		0x22400, 0x224b4,
1383 		0x224c0, 0x22528,
1384 		0x22540, 0x22614,
1385 		0x23000, 0x23040,
1386 		0x2304c, 0x23060,
1387 		0x230c0, 0x230ec,
1388 		0x23200, 0x23268,
1389 		0x23270, 0x23284,
1390 		0x232fc, 0x23388,
1391 		0x23400, 0x23404,
1392 		0x23500, 0x23500,
1393 		0x23510, 0x23518,
1394 		0x2352c, 0x23530,
1395 		0x2353c, 0x2353c,
1396 		0x23550, 0x23554,
1397 		0x23600, 0x23600,
1398 		0x23608, 0x2361c,
1399 		0x23624, 0x23628,
1400 		0x23630, 0x23634,
1401 		0x2363c, 0x2363c,
1402 		0x23700, 0x2371c,
1403 		0x23780, 0x2378c,
1404 		0x23800, 0x23818,
1405 		0x23820, 0x23828,
1406 		0x23830, 0x23848,
1407 		0x23850, 0x23854,
1408 		0x23860, 0x23868,
1409 		0x23870, 0x23870,
1410 		0x23878, 0x23898,
1411 		0x238a0, 0x238a8,
1412 		0x238b0, 0x238c8,
1413 		0x238d0, 0x238d4,
1414 		0x238e0, 0x238e8,
1415 		0x238f0, 0x238f0,
1416 		0x238f8, 0x23a18,
1417 		0x23a20, 0x23a28,
1418 		0x23a30, 0x23a48,
1419 		0x23a50, 0x23a54,
1420 		0x23a60, 0x23a68,
1421 		0x23a70, 0x23a70,
1422 		0x23a78, 0x23a98,
1423 		0x23aa0, 0x23aa8,
1424 		0x23ab0, 0x23ac8,
1425 		0x23ad0, 0x23ad4,
1426 		0x23ae0, 0x23ae8,
1427 		0x23af0, 0x23af0,
1428 		0x23af8, 0x23c18,
1429 		0x23c20, 0x23c20,
1430 		0x23c28, 0x23c30,
1431 		0x23c38, 0x23c38,
1432 		0x23c80, 0x23c98,
1433 		0x23ca0, 0x23ca8,
1434 		0x23cb0, 0x23cc8,
1435 		0x23cd0, 0x23cd4,
1436 		0x23ce0, 0x23ce8,
1437 		0x23cf0, 0x23cf0,
1438 		0x23cf8, 0x23d7c,
1439 		0x23e00, 0x23e04,
1440 		0x24000, 0x2402c,
1441 		0x24100, 0x2413c,
1442 		0x24190, 0x241a0,
1443 		0x241a8, 0x241b8,
1444 		0x241c4, 0x241c8,
1445 		0x24200, 0x24318,
1446 		0x24400, 0x244b4,
1447 		0x244c0, 0x24528,
1448 		0x24540, 0x24614,
1449 		0x25000, 0x25040,
1450 		0x2504c, 0x25060,
1451 		0x250c0, 0x250ec,
1452 		0x25200, 0x25268,
1453 		0x25270, 0x25284,
1454 		0x252fc, 0x25388,
1455 		0x25400, 0x25404,
1456 		0x25500, 0x25500,
1457 		0x25510, 0x25518,
1458 		0x2552c, 0x25530,
1459 		0x2553c, 0x2553c,
1460 		0x25550, 0x25554,
1461 		0x25600, 0x25600,
1462 		0x25608, 0x2561c,
1463 		0x25624, 0x25628,
1464 		0x25630, 0x25634,
1465 		0x2563c, 0x2563c,
1466 		0x25700, 0x2571c,
1467 		0x25780, 0x2578c,
1468 		0x25800, 0x25818,
1469 		0x25820, 0x25828,
1470 		0x25830, 0x25848,
1471 		0x25850, 0x25854,
1472 		0x25860, 0x25868,
1473 		0x25870, 0x25870,
1474 		0x25878, 0x25898,
1475 		0x258a0, 0x258a8,
1476 		0x258b0, 0x258c8,
1477 		0x258d0, 0x258d4,
1478 		0x258e0, 0x258e8,
1479 		0x258f0, 0x258f0,
1480 		0x258f8, 0x25a18,
1481 		0x25a20, 0x25a28,
1482 		0x25a30, 0x25a48,
1483 		0x25a50, 0x25a54,
1484 		0x25a60, 0x25a68,
1485 		0x25a70, 0x25a70,
1486 		0x25a78, 0x25a98,
1487 		0x25aa0, 0x25aa8,
1488 		0x25ab0, 0x25ac8,
1489 		0x25ad0, 0x25ad4,
1490 		0x25ae0, 0x25ae8,
1491 		0x25af0, 0x25af0,
1492 		0x25af8, 0x25c18,
1493 		0x25c20, 0x25c20,
1494 		0x25c28, 0x25c30,
1495 		0x25c38, 0x25c38,
1496 		0x25c80, 0x25c98,
1497 		0x25ca0, 0x25ca8,
1498 		0x25cb0, 0x25cc8,
1499 		0x25cd0, 0x25cd4,
1500 		0x25ce0, 0x25ce8,
1501 		0x25cf0, 0x25cf0,
1502 		0x25cf8, 0x25d7c,
1503 		0x25e00, 0x25e04,
1504 		0x26000, 0x2602c,
1505 		0x26100, 0x2613c,
1506 		0x26190, 0x261a0,
1507 		0x261a8, 0x261b8,
1508 		0x261c4, 0x261c8,
1509 		0x26200, 0x26318,
1510 		0x26400, 0x264b4,
1511 		0x264c0, 0x26528,
1512 		0x26540, 0x26614,
1513 		0x27000, 0x27040,
1514 		0x2704c, 0x27060,
1515 		0x270c0, 0x270ec,
1516 		0x27200, 0x27268,
1517 		0x27270, 0x27284,
1518 		0x272fc, 0x27388,
1519 		0x27400, 0x27404,
1520 		0x27500, 0x27500,
1521 		0x27510, 0x27518,
1522 		0x2752c, 0x27530,
1523 		0x2753c, 0x2753c,
1524 		0x27550, 0x27554,
1525 		0x27600, 0x27600,
1526 		0x27608, 0x2761c,
1527 		0x27624, 0x27628,
1528 		0x27630, 0x27634,
1529 		0x2763c, 0x2763c,
1530 		0x27700, 0x2771c,
1531 		0x27780, 0x2778c,
1532 		0x27800, 0x27818,
1533 		0x27820, 0x27828,
1534 		0x27830, 0x27848,
1535 		0x27850, 0x27854,
1536 		0x27860, 0x27868,
1537 		0x27870, 0x27870,
1538 		0x27878, 0x27898,
1539 		0x278a0, 0x278a8,
1540 		0x278b0, 0x278c8,
1541 		0x278d0, 0x278d4,
1542 		0x278e0, 0x278e8,
1543 		0x278f0, 0x278f0,
1544 		0x278f8, 0x27a18,
1545 		0x27a20, 0x27a28,
1546 		0x27a30, 0x27a48,
1547 		0x27a50, 0x27a54,
1548 		0x27a60, 0x27a68,
1549 		0x27a70, 0x27a70,
1550 		0x27a78, 0x27a98,
1551 		0x27aa0, 0x27aa8,
1552 		0x27ab0, 0x27ac8,
1553 		0x27ad0, 0x27ad4,
1554 		0x27ae0, 0x27ae8,
1555 		0x27af0, 0x27af0,
1556 		0x27af8, 0x27c18,
1557 		0x27c20, 0x27c20,
1558 		0x27c28, 0x27c30,
1559 		0x27c38, 0x27c38,
1560 		0x27c80, 0x27c98,
1561 		0x27ca0, 0x27ca8,
1562 		0x27cb0, 0x27cc8,
1563 		0x27cd0, 0x27cd4,
1564 		0x27ce0, 0x27ce8,
1565 		0x27cf0, 0x27cf0,
1566 		0x27cf8, 0x27d7c,
1567 		0x27e00, 0x27e04,
1568 	};
1569 
1570 	static const unsigned int t5_reg_ranges[] = {
1571 		0x1008, 0x10c0,
1572 		0x10cc, 0x10f8,
1573 		0x1100, 0x1100,
1574 		0x110c, 0x1148,
1575 		0x1180, 0x1184,
1576 		0x1190, 0x1194,
1577 		0x11a0, 0x11a4,
1578 		0x11b0, 0x11b4,
1579 		0x11fc, 0x123c,
1580 		0x1280, 0x173c,
1581 		0x1800, 0x18fc,
1582 		0x3000, 0x3028,
1583 		0x3060, 0x30b0,
1584 		0x30b8, 0x30d8,
1585 		0x30e0, 0x30fc,
1586 		0x3140, 0x357c,
1587 		0x35a8, 0x35cc,
1588 		0x35ec, 0x35ec,
1589 		0x3600, 0x5624,
1590 		0x56cc, 0x56ec,
1591 		0x56f4, 0x5720,
1592 		0x5728, 0x575c,
1593 		0x580c, 0x5814,
1594 		0x5890, 0x589c,
1595 		0x58a4, 0x58ac,
1596 		0x58b8, 0x58bc,
1597 		0x5940, 0x59c8,
1598 		0x59d0, 0x59dc,
1599 		0x59fc, 0x5a18,
1600 		0x5a60, 0x5a70,
1601 		0x5a80, 0x5a9c,
1602 		0x5b94, 0x5bfc,
1603 		0x6000, 0x6020,
1604 		0x6028, 0x6040,
1605 		0x6058, 0x609c,
1606 		0x60a8, 0x614c,
1607 		0x7700, 0x7798,
1608 		0x77c0, 0x78fc,
1609 		0x7b00, 0x7b58,
1610 		0x7b60, 0x7b84,
1611 		0x7b8c, 0x7c54,
1612 		0x7d00, 0x7d38,
1613 		0x7d40, 0x7d80,
1614 		0x7d8c, 0x7ddc,
1615 		0x7de4, 0x7e04,
1616 		0x7e10, 0x7e1c,
1617 		0x7e24, 0x7e38,
1618 		0x7e40, 0x7e44,
1619 		0x7e4c, 0x7e78,
1620 		0x7e80, 0x7edc,
1621 		0x7ee8, 0x7efc,
1622 		0x8dc0, 0x8de0,
1623 		0x8df8, 0x8e04,
1624 		0x8e10, 0x8e84,
1625 		0x8ea0, 0x8f84,
1626 		0x8fc0, 0x9058,
1627 		0x9060, 0x9060,
1628 		0x9068, 0x90f8,
1629 		0x9400, 0x9408,
1630 		0x9410, 0x9470,
1631 		0x9600, 0x9600,
1632 		0x9608, 0x9638,
1633 		0x9640, 0x96f4,
1634 		0x9800, 0x9808,
1635 		0x9820, 0x983c,
1636 		0x9850, 0x9864,
1637 		0x9c00, 0x9c6c,
1638 		0x9c80, 0x9cec,
1639 		0x9d00, 0x9d6c,
1640 		0x9d80, 0x9dec,
1641 		0x9e00, 0x9e6c,
1642 		0x9e80, 0x9eec,
1643 		0x9f00, 0x9f6c,
1644 		0x9f80, 0xa020,
1645 		0xd004, 0xd004,
1646 		0xd010, 0xd03c,
1647 		0xdfc0, 0xdfe0,
1648 		0xe000, 0x1106c,
1649 		0x11074, 0x11088,
1650 		0x1109c, 0x1117c,
1651 		0x11190, 0x11204,
1652 		0x19040, 0x1906c,
1653 		0x19078, 0x19080,
1654 		0x1908c, 0x190e8,
1655 		0x190f0, 0x190f8,
1656 		0x19100, 0x19110,
1657 		0x19120, 0x19124,
1658 		0x19150, 0x19194,
1659 		0x1919c, 0x191b0,
1660 		0x191d0, 0x191e8,
1661 		0x19238, 0x19290,
1662 		0x193f8, 0x19428,
1663 		0x19430, 0x19444,
1664 		0x1944c, 0x1946c,
1665 		0x19474, 0x19474,
1666 		0x19490, 0x194cc,
1667 		0x194f0, 0x194f8,
1668 		0x19c00, 0x19c08,
1669 		0x19c10, 0x19c60,
1670 		0x19c94, 0x19ce4,
1671 		0x19cf0, 0x19d40,
1672 		0x19d50, 0x19d94,
1673 		0x19da0, 0x19de8,
1674 		0x19df0, 0x19e10,
1675 		0x19e50, 0x19e90,
1676 		0x19ea0, 0x19f24,
1677 		0x19f34, 0x19f34,
1678 		0x19f40, 0x19f50,
1679 		0x19f90, 0x19fb4,
1680 		0x19fc4, 0x19fe4,
1681 		0x1a000, 0x1a004,
1682 		0x1a010, 0x1a06c,
1683 		0x1a0b0, 0x1a0e4,
1684 		0x1a0ec, 0x1a0f8,
1685 		0x1a100, 0x1a108,
1686 		0x1a114, 0x1a120,
1687 		0x1a128, 0x1a130,
1688 		0x1a138, 0x1a138,
1689 		0x1a190, 0x1a1c4,
1690 		0x1a1fc, 0x1a1fc,
1691 		0x1e008, 0x1e00c,
1692 		0x1e040, 0x1e044,
1693 		0x1e04c, 0x1e04c,
1694 		0x1e284, 0x1e290,
1695 		0x1e2c0, 0x1e2c0,
1696 		0x1e2e0, 0x1e2e0,
1697 		0x1e300, 0x1e384,
1698 		0x1e3c0, 0x1e3c8,
1699 		0x1e408, 0x1e40c,
1700 		0x1e440, 0x1e444,
1701 		0x1e44c, 0x1e44c,
1702 		0x1e684, 0x1e690,
1703 		0x1e6c0, 0x1e6c0,
1704 		0x1e6e0, 0x1e6e0,
1705 		0x1e700, 0x1e784,
1706 		0x1e7c0, 0x1e7c8,
1707 		0x1e808, 0x1e80c,
1708 		0x1e840, 0x1e844,
1709 		0x1e84c, 0x1e84c,
1710 		0x1ea84, 0x1ea90,
1711 		0x1eac0, 0x1eac0,
1712 		0x1eae0, 0x1eae0,
1713 		0x1eb00, 0x1eb84,
1714 		0x1ebc0, 0x1ebc8,
1715 		0x1ec08, 0x1ec0c,
1716 		0x1ec40, 0x1ec44,
1717 		0x1ec4c, 0x1ec4c,
1718 		0x1ee84, 0x1ee90,
1719 		0x1eec0, 0x1eec0,
1720 		0x1eee0, 0x1eee0,
1721 		0x1ef00, 0x1ef84,
1722 		0x1efc0, 0x1efc8,
1723 		0x1f008, 0x1f00c,
1724 		0x1f040, 0x1f044,
1725 		0x1f04c, 0x1f04c,
1726 		0x1f284, 0x1f290,
1727 		0x1f2c0, 0x1f2c0,
1728 		0x1f2e0, 0x1f2e0,
1729 		0x1f300, 0x1f384,
1730 		0x1f3c0, 0x1f3c8,
1731 		0x1f408, 0x1f40c,
1732 		0x1f440, 0x1f444,
1733 		0x1f44c, 0x1f44c,
1734 		0x1f684, 0x1f690,
1735 		0x1f6c0, 0x1f6c0,
1736 		0x1f6e0, 0x1f6e0,
1737 		0x1f700, 0x1f784,
1738 		0x1f7c0, 0x1f7c8,
1739 		0x1f808, 0x1f80c,
1740 		0x1f840, 0x1f844,
1741 		0x1f84c, 0x1f84c,
1742 		0x1fa84, 0x1fa90,
1743 		0x1fac0, 0x1fac0,
1744 		0x1fae0, 0x1fae0,
1745 		0x1fb00, 0x1fb84,
1746 		0x1fbc0, 0x1fbc8,
1747 		0x1fc08, 0x1fc0c,
1748 		0x1fc40, 0x1fc44,
1749 		0x1fc4c, 0x1fc4c,
1750 		0x1fe84, 0x1fe90,
1751 		0x1fec0, 0x1fec0,
1752 		0x1fee0, 0x1fee0,
1753 		0x1ff00, 0x1ff84,
1754 		0x1ffc0, 0x1ffc8,
1755 		0x30000, 0x30030,
1756 		0x30100, 0x30144,
1757 		0x30190, 0x301a0,
1758 		0x301a8, 0x301b8,
1759 		0x301c4, 0x301c8,
1760 		0x301d0, 0x301d0,
1761 		0x30200, 0x30318,
1762 		0x30400, 0x304b4,
1763 		0x304c0, 0x3052c,
1764 		0x30540, 0x3061c,
1765 		0x30800, 0x30828,
1766 		0x30834, 0x30834,
1767 		0x308c0, 0x30908,
1768 		0x30910, 0x309ac,
1769 		0x30a00, 0x30a14,
1770 		0x30a1c, 0x30a2c,
1771 		0x30a44, 0x30a50,
1772 		0x30a74, 0x30a74,
1773 		0x30a7c, 0x30afc,
1774 		0x30b08, 0x30c24,
1775 		0x30d00, 0x30d00,
1776 		0x30d08, 0x30d14,
1777 		0x30d1c, 0x30d20,
1778 		0x30d3c, 0x30d3c,
1779 		0x30d48, 0x30d50,
1780 		0x31200, 0x3120c,
1781 		0x31220, 0x31220,
1782 		0x31240, 0x31240,
1783 		0x31600, 0x3160c,
1784 		0x31a00, 0x31a1c,
1785 		0x31e00, 0x31e20,
1786 		0x31e38, 0x31e3c,
1787 		0x31e80, 0x31e80,
1788 		0x31e88, 0x31ea8,
1789 		0x31eb0, 0x31eb4,
1790 		0x31ec8, 0x31ed4,
1791 		0x31fb8, 0x32004,
1792 		0x32200, 0x32200,
1793 		0x32208, 0x32240,
1794 		0x32248, 0x32280,
1795 		0x32288, 0x322c0,
1796 		0x322c8, 0x322fc,
1797 		0x32600, 0x32630,
1798 		0x32a00, 0x32abc,
1799 		0x32b00, 0x32b10,
1800 		0x32b20, 0x32b30,
1801 		0x32b40, 0x32b50,
1802 		0x32b60, 0x32b70,
1803 		0x33000, 0x33028,
1804 		0x33030, 0x33048,
1805 		0x33060, 0x33068,
1806 		0x33070, 0x3309c,
1807 		0x330f0, 0x33128,
1808 		0x33130, 0x33148,
1809 		0x33160, 0x33168,
1810 		0x33170, 0x3319c,
1811 		0x331f0, 0x33238,
1812 		0x33240, 0x33240,
1813 		0x33248, 0x33250,
1814 		0x3325c, 0x33264,
1815 		0x33270, 0x332b8,
1816 		0x332c0, 0x332e4,
1817 		0x332f8, 0x33338,
1818 		0x33340, 0x33340,
1819 		0x33348, 0x33350,
1820 		0x3335c, 0x33364,
1821 		0x33370, 0x333b8,
1822 		0x333c0, 0x333e4,
1823 		0x333f8, 0x33428,
1824 		0x33430, 0x33448,
1825 		0x33460, 0x33468,
1826 		0x33470, 0x3349c,
1827 		0x334f0, 0x33528,
1828 		0x33530, 0x33548,
1829 		0x33560, 0x33568,
1830 		0x33570, 0x3359c,
1831 		0x335f0, 0x33638,
1832 		0x33640, 0x33640,
1833 		0x33648, 0x33650,
1834 		0x3365c, 0x33664,
1835 		0x33670, 0x336b8,
1836 		0x336c0, 0x336e4,
1837 		0x336f8, 0x33738,
1838 		0x33740, 0x33740,
1839 		0x33748, 0x33750,
1840 		0x3375c, 0x33764,
1841 		0x33770, 0x337b8,
1842 		0x337c0, 0x337e4,
1843 		0x337f8, 0x337fc,
1844 		0x33814, 0x33814,
1845 		0x3382c, 0x3382c,
1846 		0x33880, 0x3388c,
1847 		0x338e8, 0x338ec,
1848 		0x33900, 0x33928,
1849 		0x33930, 0x33948,
1850 		0x33960, 0x33968,
1851 		0x33970, 0x3399c,
1852 		0x339f0, 0x33a38,
1853 		0x33a40, 0x33a40,
1854 		0x33a48, 0x33a50,
1855 		0x33a5c, 0x33a64,
1856 		0x33a70, 0x33ab8,
1857 		0x33ac0, 0x33ae4,
1858 		0x33af8, 0x33b10,
1859 		0x33b28, 0x33b28,
1860 		0x33b3c, 0x33b50,
1861 		0x33bf0, 0x33c10,
1862 		0x33c28, 0x33c28,
1863 		0x33c3c, 0x33c50,
1864 		0x33cf0, 0x33cfc,
1865 		0x34000, 0x34030,
1866 		0x34100, 0x34144,
1867 		0x34190, 0x341a0,
1868 		0x341a8, 0x341b8,
1869 		0x341c4, 0x341c8,
1870 		0x341d0, 0x341d0,
1871 		0x34200, 0x34318,
1872 		0x34400, 0x344b4,
1873 		0x344c0, 0x3452c,
1874 		0x34540, 0x3461c,
1875 		0x34800, 0x34828,
1876 		0x34834, 0x34834,
1877 		0x348c0, 0x34908,
1878 		0x34910, 0x349ac,
1879 		0x34a00, 0x34a14,
1880 		0x34a1c, 0x34a2c,
1881 		0x34a44, 0x34a50,
1882 		0x34a74, 0x34a74,
1883 		0x34a7c, 0x34afc,
1884 		0x34b08, 0x34c24,
1885 		0x34d00, 0x34d00,
1886 		0x34d08, 0x34d14,
1887 		0x34d1c, 0x34d20,
1888 		0x34d3c, 0x34d3c,
1889 		0x34d48, 0x34d50,
1890 		0x35200, 0x3520c,
1891 		0x35220, 0x35220,
1892 		0x35240, 0x35240,
1893 		0x35600, 0x3560c,
1894 		0x35a00, 0x35a1c,
1895 		0x35e00, 0x35e20,
1896 		0x35e38, 0x35e3c,
1897 		0x35e80, 0x35e80,
1898 		0x35e88, 0x35ea8,
1899 		0x35eb0, 0x35eb4,
1900 		0x35ec8, 0x35ed4,
1901 		0x35fb8, 0x36004,
1902 		0x36200, 0x36200,
1903 		0x36208, 0x36240,
1904 		0x36248, 0x36280,
1905 		0x36288, 0x362c0,
1906 		0x362c8, 0x362fc,
1907 		0x36600, 0x36630,
1908 		0x36a00, 0x36abc,
1909 		0x36b00, 0x36b10,
1910 		0x36b20, 0x36b30,
1911 		0x36b40, 0x36b50,
1912 		0x36b60, 0x36b70,
1913 		0x37000, 0x37028,
1914 		0x37030, 0x37048,
1915 		0x37060, 0x37068,
1916 		0x37070, 0x3709c,
1917 		0x370f0, 0x37128,
1918 		0x37130, 0x37148,
1919 		0x37160, 0x37168,
1920 		0x37170, 0x3719c,
1921 		0x371f0, 0x37238,
1922 		0x37240, 0x37240,
1923 		0x37248, 0x37250,
1924 		0x3725c, 0x37264,
1925 		0x37270, 0x372b8,
1926 		0x372c0, 0x372e4,
1927 		0x372f8, 0x37338,
1928 		0x37340, 0x37340,
1929 		0x37348, 0x37350,
1930 		0x3735c, 0x37364,
1931 		0x37370, 0x373b8,
1932 		0x373c0, 0x373e4,
1933 		0x373f8, 0x37428,
1934 		0x37430, 0x37448,
1935 		0x37460, 0x37468,
1936 		0x37470, 0x3749c,
1937 		0x374f0, 0x37528,
1938 		0x37530, 0x37548,
1939 		0x37560, 0x37568,
1940 		0x37570, 0x3759c,
1941 		0x375f0, 0x37638,
1942 		0x37640, 0x37640,
1943 		0x37648, 0x37650,
1944 		0x3765c, 0x37664,
1945 		0x37670, 0x376b8,
1946 		0x376c0, 0x376e4,
1947 		0x376f8, 0x37738,
1948 		0x37740, 0x37740,
1949 		0x37748, 0x37750,
1950 		0x3775c, 0x37764,
1951 		0x37770, 0x377b8,
1952 		0x377c0, 0x377e4,
1953 		0x377f8, 0x377fc,
1954 		0x37814, 0x37814,
1955 		0x3782c, 0x3782c,
1956 		0x37880, 0x3788c,
1957 		0x378e8, 0x378ec,
1958 		0x37900, 0x37928,
1959 		0x37930, 0x37948,
1960 		0x37960, 0x37968,
1961 		0x37970, 0x3799c,
1962 		0x379f0, 0x37a38,
1963 		0x37a40, 0x37a40,
1964 		0x37a48, 0x37a50,
1965 		0x37a5c, 0x37a64,
1966 		0x37a70, 0x37ab8,
1967 		0x37ac0, 0x37ae4,
1968 		0x37af8, 0x37b10,
1969 		0x37b28, 0x37b28,
1970 		0x37b3c, 0x37b50,
1971 		0x37bf0, 0x37c10,
1972 		0x37c28, 0x37c28,
1973 		0x37c3c, 0x37c50,
1974 		0x37cf0, 0x37cfc,
1975 		0x38000, 0x38030,
1976 		0x38100, 0x38144,
1977 		0x38190, 0x381a0,
1978 		0x381a8, 0x381b8,
1979 		0x381c4, 0x381c8,
1980 		0x381d0, 0x381d0,
1981 		0x38200, 0x38318,
1982 		0x38400, 0x384b4,
1983 		0x384c0, 0x3852c,
1984 		0x38540, 0x3861c,
1985 		0x38800, 0x38828,
1986 		0x38834, 0x38834,
1987 		0x388c0, 0x38908,
1988 		0x38910, 0x389ac,
1989 		0x38a00, 0x38a14,
1990 		0x38a1c, 0x38a2c,
1991 		0x38a44, 0x38a50,
1992 		0x38a74, 0x38a74,
1993 		0x38a7c, 0x38afc,
1994 		0x38b08, 0x38c24,
1995 		0x38d00, 0x38d00,
1996 		0x38d08, 0x38d14,
1997 		0x38d1c, 0x38d20,
1998 		0x38d3c, 0x38d3c,
1999 		0x38d48, 0x38d50,
2000 		0x39200, 0x3920c,
2001 		0x39220, 0x39220,
2002 		0x39240, 0x39240,
2003 		0x39600, 0x3960c,
2004 		0x39a00, 0x39a1c,
2005 		0x39e00, 0x39e20,
2006 		0x39e38, 0x39e3c,
2007 		0x39e80, 0x39e80,
2008 		0x39e88, 0x39ea8,
2009 		0x39eb0, 0x39eb4,
2010 		0x39ec8, 0x39ed4,
2011 		0x39fb8, 0x3a004,
2012 		0x3a200, 0x3a200,
2013 		0x3a208, 0x3a240,
2014 		0x3a248, 0x3a280,
2015 		0x3a288, 0x3a2c0,
2016 		0x3a2c8, 0x3a2fc,
2017 		0x3a600, 0x3a630,
2018 		0x3aa00, 0x3aabc,
2019 		0x3ab00, 0x3ab10,
2020 		0x3ab20, 0x3ab30,
2021 		0x3ab40, 0x3ab50,
2022 		0x3ab60, 0x3ab70,
2023 		0x3b000, 0x3b028,
2024 		0x3b030, 0x3b048,
2025 		0x3b060, 0x3b068,
2026 		0x3b070, 0x3b09c,
2027 		0x3b0f0, 0x3b128,
2028 		0x3b130, 0x3b148,
2029 		0x3b160, 0x3b168,
2030 		0x3b170, 0x3b19c,
2031 		0x3b1f0, 0x3b238,
2032 		0x3b240, 0x3b240,
2033 		0x3b248, 0x3b250,
2034 		0x3b25c, 0x3b264,
2035 		0x3b270, 0x3b2b8,
2036 		0x3b2c0, 0x3b2e4,
2037 		0x3b2f8, 0x3b338,
2038 		0x3b340, 0x3b340,
2039 		0x3b348, 0x3b350,
2040 		0x3b35c, 0x3b364,
2041 		0x3b370, 0x3b3b8,
2042 		0x3b3c0, 0x3b3e4,
2043 		0x3b3f8, 0x3b428,
2044 		0x3b430, 0x3b448,
2045 		0x3b460, 0x3b468,
2046 		0x3b470, 0x3b49c,
2047 		0x3b4f0, 0x3b528,
2048 		0x3b530, 0x3b548,
2049 		0x3b560, 0x3b568,
2050 		0x3b570, 0x3b59c,
2051 		0x3b5f0, 0x3b638,
2052 		0x3b640, 0x3b640,
2053 		0x3b648, 0x3b650,
2054 		0x3b65c, 0x3b664,
2055 		0x3b670, 0x3b6b8,
2056 		0x3b6c0, 0x3b6e4,
2057 		0x3b6f8, 0x3b738,
2058 		0x3b740, 0x3b740,
2059 		0x3b748, 0x3b750,
2060 		0x3b75c, 0x3b764,
2061 		0x3b770, 0x3b7b8,
2062 		0x3b7c0, 0x3b7e4,
2063 		0x3b7f8, 0x3b7fc,
2064 		0x3b814, 0x3b814,
2065 		0x3b82c, 0x3b82c,
2066 		0x3b880, 0x3b88c,
2067 		0x3b8e8, 0x3b8ec,
2068 		0x3b900, 0x3b928,
2069 		0x3b930, 0x3b948,
2070 		0x3b960, 0x3b968,
2071 		0x3b970, 0x3b99c,
2072 		0x3b9f0, 0x3ba38,
2073 		0x3ba40, 0x3ba40,
2074 		0x3ba48, 0x3ba50,
2075 		0x3ba5c, 0x3ba64,
2076 		0x3ba70, 0x3bab8,
2077 		0x3bac0, 0x3bae4,
2078 		0x3baf8, 0x3bb10,
2079 		0x3bb28, 0x3bb28,
2080 		0x3bb3c, 0x3bb50,
2081 		0x3bbf0, 0x3bc10,
2082 		0x3bc28, 0x3bc28,
2083 		0x3bc3c, 0x3bc50,
2084 		0x3bcf0, 0x3bcfc,
2085 		0x3c000, 0x3c030,
2086 		0x3c100, 0x3c144,
2087 		0x3c190, 0x3c1a0,
2088 		0x3c1a8, 0x3c1b8,
2089 		0x3c1c4, 0x3c1c8,
2090 		0x3c1d0, 0x3c1d0,
2091 		0x3c200, 0x3c318,
2092 		0x3c400, 0x3c4b4,
2093 		0x3c4c0, 0x3c52c,
2094 		0x3c540, 0x3c61c,
2095 		0x3c800, 0x3c828,
2096 		0x3c834, 0x3c834,
2097 		0x3c8c0, 0x3c908,
2098 		0x3c910, 0x3c9ac,
2099 		0x3ca00, 0x3ca14,
2100 		0x3ca1c, 0x3ca2c,
2101 		0x3ca44, 0x3ca50,
2102 		0x3ca74, 0x3ca74,
2103 		0x3ca7c, 0x3cafc,
2104 		0x3cb08, 0x3cc24,
2105 		0x3cd00, 0x3cd00,
2106 		0x3cd08, 0x3cd14,
2107 		0x3cd1c, 0x3cd20,
2108 		0x3cd3c, 0x3cd3c,
2109 		0x3cd48, 0x3cd50,
2110 		0x3d200, 0x3d20c,
2111 		0x3d220, 0x3d220,
2112 		0x3d240, 0x3d240,
2113 		0x3d600, 0x3d60c,
2114 		0x3da00, 0x3da1c,
2115 		0x3de00, 0x3de20,
2116 		0x3de38, 0x3de3c,
2117 		0x3de80, 0x3de80,
2118 		0x3de88, 0x3dea8,
2119 		0x3deb0, 0x3deb4,
2120 		0x3dec8, 0x3ded4,
2121 		0x3dfb8, 0x3e004,
2122 		0x3e200, 0x3e200,
2123 		0x3e208, 0x3e240,
2124 		0x3e248, 0x3e280,
2125 		0x3e288, 0x3e2c0,
2126 		0x3e2c8, 0x3e2fc,
2127 		0x3e600, 0x3e630,
2128 		0x3ea00, 0x3eabc,
2129 		0x3eb00, 0x3eb10,
2130 		0x3eb20, 0x3eb30,
2131 		0x3eb40, 0x3eb50,
2132 		0x3eb60, 0x3eb70,
2133 		0x3f000, 0x3f028,
2134 		0x3f030, 0x3f048,
2135 		0x3f060, 0x3f068,
2136 		0x3f070, 0x3f09c,
2137 		0x3f0f0, 0x3f128,
2138 		0x3f130, 0x3f148,
2139 		0x3f160, 0x3f168,
2140 		0x3f170, 0x3f19c,
2141 		0x3f1f0, 0x3f238,
2142 		0x3f240, 0x3f240,
2143 		0x3f248, 0x3f250,
2144 		0x3f25c, 0x3f264,
2145 		0x3f270, 0x3f2b8,
2146 		0x3f2c0, 0x3f2e4,
2147 		0x3f2f8, 0x3f338,
2148 		0x3f340, 0x3f340,
2149 		0x3f348, 0x3f350,
2150 		0x3f35c, 0x3f364,
2151 		0x3f370, 0x3f3b8,
2152 		0x3f3c0, 0x3f3e4,
2153 		0x3f3f8, 0x3f428,
2154 		0x3f430, 0x3f448,
2155 		0x3f460, 0x3f468,
2156 		0x3f470, 0x3f49c,
2157 		0x3f4f0, 0x3f528,
2158 		0x3f530, 0x3f548,
2159 		0x3f560, 0x3f568,
2160 		0x3f570, 0x3f59c,
2161 		0x3f5f0, 0x3f638,
2162 		0x3f640, 0x3f640,
2163 		0x3f648, 0x3f650,
2164 		0x3f65c, 0x3f664,
2165 		0x3f670, 0x3f6b8,
2166 		0x3f6c0, 0x3f6e4,
2167 		0x3f6f8, 0x3f738,
2168 		0x3f740, 0x3f740,
2169 		0x3f748, 0x3f750,
2170 		0x3f75c, 0x3f764,
2171 		0x3f770, 0x3f7b8,
2172 		0x3f7c0, 0x3f7e4,
2173 		0x3f7f8, 0x3f7fc,
2174 		0x3f814, 0x3f814,
2175 		0x3f82c, 0x3f82c,
2176 		0x3f880, 0x3f88c,
2177 		0x3f8e8, 0x3f8ec,
2178 		0x3f900, 0x3f928,
2179 		0x3f930, 0x3f948,
2180 		0x3f960, 0x3f968,
2181 		0x3f970, 0x3f99c,
2182 		0x3f9f0, 0x3fa38,
2183 		0x3fa40, 0x3fa40,
2184 		0x3fa48, 0x3fa50,
2185 		0x3fa5c, 0x3fa64,
2186 		0x3fa70, 0x3fab8,
2187 		0x3fac0, 0x3fae4,
2188 		0x3faf8, 0x3fb10,
2189 		0x3fb28, 0x3fb28,
2190 		0x3fb3c, 0x3fb50,
2191 		0x3fbf0, 0x3fc10,
2192 		0x3fc28, 0x3fc28,
2193 		0x3fc3c, 0x3fc50,
2194 		0x3fcf0, 0x3fcfc,
2195 		0x40000, 0x4000c,
2196 		0x40040, 0x40050,
2197 		0x40060, 0x40068,
2198 		0x4007c, 0x4008c,
2199 		0x40094, 0x400b0,
2200 		0x400c0, 0x40144,
2201 		0x40180, 0x4018c,
2202 		0x40200, 0x40254,
2203 		0x40260, 0x40264,
2204 		0x40270, 0x40288,
2205 		0x40290, 0x40298,
2206 		0x402ac, 0x402c8,
2207 		0x402d0, 0x402e0,
2208 		0x402f0, 0x402f0,
2209 		0x40300, 0x4033c,
2210 		0x403f8, 0x403fc,
2211 		0x41304, 0x413c4,
2212 		0x41400, 0x4140c,
2213 		0x41414, 0x4141c,
2214 		0x41480, 0x414d0,
2215 		0x44000, 0x44054,
2216 		0x4405c, 0x44078,
2217 		0x440c0, 0x44174,
2218 		0x44180, 0x441ac,
2219 		0x441b4, 0x441b8,
2220 		0x441c0, 0x44254,
2221 		0x4425c, 0x44278,
2222 		0x442c0, 0x44374,
2223 		0x44380, 0x443ac,
2224 		0x443b4, 0x443b8,
2225 		0x443c0, 0x44454,
2226 		0x4445c, 0x44478,
2227 		0x444c0, 0x44574,
2228 		0x44580, 0x445ac,
2229 		0x445b4, 0x445b8,
2230 		0x445c0, 0x44654,
2231 		0x4465c, 0x44678,
2232 		0x446c0, 0x44774,
2233 		0x44780, 0x447ac,
2234 		0x447b4, 0x447b8,
2235 		0x447c0, 0x44854,
2236 		0x4485c, 0x44878,
2237 		0x448c0, 0x44974,
2238 		0x44980, 0x449ac,
2239 		0x449b4, 0x449b8,
2240 		0x449c0, 0x449fc,
2241 		0x45000, 0x45004,
2242 		0x45010, 0x45030,
2243 		0x45040, 0x45060,
2244 		0x45068, 0x45068,
2245 		0x45080, 0x45084,
2246 		0x450a0, 0x450b0,
2247 		0x45200, 0x45204,
2248 		0x45210, 0x45230,
2249 		0x45240, 0x45260,
2250 		0x45268, 0x45268,
2251 		0x45280, 0x45284,
2252 		0x452a0, 0x452b0,
2253 		0x460c0, 0x460e4,
2254 		0x47000, 0x4703c,
2255 		0x47044, 0x4708c,
2256 		0x47200, 0x47250,
2257 		0x47400, 0x47408,
2258 		0x47414, 0x47420,
2259 		0x47600, 0x47618,
2260 		0x47800, 0x47814,
2261 		0x48000, 0x4800c,
2262 		0x48040, 0x48050,
2263 		0x48060, 0x48068,
2264 		0x4807c, 0x4808c,
2265 		0x48094, 0x480b0,
2266 		0x480c0, 0x48144,
2267 		0x48180, 0x4818c,
2268 		0x48200, 0x48254,
2269 		0x48260, 0x48264,
2270 		0x48270, 0x48288,
2271 		0x48290, 0x48298,
2272 		0x482ac, 0x482c8,
2273 		0x482d0, 0x482e0,
2274 		0x482f0, 0x482f0,
2275 		0x48300, 0x4833c,
2276 		0x483f8, 0x483fc,
2277 		0x49304, 0x493c4,
2278 		0x49400, 0x4940c,
2279 		0x49414, 0x4941c,
2280 		0x49480, 0x494d0,
2281 		0x4c000, 0x4c054,
2282 		0x4c05c, 0x4c078,
2283 		0x4c0c0, 0x4c174,
2284 		0x4c180, 0x4c1ac,
2285 		0x4c1b4, 0x4c1b8,
2286 		0x4c1c0, 0x4c254,
2287 		0x4c25c, 0x4c278,
2288 		0x4c2c0, 0x4c374,
2289 		0x4c380, 0x4c3ac,
2290 		0x4c3b4, 0x4c3b8,
2291 		0x4c3c0, 0x4c454,
2292 		0x4c45c, 0x4c478,
2293 		0x4c4c0, 0x4c574,
2294 		0x4c580, 0x4c5ac,
2295 		0x4c5b4, 0x4c5b8,
2296 		0x4c5c0, 0x4c654,
2297 		0x4c65c, 0x4c678,
2298 		0x4c6c0, 0x4c774,
2299 		0x4c780, 0x4c7ac,
2300 		0x4c7b4, 0x4c7b8,
2301 		0x4c7c0, 0x4c854,
2302 		0x4c85c, 0x4c878,
2303 		0x4c8c0, 0x4c974,
2304 		0x4c980, 0x4c9ac,
2305 		0x4c9b4, 0x4c9b8,
2306 		0x4c9c0, 0x4c9fc,
2307 		0x4d000, 0x4d004,
2308 		0x4d010, 0x4d030,
2309 		0x4d040, 0x4d060,
2310 		0x4d068, 0x4d068,
2311 		0x4d080, 0x4d084,
2312 		0x4d0a0, 0x4d0b0,
2313 		0x4d200, 0x4d204,
2314 		0x4d210, 0x4d230,
2315 		0x4d240, 0x4d260,
2316 		0x4d268, 0x4d268,
2317 		0x4d280, 0x4d284,
2318 		0x4d2a0, 0x4d2b0,
2319 		0x4e0c0, 0x4e0e4,
2320 		0x4f000, 0x4f03c,
2321 		0x4f044, 0x4f08c,
2322 		0x4f200, 0x4f250,
2323 		0x4f400, 0x4f408,
2324 		0x4f414, 0x4f420,
2325 		0x4f600, 0x4f618,
2326 		0x4f800, 0x4f814,
2327 		0x50000, 0x50084,
2328 		0x50090, 0x500cc,
2329 		0x50400, 0x50400,
2330 		0x50800, 0x50884,
2331 		0x50890, 0x508cc,
2332 		0x50c00, 0x50c00,
2333 		0x51000, 0x5101c,
2334 		0x51300, 0x51308,
2335 	};
2336 
2337 	static const unsigned int t6_reg_ranges[] = {
2338 		0x1008, 0x101c,
2339 		0x1024, 0x10a8,
2340 		0x10b4, 0x10f8,
2341 		0x1100, 0x1114,
2342 		0x111c, 0x112c,
2343 		0x1138, 0x113c,
2344 		0x1144, 0x114c,
2345 		0x1180, 0x1184,
2346 		0x1190, 0x1194,
2347 		0x11a0, 0x11a4,
2348 		0x11b0, 0x11c4,
2349 		0x11fc, 0x1274,
2350 		0x1280, 0x133c,
2351 		0x1800, 0x18fc,
2352 		0x3000, 0x302c,
2353 		0x3060, 0x30b0,
2354 		0x30b8, 0x30d8,
2355 		0x30e0, 0x30fc,
2356 		0x3140, 0x357c,
2357 		0x35a8, 0x35cc,
2358 		0x35ec, 0x35ec,
2359 		0x3600, 0x5624,
2360 		0x56cc, 0x56ec,
2361 		0x56f4, 0x5720,
2362 		0x5728, 0x575c,
2363 		0x580c, 0x5814,
2364 		0x5890, 0x589c,
2365 		0x58a4, 0x58ac,
2366 		0x58b8, 0x58bc,
2367 		0x5940, 0x595c,
2368 		0x5980, 0x598c,
2369 		0x59b0, 0x59c8,
2370 		0x59d0, 0x59dc,
2371 		0x59fc, 0x5a18,
2372 		0x5a60, 0x5a6c,
2373 		0x5a80, 0x5a8c,
2374 		0x5a94, 0x5a9c,
2375 		0x5b94, 0x5bfc,
2376 		0x5c10, 0x5e48,
2377 		0x5e50, 0x5e94,
2378 		0x5ea0, 0x5eb0,
2379 		0x5ec0, 0x5ec0,
2380 		0x5ec8, 0x5ed0,
2381 		0x5ee0, 0x5ee0,
2382 		0x5ef0, 0x5ef0,
2383 		0x5f00, 0x5f00,
2384 		0x6000, 0x6020,
2385 		0x6028, 0x6040,
2386 		0x6058, 0x609c,
2387 		0x60a8, 0x619c,
2388 		0x7700, 0x7798,
2389 		0x77c0, 0x7880,
2390 		0x78cc, 0x78fc,
2391 		0x7b00, 0x7b58,
2392 		0x7b60, 0x7b84,
2393 		0x7b8c, 0x7c54,
2394 		0x7d00, 0x7d38,
2395 		0x7d40, 0x7d84,
2396 		0x7d8c, 0x7ddc,
2397 		0x7de4, 0x7e04,
2398 		0x7e10, 0x7e1c,
2399 		0x7e24, 0x7e38,
2400 		0x7e40, 0x7e44,
2401 		0x7e4c, 0x7e78,
2402 		0x7e80, 0x7edc,
2403 		0x7ee8, 0x7efc,
2404 		0x8dc0, 0x8de0,
2405 		0x8df8, 0x8e04,
2406 		0x8e10, 0x8e84,
2407 		0x8ea0, 0x8f88,
2408 		0x8fb8, 0x9058,
2409 		0x9060, 0x9060,
2410 		0x9068, 0x90f8,
2411 		0x9100, 0x9124,
2412 		0x9400, 0x9470,
2413 		0x9600, 0x9600,
2414 		0x9608, 0x9638,
2415 		0x9640, 0x9704,
2416 		0x9710, 0x971c,
2417 		0x9800, 0x9808,
2418 		0x9820, 0x983c,
2419 		0x9850, 0x9864,
2420 		0x9c00, 0x9c6c,
2421 		0x9c80, 0x9cec,
2422 		0x9d00, 0x9d6c,
2423 		0x9d80, 0x9dec,
2424 		0x9e00, 0x9e6c,
2425 		0x9e80, 0x9eec,
2426 		0x9f00, 0x9f6c,
2427 		0x9f80, 0xa020,
2428 		0xd004, 0xd03c,
2429 		0xd100, 0xd118,
2430 		0xd200, 0xd214,
2431 		0xd220, 0xd234,
2432 		0xd240, 0xd254,
2433 		0xd260, 0xd274,
2434 		0xd280, 0xd294,
2435 		0xd2a0, 0xd2b4,
2436 		0xd2c0, 0xd2d4,
2437 		0xd2e0, 0xd2f4,
2438 		0xd300, 0xd31c,
2439 		0xdfc0, 0xdfe0,
2440 		0xe000, 0xf008,
2441 		0xf010, 0xf018,
2442 		0xf020, 0xf028,
2443 		0x11000, 0x11014,
2444 		0x11048, 0x1106c,
2445 		0x11074, 0x11088,
2446 		0x11098, 0x11120,
2447 		0x1112c, 0x1117c,
2448 		0x11190, 0x112e0,
2449 		0x11300, 0x1130c,
2450 		0x12000, 0x1206c,
2451 		0x19040, 0x1906c,
2452 		0x19078, 0x19080,
2453 		0x1908c, 0x190e8,
2454 		0x190f0, 0x190f8,
2455 		0x19100, 0x19110,
2456 		0x19120, 0x19124,
2457 		0x19150, 0x19194,
2458 		0x1919c, 0x191b0,
2459 		0x191d0, 0x191e8,
2460 		0x19238, 0x19290,
2461 		0x192a4, 0x192b0,
2462 		0x19348, 0x1934c,
2463 		0x193f8, 0x19418,
2464 		0x19420, 0x19428,
2465 		0x19430, 0x19444,
2466 		0x1944c, 0x1946c,
2467 		0x19474, 0x19474,
2468 		0x19490, 0x194cc,
2469 		0x194f0, 0x194f8,
2470 		0x19c00, 0x19c48,
2471 		0x19c50, 0x19c80,
2472 		0x19c94, 0x19c98,
2473 		0x19ca0, 0x19cbc,
2474 		0x19ce4, 0x19ce4,
2475 		0x19cf0, 0x19cf8,
2476 		0x19d00, 0x19d28,
2477 		0x19d50, 0x19d78,
2478 		0x19d94, 0x19d98,
2479 		0x19da0, 0x19de0,
2480 		0x19df0, 0x19e10,
2481 		0x19e50, 0x19e6c,
2482 		0x19ea0, 0x19ebc,
2483 		0x19ec4, 0x19ef4,
2484 		0x19f04, 0x19f2c,
2485 		0x19f34, 0x19f34,
2486 		0x19f40, 0x19f50,
2487 		0x19f90, 0x19fac,
2488 		0x19fc4, 0x19fc8,
2489 		0x19fd0, 0x19fe4,
2490 		0x1a000, 0x1a004,
2491 		0x1a010, 0x1a06c,
2492 		0x1a0b0, 0x1a0e4,
2493 		0x1a0ec, 0x1a0f8,
2494 		0x1a100, 0x1a108,
2495 		0x1a114, 0x1a120,
2496 		0x1a128, 0x1a130,
2497 		0x1a138, 0x1a138,
2498 		0x1a190, 0x1a1c4,
2499 		0x1a1fc, 0x1a1fc,
2500 		0x1e008, 0x1e00c,
2501 		0x1e040, 0x1e044,
2502 		0x1e04c, 0x1e04c,
2503 		0x1e284, 0x1e290,
2504 		0x1e2c0, 0x1e2c0,
2505 		0x1e2e0, 0x1e2e0,
2506 		0x1e300, 0x1e384,
2507 		0x1e3c0, 0x1e3c8,
2508 		0x1e408, 0x1e40c,
2509 		0x1e440, 0x1e444,
2510 		0x1e44c, 0x1e44c,
2511 		0x1e684, 0x1e690,
2512 		0x1e6c0, 0x1e6c0,
2513 		0x1e6e0, 0x1e6e0,
2514 		0x1e700, 0x1e784,
2515 		0x1e7c0, 0x1e7c8,
2516 		0x1e808, 0x1e80c,
2517 		0x1e840, 0x1e844,
2518 		0x1e84c, 0x1e84c,
2519 		0x1ea84, 0x1ea90,
2520 		0x1eac0, 0x1eac0,
2521 		0x1eae0, 0x1eae0,
2522 		0x1eb00, 0x1eb84,
2523 		0x1ebc0, 0x1ebc8,
2524 		0x1ec08, 0x1ec0c,
2525 		0x1ec40, 0x1ec44,
2526 		0x1ec4c, 0x1ec4c,
2527 		0x1ee84, 0x1ee90,
2528 		0x1eec0, 0x1eec0,
2529 		0x1eee0, 0x1eee0,
2530 		0x1ef00, 0x1ef84,
2531 		0x1efc0, 0x1efc8,
2532 		0x1f008, 0x1f00c,
2533 		0x1f040, 0x1f044,
2534 		0x1f04c, 0x1f04c,
2535 		0x1f284, 0x1f290,
2536 		0x1f2c0, 0x1f2c0,
2537 		0x1f2e0, 0x1f2e0,
2538 		0x1f300, 0x1f384,
2539 		0x1f3c0, 0x1f3c8,
2540 		0x1f408, 0x1f40c,
2541 		0x1f440, 0x1f444,
2542 		0x1f44c, 0x1f44c,
2543 		0x1f684, 0x1f690,
2544 		0x1f6c0, 0x1f6c0,
2545 		0x1f6e0, 0x1f6e0,
2546 		0x1f700, 0x1f784,
2547 		0x1f7c0, 0x1f7c8,
2548 		0x1f808, 0x1f80c,
2549 		0x1f840, 0x1f844,
2550 		0x1f84c, 0x1f84c,
2551 		0x1fa84, 0x1fa90,
2552 		0x1fac0, 0x1fac0,
2553 		0x1fae0, 0x1fae0,
2554 		0x1fb00, 0x1fb84,
2555 		0x1fbc0, 0x1fbc8,
2556 		0x1fc08, 0x1fc0c,
2557 		0x1fc40, 0x1fc44,
2558 		0x1fc4c, 0x1fc4c,
2559 		0x1fe84, 0x1fe90,
2560 		0x1fec0, 0x1fec0,
2561 		0x1fee0, 0x1fee0,
2562 		0x1ff00, 0x1ff84,
2563 		0x1ffc0, 0x1ffc8,
2564 		0x30000, 0x30030,
2565 		0x30100, 0x30168,
2566 		0x30190, 0x301a0,
2567 		0x301a8, 0x301b8,
2568 		0x301c4, 0x301c8,
2569 		0x301d0, 0x301d0,
2570 		0x30200, 0x30320,
2571 		0x30400, 0x304b4,
2572 		0x304c0, 0x3052c,
2573 		0x30540, 0x3061c,
2574 		0x30800, 0x308a0,
2575 		0x308c0, 0x30908,
2576 		0x30910, 0x309b8,
2577 		0x30a00, 0x30a04,
2578 		0x30a0c, 0x30a14,
2579 		0x30a1c, 0x30a2c,
2580 		0x30a44, 0x30a50,
2581 		0x30a74, 0x30a74,
2582 		0x30a7c, 0x30afc,
2583 		0x30b08, 0x30c24,
2584 		0x30d00, 0x30d14,
2585 		0x30d1c, 0x30d3c,
2586 		0x30d44, 0x30d4c,
2587 		0x30d54, 0x30d74,
2588 		0x30d7c, 0x30d7c,
2589 		0x30de0, 0x30de0,
2590 		0x30e00, 0x30ed4,
2591 		0x30f00, 0x30fa4,
2592 		0x30fc0, 0x30fc4,
2593 		0x31000, 0x31004,
2594 		0x31080, 0x310fc,
2595 		0x31208, 0x31220,
2596 		0x3123c, 0x31254,
2597 		0x31300, 0x31300,
2598 		0x31308, 0x3131c,
2599 		0x31338, 0x3133c,
2600 		0x31380, 0x31380,
2601 		0x31388, 0x313a8,
2602 		0x313b4, 0x313b4,
2603 		0x31400, 0x31420,
2604 		0x31438, 0x3143c,
2605 		0x31480, 0x31480,
2606 		0x314a8, 0x314a8,
2607 		0x314b0, 0x314b4,
2608 		0x314c8, 0x314d4,
2609 		0x31a40, 0x31a4c,
2610 		0x31af0, 0x31b20,
2611 		0x31b38, 0x31b3c,
2612 		0x31b80, 0x31b80,
2613 		0x31ba8, 0x31ba8,
2614 		0x31bb0, 0x31bb4,
2615 		0x31bc8, 0x31bd4,
2616 		0x32140, 0x3218c,
2617 		0x321f0, 0x321f4,
2618 		0x32200, 0x32200,
2619 		0x32218, 0x32218,
2620 		0x32400, 0x32400,
2621 		0x32408, 0x3241c,
2622 		0x32618, 0x32620,
2623 		0x32664, 0x32664,
2624 		0x326a8, 0x326a8,
2625 		0x326ec, 0x326ec,
2626 		0x32a00, 0x32abc,
2627 		0x32b00, 0x32b18,
2628 		0x32b20, 0x32b38,
2629 		0x32b40, 0x32b58,
2630 		0x32b60, 0x32b78,
2631 		0x32c00, 0x32c00,
2632 		0x32c08, 0x32c3c,
2633 		0x33000, 0x3302c,
2634 		0x33034, 0x33050,
2635 		0x33058, 0x33058,
2636 		0x33060, 0x3308c,
2637 		0x3309c, 0x330ac,
2638 		0x330c0, 0x330c0,
2639 		0x330c8, 0x330d0,
2640 		0x330d8, 0x330e0,
2641 		0x330ec, 0x3312c,
2642 		0x33134, 0x33150,
2643 		0x33158, 0x33158,
2644 		0x33160, 0x3318c,
2645 		0x3319c, 0x331ac,
2646 		0x331c0, 0x331c0,
2647 		0x331c8, 0x331d0,
2648 		0x331d8, 0x331e0,
2649 		0x331ec, 0x33290,
2650 		0x33298, 0x332c4,
2651 		0x332e4, 0x33390,
2652 		0x33398, 0x333c4,
2653 		0x333e4, 0x3342c,
2654 		0x33434, 0x33450,
2655 		0x33458, 0x33458,
2656 		0x33460, 0x3348c,
2657 		0x3349c, 0x334ac,
2658 		0x334c0, 0x334c0,
2659 		0x334c8, 0x334d0,
2660 		0x334d8, 0x334e0,
2661 		0x334ec, 0x3352c,
2662 		0x33534, 0x33550,
2663 		0x33558, 0x33558,
2664 		0x33560, 0x3358c,
2665 		0x3359c, 0x335ac,
2666 		0x335c0, 0x335c0,
2667 		0x335c8, 0x335d0,
2668 		0x335d8, 0x335e0,
2669 		0x335ec, 0x33690,
2670 		0x33698, 0x336c4,
2671 		0x336e4, 0x33790,
2672 		0x33798, 0x337c4,
2673 		0x337e4, 0x337fc,
2674 		0x33814, 0x33814,
2675 		0x33854, 0x33868,
2676 		0x33880, 0x3388c,
2677 		0x338c0, 0x338d0,
2678 		0x338e8, 0x338ec,
2679 		0x33900, 0x3392c,
2680 		0x33934, 0x33950,
2681 		0x33958, 0x33958,
2682 		0x33960, 0x3398c,
2683 		0x3399c, 0x339ac,
2684 		0x339c0, 0x339c0,
2685 		0x339c8, 0x339d0,
2686 		0x339d8, 0x339e0,
2687 		0x339ec, 0x33a90,
2688 		0x33a98, 0x33ac4,
2689 		0x33ae4, 0x33b10,
2690 		0x33b24, 0x33b28,
2691 		0x33b38, 0x33b50,
2692 		0x33bf0, 0x33c10,
2693 		0x33c24, 0x33c28,
2694 		0x33c38, 0x33c50,
2695 		0x33cf0, 0x33cfc,
2696 		0x34000, 0x34030,
2697 		0x34100, 0x34168,
2698 		0x34190, 0x341a0,
2699 		0x341a8, 0x341b8,
2700 		0x341c4, 0x341c8,
2701 		0x341d0, 0x341d0,
2702 		0x34200, 0x34320,
2703 		0x34400, 0x344b4,
2704 		0x344c0, 0x3452c,
2705 		0x34540, 0x3461c,
2706 		0x34800, 0x348a0,
2707 		0x348c0, 0x34908,
2708 		0x34910, 0x349b8,
2709 		0x34a00, 0x34a04,
2710 		0x34a0c, 0x34a14,
2711 		0x34a1c, 0x34a2c,
2712 		0x34a44, 0x34a50,
2713 		0x34a74, 0x34a74,
2714 		0x34a7c, 0x34afc,
2715 		0x34b08, 0x34c24,
2716 		0x34d00, 0x34d14,
2717 		0x34d1c, 0x34d3c,
2718 		0x34d44, 0x34d4c,
2719 		0x34d54, 0x34d74,
2720 		0x34d7c, 0x34d7c,
2721 		0x34de0, 0x34de0,
2722 		0x34e00, 0x34ed4,
2723 		0x34f00, 0x34fa4,
2724 		0x34fc0, 0x34fc4,
2725 		0x35000, 0x35004,
2726 		0x35080, 0x350fc,
2727 		0x35208, 0x35220,
2728 		0x3523c, 0x35254,
2729 		0x35300, 0x35300,
2730 		0x35308, 0x3531c,
2731 		0x35338, 0x3533c,
2732 		0x35380, 0x35380,
2733 		0x35388, 0x353a8,
2734 		0x353b4, 0x353b4,
2735 		0x35400, 0x35420,
2736 		0x35438, 0x3543c,
2737 		0x35480, 0x35480,
2738 		0x354a8, 0x354a8,
2739 		0x354b0, 0x354b4,
2740 		0x354c8, 0x354d4,
2741 		0x35a40, 0x35a4c,
2742 		0x35af0, 0x35b20,
2743 		0x35b38, 0x35b3c,
2744 		0x35b80, 0x35b80,
2745 		0x35ba8, 0x35ba8,
2746 		0x35bb0, 0x35bb4,
2747 		0x35bc8, 0x35bd4,
2748 		0x36140, 0x3618c,
2749 		0x361f0, 0x361f4,
2750 		0x36200, 0x36200,
2751 		0x36218, 0x36218,
2752 		0x36400, 0x36400,
2753 		0x36408, 0x3641c,
2754 		0x36618, 0x36620,
2755 		0x36664, 0x36664,
2756 		0x366a8, 0x366a8,
2757 		0x366ec, 0x366ec,
2758 		0x36a00, 0x36abc,
2759 		0x36b00, 0x36b18,
2760 		0x36b20, 0x36b38,
2761 		0x36b40, 0x36b58,
2762 		0x36b60, 0x36b78,
2763 		0x36c00, 0x36c00,
2764 		0x36c08, 0x36c3c,
2765 		0x37000, 0x3702c,
2766 		0x37034, 0x37050,
2767 		0x37058, 0x37058,
2768 		0x37060, 0x3708c,
2769 		0x3709c, 0x370ac,
2770 		0x370c0, 0x370c0,
2771 		0x370c8, 0x370d0,
2772 		0x370d8, 0x370e0,
2773 		0x370ec, 0x3712c,
2774 		0x37134, 0x37150,
2775 		0x37158, 0x37158,
2776 		0x37160, 0x3718c,
2777 		0x3719c, 0x371ac,
2778 		0x371c0, 0x371c0,
2779 		0x371c8, 0x371d0,
2780 		0x371d8, 0x371e0,
2781 		0x371ec, 0x37290,
2782 		0x37298, 0x372c4,
2783 		0x372e4, 0x37390,
2784 		0x37398, 0x373c4,
2785 		0x373e4, 0x3742c,
2786 		0x37434, 0x37450,
2787 		0x37458, 0x37458,
2788 		0x37460, 0x3748c,
2789 		0x3749c, 0x374ac,
2790 		0x374c0, 0x374c0,
2791 		0x374c8, 0x374d0,
2792 		0x374d8, 0x374e0,
2793 		0x374ec, 0x3752c,
2794 		0x37534, 0x37550,
2795 		0x37558, 0x37558,
2796 		0x37560, 0x3758c,
2797 		0x3759c, 0x375ac,
2798 		0x375c0, 0x375c0,
2799 		0x375c8, 0x375d0,
2800 		0x375d8, 0x375e0,
2801 		0x375ec, 0x37690,
2802 		0x37698, 0x376c4,
2803 		0x376e4, 0x37790,
2804 		0x37798, 0x377c4,
2805 		0x377e4, 0x377fc,
2806 		0x37814, 0x37814,
2807 		0x37854, 0x37868,
2808 		0x37880, 0x3788c,
2809 		0x378c0, 0x378d0,
2810 		0x378e8, 0x378ec,
2811 		0x37900, 0x3792c,
2812 		0x37934, 0x37950,
2813 		0x37958, 0x37958,
2814 		0x37960, 0x3798c,
2815 		0x3799c, 0x379ac,
2816 		0x379c0, 0x379c0,
2817 		0x379c8, 0x379d0,
2818 		0x379d8, 0x379e0,
2819 		0x379ec, 0x37a90,
2820 		0x37a98, 0x37ac4,
2821 		0x37ae4, 0x37b10,
2822 		0x37b24, 0x37b28,
2823 		0x37b38, 0x37b50,
2824 		0x37bf0, 0x37c10,
2825 		0x37c24, 0x37c28,
2826 		0x37c38, 0x37c50,
2827 		0x37cf0, 0x37cfc,
2828 		0x40040, 0x40040,
2829 		0x40080, 0x40084,
2830 		0x40100, 0x40100,
2831 		0x40140, 0x401bc,
2832 		0x40200, 0x40214,
2833 		0x40228, 0x40228,
2834 		0x40240, 0x40258,
2835 		0x40280, 0x40280,
2836 		0x40304, 0x40304,
2837 		0x40330, 0x4033c,
2838 		0x41304, 0x413c8,
2839 		0x413d0, 0x413dc,
2840 		0x413f0, 0x413f0,
2841 		0x41400, 0x4140c,
2842 		0x41414, 0x4141c,
2843 		0x41480, 0x414d0,
2844 		0x44000, 0x4407c,
2845 		0x440c0, 0x441ac,
2846 		0x441b4, 0x4427c,
2847 		0x442c0, 0x443ac,
2848 		0x443b4, 0x4447c,
2849 		0x444c0, 0x445ac,
2850 		0x445b4, 0x4467c,
2851 		0x446c0, 0x447ac,
2852 		0x447b4, 0x4487c,
2853 		0x448c0, 0x449ac,
2854 		0x449b4, 0x44a7c,
2855 		0x44ac0, 0x44bac,
2856 		0x44bb4, 0x44c7c,
2857 		0x44cc0, 0x44dac,
2858 		0x44db4, 0x44e7c,
2859 		0x44ec0, 0x44fac,
2860 		0x44fb4, 0x4507c,
2861 		0x450c0, 0x451ac,
2862 		0x451b4, 0x451fc,
2863 		0x45800, 0x45804,
2864 		0x45810, 0x45830,
2865 		0x45840, 0x45860,
2866 		0x45868, 0x45868,
2867 		0x45880, 0x45884,
2868 		0x458a0, 0x458b0,
2869 		0x45a00, 0x45a04,
2870 		0x45a10, 0x45a30,
2871 		0x45a40, 0x45a60,
2872 		0x45a68, 0x45a68,
2873 		0x45a80, 0x45a84,
2874 		0x45aa0, 0x45ab0,
2875 		0x460c0, 0x460e4,
2876 		0x47000, 0x4703c,
2877 		0x47044, 0x4708c,
2878 		0x47200, 0x47250,
2879 		0x47400, 0x47408,
2880 		0x47414, 0x47420,
2881 		0x47600, 0x47618,
2882 		0x47800, 0x47814,
2883 		0x47820, 0x4782c,
2884 		0x50000, 0x50084,
2885 		0x50090, 0x500cc,
2886 		0x50300, 0x50384,
2887 		0x50400, 0x50400,
2888 		0x50800, 0x50884,
2889 		0x50890, 0x508cc,
2890 		0x50b00, 0x50b84,
2891 		0x50c00, 0x50c00,
2892 		0x51000, 0x51020,
2893 		0x51028, 0x510b0,
2894 		0x51300, 0x51324,
2895 	};
2896 
2897 	u32 *buf_end = (u32 *)((char *)buf + buf_size);
2898 	const unsigned int *reg_ranges;
2899 	int reg_ranges_size, range;
2900 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2901 
2902 	/* Select the right set of register ranges to dump depending on the
2903 	 * adapter chip type.
2904 	 */
2905 	switch (chip_version) {
2906 	case CHELSIO_T4:
2907 		reg_ranges = t4_reg_ranges;
2908 		reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2909 		break;
2910 
2911 	case CHELSIO_T5:
2912 		reg_ranges = t5_reg_ranges;
2913 		reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2914 		break;
2915 
2916 	case CHELSIO_T6:
2917 		reg_ranges = t6_reg_ranges;
2918 		reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2919 		break;
2920 
2921 	default:
2922 		CH_ERR(adap,
2923 			"Unsupported chip version %d\n", chip_version);
2924 		return;
2925 	}
2926 
2927 	/* Clear the register buffer and insert the appropriate register
2928 	 * values selected by the above register ranges.
2929 	 */
2930 	memset(buf, 0, buf_size);
2931 	for (range = 0; range < reg_ranges_size; range += 2) {
2932 		unsigned int reg = reg_ranges[range];
2933 		unsigned int last_reg = reg_ranges[range + 1];
2934 		u32 *bufp = (u32 *)((char *)buf + reg);
2935 
2936 		/* Iterate across the register range filling in the register
2937 		 * buffer but don't write past the end of the register buffer.
2938 		 */
2939 		while (reg <= last_reg && bufp < buf_end) {
2940 			*bufp++ = t4_read_reg(adap, reg);
2941 			reg += sizeof(u32);
2942 		}
2943 	}
2944 }
2945 
2946 /*
2947  * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2948  */
2949 #define EEPROM_DELAY		10		// 10us per poll spin
2950 #define EEPROM_MAX_POLL		5000		// x 5000 == 50ms
2951 
2952 #define EEPROM_STAT_ADDR	0x7bfc
2953 #define VPD_SIZE		0x800
2954 #define VPD_BASE		0x400
2955 #define VPD_BASE_OLD		0
2956 #define VPD_LEN			1024
2957 #define VPD_INFO_FLD_HDR_SIZE	3
2958 #define CHELSIO_VPD_UNIQUE_ID	0x82
2959 
2960 /*
2961  * Small utility function to wait till any outstanding VPD Access is complete.
2962  * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2963  * VPD Access in flight.  This allows us to handle the problem of having a
2964  * previous VPD Access time out and prevent an attempt to inject a new VPD
2965  * Request before any in-flight VPD reguest has completed.
2966  */
2967 static int t4_seeprom_wait(struct adapter *adapter)
2968 {
2969 	unsigned int base = adapter->params.pci.vpd_cap_addr;
2970 	int max_poll;
2971 
2972 	/*
2973 	 * If no VPD Access is in flight, we can just return success right
2974 	 * away.
2975 	 */
2976 	if (!adapter->vpd_busy)
2977 		return 0;
2978 
2979 	/*
2980 	 * Poll the VPD Capability Address/Flag register waiting for it
2981 	 * to indicate that the operation is complete.
2982 	 */
2983 	max_poll = EEPROM_MAX_POLL;
2984 	do {
2985 		u16 val;
2986 
2987 		udelay(EEPROM_DELAY);
2988 		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2989 
2990 		/*
2991 		 * If the operation is complete, mark the VPD as no longer
2992 		 * busy and return success.
2993 		 */
2994 		if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2995 			adapter->vpd_busy = 0;
2996 			return 0;
2997 		}
2998 	} while (--max_poll);
2999 
3000 	/*
3001 	 * Failure!  Note that we leave the VPD Busy status set in order to
3002 	 * avoid pushing a new VPD Access request into the VPD Capability till
3003 	 * the current operation eventually succeeds.  It's a bug to issue a
3004 	 * new request when an existing request is in flight and will result
3005 	 * in corrupt hardware state.
3006 	 */
3007 	return -ETIMEDOUT;
3008 }
3009 
3010 /**
3011  *	t4_seeprom_read - read a serial EEPROM location
3012  *	@adapter: adapter to read
3013  *	@addr: EEPROM virtual address
3014  *	@data: where to store the read data
3015  *
3016  *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
3017  *	VPD capability.  Note that this function must be called with a virtual
3018  *	address.
3019  */
3020 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
3021 {
3022 	unsigned int base = adapter->params.pci.vpd_cap_addr;
3023 	int ret;
3024 
3025 	/*
3026 	 * VPD Accesses must alway be 4-byte aligned!
3027 	 */
3028 	if (addr >= EEPROMVSIZE || (addr & 3))
3029 		return -EINVAL;
3030 
3031 	/*
3032 	 * Wait for any previous operation which may still be in flight to
3033 	 * complete.
3034 	 */
3035 	ret = t4_seeprom_wait(adapter);
3036 	if (ret) {
3037 		CH_ERR(adapter, "VPD still busy from previous operation\n");
3038 		return ret;
3039 	}
3040 
3041 	/*
3042 	 * Issue our new VPD Read request, mark the VPD as being busy and wait
3043 	 * for our request to complete.  If it doesn't complete, note the
3044 	 * error and return it to our caller.  Note that we do not reset the
3045 	 * VPD Busy status!
3046 	 */
3047 	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
3048 	adapter->vpd_busy = 1;
3049 	adapter->vpd_flag = PCI_VPD_ADDR_F;
3050 	ret = t4_seeprom_wait(adapter);
3051 	if (ret) {
3052 		CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
3053 		return ret;
3054 	}
3055 
3056 	/*
3057 	 * Grab the returned data, swizzle it into our endianess and
3058 	 * return success.
3059 	 */
3060 	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
3061 	*data = le32_to_cpu(*data);
3062 	return 0;
3063 }
3064 
3065 /**
3066  *	t4_seeprom_write - write a serial EEPROM location
3067  *	@adapter: adapter to write
3068  *	@addr: virtual EEPROM address
3069  *	@data: value to write
3070  *
3071  *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
3072  *	VPD capability.  Note that this function must be called with a virtual
3073  *	address.
3074  */
3075 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
3076 {
3077 	unsigned int base = adapter->params.pci.vpd_cap_addr;
3078 	int ret;
3079 	u32 stats_reg;
3080 	int max_poll;
3081 
3082 	/*
3083 	 * VPD Accesses must alway be 4-byte aligned!
3084 	 */
3085 	if (addr >= EEPROMVSIZE || (addr & 3))
3086 		return -EINVAL;
3087 
3088 	/*
3089 	 * Wait for any previous operation which may still be in flight to
3090 	 * complete.
3091 	 */
3092 	ret = t4_seeprom_wait(adapter);
3093 	if (ret) {
3094 		CH_ERR(adapter, "VPD still busy from previous operation\n");
3095 		return ret;
3096 	}
3097 
3098 	/*
3099 	 * Issue our new VPD Read request, mark the VPD as being busy and wait
3100 	 * for our request to complete.  If it doesn't complete, note the
3101 	 * error and return it to our caller.  Note that we do not reset the
3102 	 * VPD Busy status!
3103 	 */
3104 	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
3105 				 cpu_to_le32(data));
3106 	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
3107 				 (u16)addr | PCI_VPD_ADDR_F);
3108 	adapter->vpd_busy = 1;
3109 	adapter->vpd_flag = 0;
3110 	ret = t4_seeprom_wait(adapter);
3111 	if (ret) {
3112 		CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
3113 		return ret;
3114 	}
3115 
3116 	/*
3117 	 * Reset PCI_VPD_DATA register after a transaction and wait for our
3118 	 * request to complete. If it doesn't complete, return error.
3119 	 */
3120 	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
3121 	max_poll = EEPROM_MAX_POLL;
3122 	do {
3123 		udelay(EEPROM_DELAY);
3124 		t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
3125 	} while ((stats_reg & 0x1) && --max_poll);
3126 	if (!max_poll)
3127 		return -ETIMEDOUT;
3128 
3129 	/* Return success! */
3130 	return 0;
3131 }
3132 
3133 /**
3134  *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
3135  *	@phys_addr: the physical EEPROM address
3136  *	@fn: the PCI function number
3137  *	@sz: size of function-specific area
3138  *
3139  *	Translate a physical EEPROM address to virtual.  The first 1K is
3140  *	accessed through virtual addresses starting at 31K, the rest is
3141  *	accessed through virtual addresses starting at 0.
3142  *
3143  *	The mapping is as follows:
3144  *	[0..1K) -> [31K..32K)
3145  *	[1K..1K+A) -> [ES-A..ES)
3146  *	[1K+A..ES) -> [0..ES-A-1K)
3147  *
3148  *	where A = @fn * @sz, and ES = EEPROM size.
3149  */
3150 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
3151 {
3152 	fn *= sz;
3153 	if (phys_addr < 1024)
3154 		return phys_addr + (31 << 10);
3155 	if (phys_addr < 1024 + fn)
3156 		return EEPROMSIZE - fn + phys_addr - 1024;
3157 	if (phys_addr < EEPROMSIZE)
3158 		return phys_addr - 1024 - fn;
3159 	return -EINVAL;
3160 }
3161 
3162 /**
3163  *	t4_seeprom_wp - enable/disable EEPROM write protection
3164  *	@adapter: the adapter
3165  *	@enable: whether to enable or disable write protection
3166  *
3167  *	Enables or disables write protection on the serial EEPROM.
3168  */
3169 int t4_seeprom_wp(struct adapter *adapter, int enable)
3170 {
3171 	return t4_os_pci_write_seeprom(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
3172 }
3173 
3174 /**
3175  *	get_vpd_keyword_val - Locates an information field keyword in the VPD
3176  *	@v: Pointer to buffered vpd data structure
3177  *	@kw: The keyword to search for
3178  *
3179  *	Returns the value of the information field keyword or
3180  *	-ENOENT otherwise.
3181  */
3182 int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
3183 {
3184 	int i;
3185 	unsigned int offset , len;
3186 	const u8 *buf = (const u8 *)v;
3187 	const u8 *vpdr_len = &v->vpdr_len[0];
3188 	offset = sizeof(struct t4_vpd_hdr);
3189 	len =  (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);
3190 
3191 	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
3192 		return -ENOENT;
3193 	}
3194 
3195 	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
3196 		if(memcmp(buf + i , kw , 2) == 0){
3197 			i += VPD_INFO_FLD_HDR_SIZE;
3198 			return i;
3199 		}
3200 
3201 		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
3202 	}
3203 
3204 	return -ENOENT;
3205 }
3206 
3207 /*
3208  * str_strip
3209  * Removes trailing whitespaces from string "s"
3210  * Based on strstrip() implementation in string.c
3211  */
3212 static void str_strip(char *s)
3213 {
3214 	size_t size;
3215 	char *end;
3216 
3217 	size = strlen(s);
3218 	if (!size)
3219 		return;
3220 
3221 	end = s + size - 1;
3222 	while (end >= s && isspace(*end))
3223 		end--;
3224 	*(end + 1) = '\0';
3225 }
3226 
3227 /**
3228  *	t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
3229  *	@adapter: adapter to read
3230  *	@p: where to store the parameters
3231  *
3232  *	Reads card parameters stored in VPD EEPROM.
3233  */
3234 int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
3235 {
3236 	int i, ret = 0, addr;
3237 	int ec, sn, pn, na;
3238 	u8 *vpd, csum;
3239 	const struct t4_vpd_hdr *v;
3240 
3241 	vpd = (u8 *)t4_os_alloc(sizeof(u8) * VPD_LEN);
3242 	if (!vpd)
3243 		return -ENOMEM;
3244 
3245 	/* We have two VPD data structures stored in the adapter VPD area.
3246 	 * By default, Linux calculates the size of the VPD area by traversing
3247 	 * the first VPD area at offset 0x0, so we need to tell the OS what
3248 	 * our real VPD size is.
3249 	 */
3250 	ret = t4_os_pci_set_vpd_size(adapter, VPD_SIZE);
3251 	if (ret < 0)
3252 		goto out;
3253 
3254 	/* Card information normally starts at VPD_BASE but early cards had
3255 	 * it at 0.
3256 	 */
3257 	ret = t4_os_pci_read_seeprom(adapter, VPD_BASE, (u32 *)(vpd));
3258 	if (ret)
3259 		goto out;
3260 
3261 	/* The VPD shall have a unique identifier specified by the PCI SIG.
3262 	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
3263 	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
3264 	 * is expected to automatically put this entry at the
3265 	 * beginning of the VPD.
3266 	 */
3267 	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
3268 
3269 	for (i = 0; i < VPD_LEN; i += 4) {
3270 		ret = t4_os_pci_read_seeprom(adapter, addr+i, (u32 *)(vpd+i));
3271 		if (ret)
3272 			goto out;
3273 	}
3274  	v = (const struct t4_vpd_hdr *)vpd;
3275 
3276 #define FIND_VPD_KW(var,name) do { \
3277 	var = get_vpd_keyword_val(v , name); \
3278 	if (var < 0) { \
3279 		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
3280 		ret = -EINVAL; \
3281 		goto out;      \
3282 	} \
3283 } while (0)
3284 
3285 	FIND_VPD_KW(i, "RV");
3286 	for (csum = 0; i >= 0; i--)
3287 		csum += vpd[i];
3288 
3289 	if (csum) {
3290 		CH_ERR(adapter,
3291 			"corrupted VPD EEPROM, actual csum %u\n", csum);
3292 		ret = -EINVAL;
3293 		goto out;
3294 	}
3295 
3296 	FIND_VPD_KW(ec, "EC");
3297 	FIND_VPD_KW(sn, "SN");
3298 	FIND_VPD_KW(pn, "PN");
3299 	FIND_VPD_KW(na, "NA");
3300 #undef FIND_VPD_KW
3301 
3302 	memcpy(p->id, v->id_data, ID_LEN);
3303 	str_strip((char *)p->id);
3304 	memcpy(p->ec, vpd + ec, EC_LEN);
3305 	str_strip((char *)p->ec);
3306 	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
3307 	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
3308 	str_strip((char *)p->sn);
3309 	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
3310 	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
3311 	str_strip((char *)p->pn);
3312 	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
3313 	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
3314 	str_strip((char *)p->na);
3315 
3316 out:
3317 	kmem_free(vpd, sizeof(u8) * VPD_LEN);
3318 	return ret < 0 ? ret : 0;
3319 }
3320 
3321 /**
3322  *	t4_get_vpd_params - read VPD parameters & retrieve Core Clock
3323  *	@adapter: adapter to read
3324  *	@p: where to store the parameters
3325  *
3326  *	Reads card parameters stored in VPD EEPROM and retrieves the Core
3327  *	Clock.  This can only be called after a connection to the firmware
3328  *	is established.
3329  */
3330 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
3331 {
3332 	u32 cclk_param, cclk_val;
3333 	int ret;
3334 
3335 	/*
3336 	 * Grab the raw VPD parameters.
3337 	 */
3338 	ret = t4_get_raw_vpd_params(adapter, p);
3339 	if (ret)
3340 		return ret;
3341 
3342 	/*
3343 	 * Ask firmware for the Core Clock since it knows how to translate the
3344 	 * Reference Clock ('V2') VPD field into a Core Clock value ...
3345 	 */
3346 	cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3347 		      V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
3348 	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3349 			      1, &cclk_param, &cclk_val);
3350 
3351 	if (ret)
3352 		return ret;
3353 	p->cclk = cclk_val;
3354 
3355 	return 0;
3356 }
3357 
3358 /**
 *	t4_get_pfres - retrieve PF resource limits
 *	@adapter: the adapter
 *
 *	Retrieves configured resource limits and capabilities for the
 *	physical function.  The results are stored in @adapter->params.pfres.
3364  */
int t4_get_pfres(struct adapter *adapter)
{
	struct pf_resources *pfres = &adapter->params.pfres;
	struct fw_pfvf_cmd cmd, rpl;
	int v;
	u32 word;

	/*
	 * Execute PFVF Read command to get PF resource limits; bail out early
	 * with error on command failure.  VFN 0 addresses the PF itself.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
				    F_FW_CMD_REQUEST |
				    F_FW_CMD_READ |
				    V_FW_PFVF_CMD_PFN(adapter->pf) |
				    V_FW_PFVF_CMD_VFN(0));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
	if (v != FW_SUCCESS)
		return v;

	/*
	 * Extract PF resource limits from the firmware reply, one 32-bit
	 * field group at a time, and return success.
	 */
	word = be32_to_cpu(rpl.niqflint_niq);
	pfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
	pfres->niq = G_FW_PFVF_CMD_NIQ(word);

	word = be32_to_cpu(rpl.type_to_neq);
	pfres->neq = G_FW_PFVF_CMD_NEQ(word);
	pfres->pmask = G_FW_PFVF_CMD_PMASK(word);

	word = be32_to_cpu(rpl.tc_to_nexactf);
	pfres->tc = G_FW_PFVF_CMD_TC(word);
	pfres->nvi = G_FW_PFVF_CMD_NVI(word);
	pfres->nexactf = G_FW_PFVF_CMD_NEXACTF(word);

	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
	pfres->r_caps = G_FW_PFVF_CMD_R_CAPS(word);
	pfres->wx_caps = G_FW_PFVF_CMD_WX_CAPS(word);
	pfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word);

	return 0;
}
3410 
3411 /* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,	/* max retries for SF operations */

	/* flash command opcodes (common JEDEC SPI NOR command set) */
	SF_PROG_PAGE    = 2,	/* program page */
	SF_WR_DISABLE   = 4,	/* disable writes */
	SF_RD_STATUS    = 5,	/* read status register */
	SF_WR_ENABLE    = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash (fast read) */
	SF_RD_ID	= 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
};
3424 
3425 /**
3426  *	sf1_read - read data from the serial flash
3427  *	@adapter: the adapter
3428  *	@byte_cnt: number of bytes to read
3429  *	@cont: whether another operation will be chained
3430  *	@lock: whether to lock SF for PL access only
3431  *	@valp: where to store the read data
3432  *
3433  *	Reads up to 4 bytes of data from the serial flash.  The location of
3434  *	the read needs to be specified prior to calling this by issuing the
3435  *	appropriate commands to the serial flash.
3436  */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	/* The SF interface moves at most 4 bytes per operation. */
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;		/* previous SF operation still running */
	/* Start the transfer; BYTECNT is encoded as (count - 1). */
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	/* Poll BUSY until the operation completes, then pick up the data. */
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}
3453 
3454 /**
3455  *	sf1_write - write data to the serial flash
3456  *	@adapter: the adapter
3457  *	@byte_cnt: number of bytes to write
3458  *	@cont: whether another operation will be chained
3459  *	@lock: whether to lock SF for PL access only
3460  *	@val: value to write
3461  *
3462  *	Writes up to 4 bytes of data to the serial flash.  The location of
3463  *	the write needs to be specified prior to calling this by issuing the
3464  *	appropriate commands to the serial flash.
3465  */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	/* The SF interface moves at most 4 bytes per operation. */
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;		/* previous SF operation still running */
	t4_write_reg(adapter, A_SF_DATA, val);
	/* BYTECNT is encoded as (count - 1); V_OP(1) presumably selects the
	 * write direction (sf1_read() leaves OP at 0) -- confirm with the
	 * register documentation.
	 */
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}
3478 
3479 /**
3480  *	flash_wait_op - wait for a flash operation to complete
3481  *	@adapter: the adapter
3482  *	@attempts: max number of polls of the status register
 *	@ch_delay: delay between polls in ms
3484  *
3485  *	Wait for a flash operation to complete by polling the status register.
3486  */
static int flash_wait_op(struct adapter *adapter, int attempts, int ch_delay)
{
	int ret;
	u32 status;

	while (1) {
		/* Issue RD_STATUS and read back one status byte. */
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		/* Bit 0 clear means the flash is idle (presumably the SPI
		 * flash write-in-progress bit -- confirm with the datasheet).
		 */
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (ch_delay) {
#ifdef CONFIG_CUDBG
			/* In crash-dump context we cannot sleep; busy-wait. */
			if (adapter->flags & K_CRASH)
				mdelay(ch_delay);
			else
#endif
				msleep(ch_delay);
		}
	}
}
3510 
3511 /**
3512  *	t4_read_flash - read words from serial flash
3513  *	@adapter: the adapter
3514  *	@addr: the start address for the read
3515  *	@nwords: how many 32-bit words to read
3516  *	@data: where to store the read data
3517  *	@byte_oriented: whether to store data as bytes or as words
3518  *
3519  *	Read the specified number of 32-bit words from the serial flash.
3520  *	If @byte_oriented is set the read data is stored as a byte array
3521  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
3522  *	natural endianness.
3523  */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* Reject reads beyond the flash part or from a misaligned address. */
	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/* Build the command word: the FAST READ opcode in the low byte with
	 * the address byte-swapped into the remaining bytes so it is sent
	 * out MSB-first.
	 */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	/* Send the 4-byte command, then read one byte (presumably this
	 * absorbs the dummy byte FAST READ requires -- confirm with the
	 * flash datasheet); both keep CONT set so the operation stays open.
	 */
	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* Chain (CONT) all transfers but the last; lock on the last. */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32)(cpu_to_be32(*data));
	}
	return 0;
}
3549 
3550 /**
3551  *	t4_write_flash - write up to a page of data to the serial flash
3552  *	@adapter: the adapter
3553  *	@addr: the start address to write
3554  *	@n: length of data to write in bytes
3555  *	@data: the data to write
3556  *	@byte_oriented: whether to store data as bytes or as words
3557  *
3558  *	Writes up to a page of data (256 bytes) to the serial flash starting
3559  *	at the given address.  All the data must be written to the same page.
3560  *	If @byte_oriented is set the write data is stored as byte stream
3561  *	(i.e. matches what on disk), otherwise in big-endian.
3562  */
int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[64];		/* one SF_PAGE_SIZE page worth of words */
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* Must stay within the flash part and within a single 256-byte page. */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* Command word: PAGE PROGRAM opcode plus the byte-swapped address. */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the data out up to 4 bytes at a time, keeping the operation
	 * chained (CONT) on every transfer except the last.
	 */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = cpu_to_be32(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	/* Wait for the page program to complete inside the flash. */
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	/* data was advanced past the written bytes above; rewind by n. */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}
3615 
3616 /**
3617  *	t4_get_fw_version - read the firmware version
3618  *	@adapter: the adapter
3619  *	@vers: where to place the version
3620  *
3621  *	Reads the FW version from flash.
3622  */
3623 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3624 {
3625 	return t4_read_flash(adapter, FLASH_FW_START +
3626 			     offsetof(struct fw_hdr, fw_ver), 1,
3627 			     vers, 0);
3628 }
3629 
3630 /**
3631  *	t4_get_bs_version - read the firmware bootstrap version
3632  *	@adapter: the adapter
3633  *	@vers: where to place the version
3634  *
3635  *	Reads the FW Bootstrap version from flash.
3636  */
3637 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3638 {
3639 	return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3640 			     offsetof(struct fw_hdr, fw_ver), 1,
3641 			     vers, 0);
3642 }
3643 
3644 /**
3645  *	t4_get_tp_version - read the TP microcode version
3646  *	@adapter: the adapter
3647  *	@vers: where to place the version
3648  *
3649  *	Reads the TP microcode version from flash.
3650  */
3651 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3652 {
3653 	return t4_read_flash(adapter, FLASH_FW_START +
3654 			     offsetof(struct fw_hdr, tp_microcode_ver),
3655 			     1, vers, 0);
3656 }
3657 
3658 /**
3659  *	t4_get_exprom_version - return the Expansion ROM version (if any)
3660  *	@adapter: the adapter
3661  *	@vers: where to place the version
3662  *
3663  *	Reads the Expansion ROM header from FLASH and returns the version
3664  *	number (if present) through the @vers return value pointer.  We return
3665  *	this in the Firmware Version Format since it's convenient.  Return
3666  *	0 on success, -ENOENT if no Expansion ROM is present.
3667  */
int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
{
	struct exprom_header {
		unsigned char hdr_arr[16];	/* must start with 0x55aa */
		unsigned char hdr_ver[4];	/* Expansion ROM version */
	} *hdr;
	/* Word-aligned scratch buffer large enough to hold the header. */
	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
					   sizeof(u32))];
	int ret;

	ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
			    ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
			    0);
	if (ret)
		return ret;

	hdr = (struct exprom_header *)exprom_header_buf;
	/* No (valid) Expansion ROM present if the ROM signature is absent. */
	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
		return -ENOENT;

	/* Repackage the four version bytes in Firmware Version Format. */
	*vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
		 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
		 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
		 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
	return 0;
}
3694 
3695 /**
3696  *	t4_get_scfg_version - return the Serial Configuration version
3697  *	@adapter: the adapter
3698  *	@vers: where to place the version
3699  *
3700  *	Reads the Serial Configuration Version via the Firmware interface
3701  *	(thus this can only be called once we're ready to issue Firmware
3702  *	commands).  The format of the Serial Configuration version is
3703  *	adapter specific.  Returns 0 on success, an error on failure.
3704  *
3705  *	Note that early versions of the Firmware didn't include the ability
3706  *	to retrieve the Serial Configuration version, so we zero-out the
3707  *	return-value parameter in that case to avoid leaving it with
3708  *	garbage in it.
3709  *
3710  *	Also note that the Firmware will return its cached copy of the Serial
3711  *	Initialization Revision ID, not the actual Revision ID as written in
3712  *	the Serial EEPROM.  This is only an issue if a new VPD has been written
3713  *	and the Firmware/Chip haven't yet gone through a RESET sequence.  So
3714  *	it's best to defer calling this routine till after a FW_RESET_CMD has
3715  *	been issued if the Host Driver will be performing a full adapter
3716  *	initialization.
3717  */
3718 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3719 {
3720 	u32 scfgrev_param;
3721 	int ret;
3722 
3723 	scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3724 			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
3725 	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3726 			      1, &scfgrev_param, vers);
3727 	if (ret)
3728 		*vers = 0;
3729 	return ret;
3730 }
3731 
3732 /**
3733  *	t4_get_vpd_version - return the VPD version
3734  *	@adapter: the adapter
3735  *	@vers: where to place the version
3736  *
3737  *	Reads the VPD via the Firmware interface (thus this can only be called
3738  *	once we're ready to issue Firmware commands).  The format of the
3739  *	VPD version is adapter specific.  Returns 0 on success, an error on
3740  *	failure.
3741  *
3742  *	Note that early versions of the Firmware didn't include the ability
3743  *	to retrieve the VPD version, so we zero-out the return-value parameter
3744  *	in that case to avoid leaving it with garbage in it.
3745  *
3746  *	Also note that the Firmware will return its cached copy of the VPD
3747  *	Revision ID, not the actual Revision ID as written in the Serial
3748  *	EEPROM.  This is only an issue if a new VPD has been written and the
3749  *	Firmware/Chip haven't yet gone through a RESET sequence.  So it's best
3750  *	to defer calling this routine till after a FW_RESET_CMD has been issued
3751  *	if the Host Driver will be performing a full adapter initialization.
3752  */
3753 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3754 {
3755 	u32 vpdrev_param;
3756 	int ret;
3757 
3758 	vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3759 			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
3760 	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3761 			      1, &vpdrev_param, vers);
3762 	if (ret)
3763 		*vers = 0;
3764 	return ret;
3765 }
3766 
3767 /**
3768  *	t4_get_version_info - extract various chip/firmware version information
3769  *	@adapter: the adapter
3770  *
3771  *	Reads various chip/firmware version numbers and stores them into the
3772  *	adapter Adapter Parameters structure.  If any of the efforts fails
3773  *	the first failure will be returned, but all of the version numbers
3774  *	will be read.
3775  */
int t4_get_version_info(struct adapter *adapter)
{
	int ret = 0;

	/* Record only the FIRST failure but still attempt every read. */
	#define FIRST_RET(__getvinfo) \
	do { \
		int __ret = __getvinfo; \
		if (__ret && !ret) \
			ret = __ret; \
	} while (0)

	FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
	FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
	FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
	FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
	FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
	FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));

	#undef FIRST_RET

	return ret;
}
3798 
3799 /**
3800  *	t4_dump_version_info - dump all of the adapter configuration IDs
3801  *	@adapter: the adapter
3802  *
3803  *	Dumps all of the various bits of adapter configuration version/revision
3804  *	IDs information.  This is typically called at some point after
3805  *	t4_get_version_info() has been called.
3806  */
void t4_dump_version_info(struct adapter *adapter)
{
	/*
	 * Device information.
	 */
	CH_INFO(adapter, "Chelsio %s rev %d\n",
		adapter->params.vpd.id,
		CHELSIO_CHIP_RELEASE(adapter->params.chip));
	CH_INFO(adapter, "S/N: %s, P/N: %s\n",
		adapter->params.vpd.sn,
		adapter->params.vpd.pn);

	/*
	 * Firmware Version.  A zero cached version means the read failed or
	 * nothing is loaded (see t4_get_version_info()).
	 */
	if (!adapter->params.fw_vers)
		CH_WARN(adapter, "No firmware loaded\n");
	else
		CH_INFO(adapter, "Firmware version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));

	/*
	 * Bootstrap Firmware Version.  (Some adapters don't have Bootstrap
	 * Firmware, so dev_info() is more appropriate here.)
	 */
	if (!adapter->params.bs_vers)
		CH_INFO(adapter, "No bootstrap loaded\n");
	else
		CH_INFO(adapter, "Bootstrap version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers));

	/*
	 * TP Microcode Version.
	 */
	if (!adapter->params.tp_vers)
		CH_WARN(adapter, "No TP Microcode loaded\n");
	else
		CH_INFO(adapter, "TP Microcode version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));

	/*
	 * Expansion ROM version.
	 */
	if (!adapter->params.er_vers)
		CH_INFO(adapter, "No Expansion ROM loaded\n");
	else
		CH_INFO(adapter, "Expansion ROM version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));


	/*
	 * Serial Configuration version.
	 */
	CH_INFO(adapter, "Serial Configuration version: %x\n",
		adapter->params.scfg_vers);

	/*
	 * VPD version.
	 */
	CH_INFO(adapter, "VPD version: %x\n",
		adapter->params.vpd_vers);
}
3881 
3882 /**
3883  *	t4_check_fw_version - check if the FW is supported with this driver
3884  *	@adap: the adapter
3885  *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if the firmware meets the minimum supported version, a negative error
 *	if the version could not be read or is older than the minimum.
3889  */
int t4_check_fw_version(struct adapter *adap)
{
	int ret, major, minor, micro;
	int exp_major, exp_minor, exp_micro;	/* minimum supported version */
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);

	/* Refresh the cached firmware version from flash. */
	ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	if (ret)
		return ret;

	major = G_FW_HDR_FW_VER_MAJOR(adap->params.fw_vers);
	minor = G_FW_HDR_FW_VER_MINOR(adap->params.fw_vers);
	micro = G_FW_HDR_FW_VER_MICRO(adap->params.fw_vers);

	/* Each chip generation has its own minimum supported FW version. */
	switch (chip_version) {
	case CHELSIO_T4:
		exp_major = T4FW_MIN_VERSION_MAJOR;
		exp_minor = T4FW_MIN_VERSION_MINOR;
		exp_micro = T4FW_MIN_VERSION_MICRO;
		break;
	case CHELSIO_T5:
		exp_major = T5FW_MIN_VERSION_MAJOR;
		exp_minor = T5FW_MIN_VERSION_MINOR;
		exp_micro = T5FW_MIN_VERSION_MICRO;
		break;
	case CHELSIO_T6:
		exp_major = T6FW_MIN_VERSION_MAJOR;
		exp_minor = T6FW_MIN_VERSION_MINOR;
		exp_micro = T6FW_MIN_VERSION_MICRO;
		break;
	default:
		CH_ERR(adap, "Unsupported chip type, %x\n",
			adap->params.chip);
		return -EINVAL;
	}

	/* Lexicographic (major, minor, micro) comparison against minimum. */
	if (major < exp_major || (major == exp_major && minor < exp_minor) ||
	    (major == exp_major && minor == exp_minor && micro < exp_micro)) {
		CH_ERR(adap, "Card has firmware version %u.%u.%u, minimum "
			"supported firmware is %u.%u.%u.\n", major, minor,
			micro, exp_major, exp_minor, exp_micro);
		return -EFAULT;
	}
	return 0;
}
3935 
3936 /* Is the given firmware API compatible with the one the driver was compiled
3937  * with?
3938  */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{

	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

	/*
	 * Otherwise require the same chip and identical interface versions
	 * for every microcode interface the header describes.
	 *
	 * XXX: Is this too conservative?  Perhaps I should limit this to the
	 * features that are supported in the driver.
	 */
#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}
3959 
/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 *
 * @card_fw_usable: non-zero if the firmware currently on the card is usable
 * @k: filesystem firmware version (host byte order)
 * @c: card firmware version (host byte order)
 * @t4_fw_install: install policy: 0 = never install; 2 = install whenever
 *	the versions differ; otherwise install only when the filesystem
 *	firmware is newer or the card firmware is unusable
 */
static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
				int k, int c, int t4_fw_install)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version bundled with this driver";
		goto install;
	}

	if (t4_fw_install == 2 && k != c) {
		reason = "different than the version bundled with this driver";
		goto install;
	}

	return 0;

install:
	/* Policy 0 means we may complain but must not flash new firmware. */
	if (t4_fw_install == 0) {
		CH_ERR(adap, "firmware on card (%u.%u.%u.%u) is %s, "
		       "but the driver is prohibited from installing a "
		       "different firmware on the card.\n",
		       G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		       G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
		       reason);

		return (0);
	}

	CH_ERR(adap, "firmware on card (%u.%u.%u.%u) is %s, "
	       "installing firmware %u.%u.%u.%u on card.\n",
	       G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
	       G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
	       G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
	       G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));

	return 1;
}
4007 
/**
 *	t4_prep_fw - decide which firmware to run and install it if needed
 *	@adap: the adapter
 *	@fw_info: firmware header the driver was compiled against
 *	@fw_data: firmware image from the filesystem (may be NULL)
 *	@fw_size: size of @fw_data in bytes
 *	@card_fw: buffer that receives the header of the firmware on the card
 *	@t4_fw_install: install policy (see should_install_fs_fw())
 *	@state: current device state
 *	@reset: in/out; cleared when a firmware upgrade already reset the chip
 *
 *	Compares the firmware on the card, the filesystem image and the
 *	version the driver was compiled with, upgrades the card when
 *	appropriate, and caches the resulting FW/TP versions in
 *	@adap->params.
 */
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, const int t4_fw_install,
	       enum dev_state state, int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			    sizeof(*card_fw) / sizeof(uint32_t),
			    (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		CH_ERR(adap,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	/* The filesystem image is optional. */
	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.  Note that t4_fw_install = 2
		 * is ignored here -- use cxgbtool loadfw if you want to
		 * reinstall the same firmware as the one on the card.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver),
					 t4_fw_install)) {

		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			CH_ERR(adap,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update cached information */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		(void)t4_init_devlog_params(adap, 1);
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		CH_ERR(adap, "Cannot find a usable firmware: "
			"fw_install %d, chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			t4_fw_install, state,
			G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
			G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
			G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
			G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
			G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
			G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
		/* NOTE(review): positive EINVAL here, unlike the negated
		 * errnos above -- confirm callers expect a positive value.
		 */
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;

}
4098 
4099 /**
4100  *	t4_flash_erase_sectors - erase a range of flash sectors
4101  *	@adapter: the adapter
4102  *	@start: the first sector to erase
4103  *	@end: the last sector to erase
4104  *
4105  *	Erases the sectors in the given inclusive range.
4106  */
int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	/* The range must lie entirely within the flash part. */
	if (end >= adapter->params.sf_nsec)
		return -EINVAL;

	while (start <= end) {
		/* For each sector: WR_ENABLE, then ERASE_SECTOR with the
		 * sector number shifted into the command's address bytes,
		 * then wait for the erase to finish (up to 14 x 500 ms).
		 */
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}
4129 
4130 /**
4131  *	t4_flash_cfg_addr - return the address of the flash configuration file
4132  *	@adapter: the adapter
4133  *
4134  *	Return the address within the flash where the Firmware Configuration
4135  *	File is stored, or an error if the device FLASH is too small to contain
4136  *	a Firmware Configuration File.
4137  */
4138 int t4_flash_cfg_addr(struct adapter *adapter)
4139 {
4140 	/*
4141 	 * If the device FLASH isn't large enough to hold a Firmware
4142 	 * Configuration File, return an error.
4143 	 */
4144 	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
4145 		return -ENOSPC;
4146 
4147 	return FLASH_CFG_START;
4148 }
4149 
4150 /* Return TRUE if the specified firmware matches the adapter.  I.e. T4
4151  * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
4152  * and emit an error message for mismatched firmware to save our caller the
4153  * effort ...
4154  */
4155 static int t4_fw_matches_chip(const struct adapter *adap,
4156 			      const struct fw_hdr *hdr)
4157 {
4158 	/*
4159 	 * The expression below will return FALSE for any unsupported adapter
4160 	 * which will keep us "honest" in the future ...
4161 	 */
4162 	if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
4163 	    (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
4164 	    (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
4165 		return 1;
4166 
4167 	CH_ERR(adap,
4168 		"FW image (%d) is not suitable for this adapter (%d)\n",
4169 		hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
4170 	return 0;
4171 }
4172 
4173 /**
4174  *	t4_load_fw - download firmware
4175  *	@adap: the adapter
4176  *	@fw_data: the firmware image to write
4177  *	@size: image size
4178  *	@bootstrap: indicates if the binary is a bootstrap fw
4179  *
4180  *	Write the supplied firmware image to the card's serial flash.
4181  */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size,
	       unsigned int bootstrap)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	/* Bootstrap FW lives in its own flash region with its own limits. */
	if (bootstrap) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
 		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}

	/* Sanity-check the image before touching the flash. */
	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
		CH_ERR(adap,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
			fw_size);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* The 32-bit words of a valid image sum to 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	/* Write the remaining pages one SF_PAGE_SIZE chunk at a time. */
	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	/* Everything landed; now write the real version word. */
	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n",
			ret);
	else {
		/* Refresh the cached version from what's now in flash. */
		if (bootstrap)
			ret = t4_get_bs_version(adap, &adap->params.bs_vers);
		else
			ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	}
	return ret;
}
4277 
4278 /**
4279  *	t4_phy_fw_ver - return current PHY firmware version
4280  *	@adap: the adapter
4281  *	@phy_fw_ver: return value buffer for PHY firmware version
4282  *
4283  *	Returns the current version of external PHY firmware on the
4284  *	adapter.
4285  */
4286 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
4287 {
4288 	u32 param, val;
4289 	int ret;
4290 
4291 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4292 		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
4293 		 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
4294 		 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
4295 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
4296 			      &param, &val);
4297 	if (ret < 0)
4298 		return ret;
4299 	*phy_fw_ver = val;
4300 	return 0;
4301 }
4302 
4303 /**
4304  *	t4_load_phy_fw - download port PHY firmware
4305  *	@adap: the adapter
4306  *	@win: the PCI-E Memory Window index to use for t4_memory_rw()
4307  *	@lock: the lock to use to guard the memory copy
4308  *	@phy_fw_version: function to check PHY firmware versions
4309  *	@phy_fw_data: the PHY firmware image to write
4310  *	@phy_fw_size: image size
4311  *
4312  *	Transfer the specified PHY firmware to the adapter.  If a non-NULL
4313  *	@phy_fw_version is supplied, then it will be used to determine if
4314  *	it's necessary to perform the transfer by comparing the version
4315  *	of any existing adapter PHY firmware with that of the passed in
4316  *	PHY firmware image.  If @lock is non-NULL then it will be used
4317  *	around the call to t4_memory_rw() which transfers the PHY firmware
4318  *	to the adapter.
4319  *
4320  *	A negative error number will be returned if an error occurs.  If
4321  *	version number support is available and there's no need to upgrade
4322  *	the firmware, 0 will be returned.  If firmware is successfully
4323  *	transferred to the adapter, 1 will be retured.
4324  *
4325  *	NOTE: some adapters only have local RAM to store the PHY firmware.  As
4326  *	a result, a RESET of the adapter would cause that RAM to lose its
4327  *	contents.  Thus, loading PHY firmware on such adapters must happen after any
4328  *	FW_RESET_CMDs ...
4329  */
4330 int t4_load_phy_fw(struct adapter *adap,
4331 		   int win, t4_os_lock_t *lock,
4332 		   int (*phy_fw_version)(const u8 *, size_t),
4333 		   const u8 *phy_fw_data, size_t phy_fw_size)
4334 {
4335 	unsigned long mtype = 0, maddr = 0;
4336 	u32 param, val;
4337 	int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
4338 	int ret;
4339 
4340 	/*
4341 	 * If we have version number support, then check to see if the adapter
4342 	 * already has up-to-date PHY firmware loaded.
4343 	 */
4344 	 if (phy_fw_version) {
4345 		new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
4346 		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
4347 		if (ret < 0)
4348 			return ret;;
4349 
4350 		if (cur_phy_fw_ver >= new_phy_fw_vers) {
4351 			CH_WARN(adap, "PHY Firmware already up-to-date, "
4352 				"version %#x\n", cur_phy_fw_ver);
4353 			return 0;
4354 		}
4355 	}
4356 
4357 	/*
4358 	 * Ask the firmware where it wants us to copy the PHY firmware image.
4359 	 * The size of the file requires a special version of the READ coommand
4360 	 * which will pass the file size via the values field in PARAMS_CMD and
4361 	 * retreive the return value from firmware and place it in the same
4362 	 * buffer values
4363 	 */
4364 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4365 		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
4366 		 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
4367 		 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
4368 	val = phy_fw_size;
4369 	ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
4370 			      &param, &val, 1, true);
4371 	if (ret < 0)
4372 		return ret;
4373 	mtype = val >> 8;
4374 	maddr = (val & 0xff) << 16;
4375 
4376 	/*
4377 	 * Copy the supplied PHY Firmware image to the adapter memory location
4378 	 * allocated by the adapter firmware.
4379 	 */
4380 	if (lock)
4381 		t4_os_lock(lock);
4382 	ret = t4_memory_rw(adap, win, mtype, maddr,
4383 			   phy_fw_size, (__be32*)phy_fw_data,
4384 			   T4_MEMORY_WRITE);
4385 	if (lock)
4386 		t4_os_unlock(lock);
4387 	if (ret)
4388 		return ret;
4389 
4390 	/*
4391 	 * Tell the firmware that the PHY firmware image has been written to
4392 	 * RAM and it can now start copying it over to the PHYs.  The chip
4393 	 * firmware will RESET the affected PHYs as part of this operation
4394 	 * leaving them running the new PHY firmware image.
4395 	 */
4396 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4397 		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
4398 		 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
4399 		 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
4400 	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
4401 				    &param, &val, 30000);
4402 
4403 	/*
4404 	 * If we have version number support, then check to see that the new
4405 	 * firmware got loaded properly.
4406 	 */
4407 	if (phy_fw_version) {
4408 		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
4409 		if (ret < 0)
4410 			return ret;
4411 
4412 		if (cur_phy_fw_ver != new_phy_fw_vers) {
4413 			CH_WARN(adap, "PHY Firmware did not update: "
4414 				"version on adapter %#x, "
4415 				"version flashed %#x\n",
4416 				cur_phy_fw_ver, new_phy_fw_vers);
4417 			return -ENXIO;
4418 		}
4419 	}
4420 
4421 	return 1;
4422 }
4423 
4424 /**
4425  *	t4_fwcache - firmware cache operation
4426  *	@adap: the adapter
4427  *	@op  : the operation (flush or flush and invalidate)
4428  */
4429 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
4430 {
4431 	struct fw_params_cmd c;
4432 
4433 	memset(&c, 0, sizeof(c));
4434 	c.op_to_vfn =
4435 	    cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
4436 			    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4437 				V_FW_PARAMS_CMD_PFN(adap->pf) |
4438 				V_FW_PARAMS_CMD_VFN(0));
4439 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4440 	c.param[0].mnem =
4441 	    cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4442 			    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
4443 	c.param[0].val = (__force __be32)op;
4444 
4445 	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
4446 }
4447 
/*
 * Read the CIM PIF logic analyzer request and response buffers.  Each of
 * @pif_req and @pif_rsp receives CIM_PIFLA_SIZE * 6 words.  The current
 * write pointers are optionally returned via @pif_req_wrptr and
 * @pif_rsp_wrptr.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	/* Turn off LA capture while we read the buffers; the original
	 * configuration is restored at the end.
	 */
	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		/* Read 6 consecutive entries per row, then skip 2. */
		for (j = 0; j < 6; j++) {
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	/* Restore the saved debug configuration. */
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}
4481 
4482 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
4483 {
4484 	u32 cfg;
4485 	int i, j, idx;
4486 
4487 	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
4488 	if (cfg & F_LADBGEN)
4489 		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
4490 
4491 	for (i = 0; i < CIM_MALA_SIZE; i++) {
4492 		for (j = 0; j < 5; j++) {
4493 			idx = 8 * i + j;
4494 			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
4495 				     V_PILADBGRDPTR(idx));
4496 			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
4497 			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
4498 		}
4499 	}
4500 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
4501 }
4502 
/*
 * Read the ULP RX logic analyzer into @la_buf.  The output is interleaved
 * across 8 banks: entry j of bank i is stored at la_buf[i + 8 * j], for
 * ULPRX_LA_SIZE entries per bank.
 */
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		/* Select bank i, then rewind its read pointer to the
		 * current write pointer before streaming out the entries.
		 * (j is reused: first as the write pointer, then as the
		 * entry counter.)
		 */
		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
	}
}
4517 
4518 /* The ADVERT_MASK is used to mask out all of the Advertised Firmware Port
4519  * Capabilities which we control with separate controls -- see, for instance,
4520  * Pause Frames and Forward Error Correction.  In order to determine what the
4521  * full set of Advertised Port Capabilities are, the base Advertised Port
4522  * Capabilities (masked by ADVERT_MASK) must be combined with the Advertised
4523  * Port Capabilities associated with those other controls.  See
4524  * t4_link_acaps() for how this is done.
4525  */
4526 #define ADVERT_MASK (V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED) | \
4527 		     FW_PORT_CAP32_ANEG)
4528 
4529 /**
4530  *	fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
4531  *	@caps16: a 16-bit Port Capabilities value
4532  *
4533  *	Returns the equivalent 32-bit Port Capabilities value.
4534  */
4535 static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
4536 {
4537 	fw_port_cap32_t caps32 = 0;
4538 
4539 	#define CAP16_TO_CAP32(__cap) \
4540 		do { \
4541 			if (caps16 & FW_PORT_CAP_##__cap) \
4542 				caps32 |= FW_PORT_CAP32_##__cap; \
4543 		} while (0)
4544 
4545 	CAP16_TO_CAP32(SPEED_100M);
4546 	CAP16_TO_CAP32(SPEED_1G);
4547 	CAP16_TO_CAP32(SPEED_25G);
4548 	CAP16_TO_CAP32(SPEED_10G);
4549 	CAP16_TO_CAP32(SPEED_40G);
4550 	CAP16_TO_CAP32(SPEED_100G);
4551 	CAP16_TO_CAP32(FC_RX);
4552 	CAP16_TO_CAP32(FC_TX);
4553 	CAP16_TO_CAP32(ANEG);
4554 	CAP16_TO_CAP32(FORCE_PAUSE);
4555 	CAP16_TO_CAP32(MDIAUTO);
4556 	CAP16_TO_CAP32(MDISTRAIGHT);
4557 	CAP16_TO_CAP32(FEC_RS);
4558 	CAP16_TO_CAP32(FEC_BASER_RS);
4559 	CAP16_TO_CAP32(802_3_PAUSE);
4560 	CAP16_TO_CAP32(802_3_ASM_DIR);
4561 
4562 	#undef CAP16_TO_CAP32
4563 
4564 	return caps32;
4565 }
4566 
4567 /**
4568  *	fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
4569  *	@caps32: a 32-bit Port Capabilities value
4570  *
4571  *	Returns the equivalent 16-bit Port Capabilities value.  Note that
4572  *	not all 32-bit Port Capabilities can be represented in the 16-bit
4573  *	Port Capabilities and some fields/values may not make it.
4574  */
4575 static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
4576 {
4577 	fw_port_cap16_t caps16 = 0;
4578 
4579 	#define CAP32_TO_CAP16(__cap) \
4580 		do { \
4581 			if (caps32 & FW_PORT_CAP32_##__cap) \
4582 				caps16 |= FW_PORT_CAP_##__cap; \
4583 		} while (0)
4584 
4585 	CAP32_TO_CAP16(SPEED_100M);
4586 	CAP32_TO_CAP16(SPEED_1G);
4587 	CAP32_TO_CAP16(SPEED_10G);
4588 	CAP32_TO_CAP16(SPEED_25G);
4589 	CAP32_TO_CAP16(SPEED_40G);
4590 	CAP32_TO_CAP16(SPEED_100G);
4591 	CAP32_TO_CAP16(FC_RX);
4592 	CAP32_TO_CAP16(FC_TX);
4593 	CAP32_TO_CAP16(802_3_PAUSE);
4594 	CAP32_TO_CAP16(802_3_ASM_DIR);
4595 	CAP32_TO_CAP16(ANEG);
4596 	CAP32_TO_CAP16(FORCE_PAUSE);
4597 	CAP32_TO_CAP16(MDIAUTO);
4598 	CAP32_TO_CAP16(MDISTRAIGHT);
4599 	CAP32_TO_CAP16(FEC_RS);
4600 	CAP32_TO_CAP16(FEC_BASER_RS);
4601 
4602 	#undef CAP32_TO_CAP16
4603 
4604 	return caps16;
4605 }
4606 
4607 /* Translate Firmware Port Capabilities Pause specification to Common Code */
4608 static inline cc_pause_t fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
4609 {
4610 	cc_pause_t cc_pause = 0;
4611 
4612 	if (fw_pause & FW_PORT_CAP32_FC_RX)
4613 		cc_pause |= PAUSE_RX;
4614 	if (fw_pause & FW_PORT_CAP32_FC_TX)
4615 		cc_pause |= PAUSE_TX;
4616 
4617 	return cc_pause;
4618 }
4619 
4620 /* Translate Common Code Pause specification into Firmware Port Capabilities */
4621 static inline fw_port_cap32_t cc_to_fwcap_pause(cc_pause_t cc_pause)
4622 {
4623 	fw_port_cap32_t fw_pause = 0;
4624 
4625 	/* Translate orthogonal RX/TX Pause Controls for L1 Configure
4626 	 * commands, etc.
4627 	 */
4628 	if (cc_pause & PAUSE_RX)
4629 		fw_pause |= FW_PORT_CAP32_FC_RX;
4630 	if (cc_pause & PAUSE_TX)
4631 		fw_pause |= FW_PORT_CAP32_FC_TX;
4632 	if (!(cc_pause & PAUSE_AUTONEG))
4633 		fw_pause |= FW_PORT_CAP32_FORCE_PAUSE;
4634 
4635 	return fw_pause;
4636 }
4637 
4638 /* Translate Firmware Forward Error Correction specification to Common Code */
4639 static inline cc_fec_t fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
4640 {
4641 	cc_fec_t cc_fec = 0;
4642 
4643 	if (fw_fec & FW_PORT_CAP32_FEC_RS)
4644 		cc_fec |= FEC_RS;
4645 	if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
4646 		cc_fec |= FEC_BASER_RS;
4647 
4648 	return cc_fec;
4649 }
4650 
4651 /* Translate Common Code Forward Error Correction specification to Firmware */
4652 static inline fw_port_cap32_t cc_to_fwcap_fec(cc_fec_t cc_fec)
4653 {
4654 	fw_port_cap32_t fw_fec = 0;
4655 
4656 	if (cc_fec & FEC_RS)
4657 		fw_fec |= FW_PORT_CAP32_FEC_RS;
4658 	if (cc_fec & FEC_BASER_RS)
4659 		fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
4660 
4661 	return fw_fec;
4662 }
4663 
4664 /**
4665  *	t4_link_acaps - compute Link Advertised Port Capabilities
4666  *	@adapter: the adapter
4667  *	@port: the Port ID
4668  *	@lc: the Port's Link Configuration
4669  *
4670  *	Synthesize the Advertised Port Capabilities we'll be using based on
4671  *	the base Advertised Port Capabilities (which have been filtered by
4672  *	ADVERT_MASK) plus the individual controls for things like Pause
4673  *	Frames, Forward Error Correction, MDI, etc.
4674  */
4675 fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
4676 			      struct link_config *lc)
4677 {
4678 	unsigned int fw_mdi =
4679 		(V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
4680 	fw_port_cap32_t fw_fc, fw_fec, acaps;
4681 	cc_fec_t cc_fec;
4682 
4683 	/* Convert driver coding of Pause Frame Flow Control settings into the
4684 	 * Firmware's API.
4685 	 */
4686 	fw_fc = cc_to_fwcap_pause(lc->requested_fc);
4687 
4688 	/* Convert Common Code Forward Error Control settings into the
4689 	 * Firmware's API.  If the current Requested FEC has "Automatic"
4690 	 * (IEEE 802.3) specified, then we use whatever the Firmware
4691 	 * sent us as part of it's IEEE 802.3-based interpratation of
4692 	 * the Transceiver Module EPROM FEC parameters.  Otherwise we
4693 	 * use whatever is in the current Requested FEC settings.
4694 	 */
4695 	if (lc->requested_fec & FEC_AUTO)
4696 		cc_fec = fwcap_to_cc_fec(lc->def_acaps);
4697 	else
4698 		cc_fec = lc->requested_fec;
4699 	fw_fec = cc_to_fwcap_fec(cc_fec);
4700 
4701 	/* Figure out what our Requested Port Capabilities are going to be.
4702 	 * Note parallel structure in t4_handle_get_port_info() and
4703 	 * init_link_config().
4704 	 */
4705 	if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
4706 		acaps = lc->acaps | fw_fc | fw_fec;
4707 		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
4708 		lc->fec = cc_fec;
4709 	} else if (lc->autoneg == AUTONEG_DISABLE) {
4710 		acaps = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
4711 		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
4712 		lc->fec = cc_fec;
4713 	} else
4714 		acaps = lc->acaps | fw_fc | fw_fec | fw_mdi;
4715 
4716 	/* Some Requested Port Capabilities are trivially wrong if they exceed
4717 	 * the Physical Port Capabilities.  We can check that here and provide
4718 	 * moderately useful feedback in the system log.
4719 	 *
4720 	 * Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so
4721 	 * we need to exclude this from this check in order to maintain
4722 	 * compatibility ...
4723 	 */
4724 	if ((acaps & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
4725 		CH_ERR(adapter,
4726 		       "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
4727 		       acaps, lc->pcaps);
4728 		return 0;
4729 	}
4730 
4731 	return acaps;
4732 }
4733 
4734 /**
4735  *	t4_link_l1cfg_core - apply link configuration to MAC/PHY
4736  *	@adapter: the adapter
4737  *	@mbox: the Firmware Mailbox to use
4738  *	@port: the Port ID
4739  *	@lc: the Port's Link Configuration
4740  *	@sleep_ok: if true we may sleep while awaiting command completion
4741  *	@timeout: time to wait for command to finish before timing out
4742  *		(negative implies @sleep_ok=false)
4743  *
4744  *	Set up a port's MAC and PHY according to a desired link configuration.
4745  *	- If the PHY can auto-negotiate first decide what to advertise, then
4746  *	  enable/disable auto-negotiation as desired, and reset.
4747  *	- If the PHY does not auto-negotiate just reset it.
4748  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
4749  *	  otherwise do it later based on the outcome of auto-negotiation.
4750  */
4751 int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
4752 		       unsigned int port, struct link_config *lc,
4753 		       bool sleep_ok, int timeout)
4754 {
4755 	unsigned int fw_caps = adapter->params.fw_caps_support;
4756 	fw_port_cap32_t rcap;
4757 	struct fw_port_cmd cmd;
4758 	int ret;
4759 
4760 	/* Filter out nonsense.
4761 	 */
4762 	if (!(lc->pcaps & FW_PORT_CAP32_ANEG) &&
4763 	    lc->autoneg == AUTONEG_ENABLE)
4764 		return -EINVAL;
4765 
4766 	/* Compute our Requested Port Capabilities and send that on to the
4767 	 * Firmware.
4768 	 */
4769 	rcap = t4_link_acaps(adapter, port, lc);
4770 	if(!rcap)
4771 		return -EINVAL;
4772 	memset(&cmd, 0, sizeof(cmd));
4773 	cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
4774 				       F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
4775 				       V_FW_PORT_CMD_PORTID(port));
4776 	cmd.action_to_len16 =
4777 		cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
4778 						 ? FW_PORT_ACTION_L1_CFG
4779 						 : FW_PORT_ACTION_L1_CFG32) |
4780 			    FW_LEN16(cmd));
4781 	if (fw_caps == FW_CAPS16)
4782 		cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
4783 	else
4784 		cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
4785 	ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL,
4786 				      sleep_ok, timeout);
4787 
4788 	/* Unfortunately, even if the Requested Port Capabilities "fit" within
4789 	 * the Physical Port Capabilities, some combinations of features may
4790 	 * still not be legal.  For example, 40Gb/s and Reed-Solomon Forward
4791 	 * Error Correction.  So if the Firmware rejects the L1 Configure
4792 	 * request, flag that here.
4793 	 */
4794 	if (ret) {
4795 		CH_ERR(adapter,
4796 		       "Requested Port Capabilities %#x rejected, error %d\n",
4797 		       rcap, -ret);
4798 		return ret;
4799 	}
4800 	return 0;
4801 }
4802 
4803 /**
4804  *	t4_restart_aneg - restart autonegotiation
4805  *	@adap: the adapter
4806  *	@mbox: mbox to use for the FW command
4807  *	@port: the port id
4808  *
4809  *	Restarts autonegotiation for the selected port.
4810  */
4811 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
4812 {
4813 	unsigned int fw_caps = adap->params.fw_caps_support;
4814 	struct fw_port_cmd c;
4815 
4816 	memset(&c, 0, sizeof(c));
4817 	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
4818 				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
4819 				     V_FW_PORT_CMD_PORTID(port));
4820 	c.action_to_len16 =
4821 		cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
4822 						 ? FW_PORT_ACTION_L1_CFG
4823 						 : FW_PORT_ACTION_L1_CFG32) |
4824 			    FW_LEN16(c));
4825 	if (fw_caps == FW_CAPS16)
4826 		c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
4827 	else
4828 		c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG);
4829 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4830 }
4831 
/* Platform-specific callback invoked when a matching interrupt is seen. */
typedef void (*int_handler_t)(struct adapter *adap);

/*
 * One entry of a table-driven interrupt decoder (see
 * t4_handle_intr_status()).  Tables are terminated by an entry with
 * mask == 0.
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler;	/* platform-specific int handler */
};
4841 
4842 /**
4843  *	t4_handle_intr_status - table driven interrupt handler
4844  *	@adapter: the adapter that generated the interrupt
4845  *	@reg: the interrupt status register to process
4846  *	@acts: table of interrupt actions
4847  *
4848  *	A table driven interrupt handler that applies a set of masks to an
4849  *	interrupt status word and performs the corresponding actions if the
4850  *	interrupts described by the mask have occurred.  The actions include
4851  *	optionally emitting a warning or alert message.  The table is terminated
4852  *	by an entry specifying mask 0.  Returns the number of fatal interrupt
4853  *	conditions.
4854  */
4855 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
4856 				 const struct intr_info *acts)
4857 {
4858 	int fatal = 0;
4859 	unsigned int mask = 0;
4860 	unsigned int status = t4_read_reg(adapter, reg);
4861 
4862 	for ( ; acts->mask; ++acts) {
4863 		if (!(status & acts->mask))
4864 			continue;
4865 		if (acts->fatal) {
4866 			fatal++;
4867 			CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
4868 				  status & acts->mask);
4869 		} else if (acts->msg)
4870 			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
4871 				 status & acts->mask);
4872 		if (acts->int_handler)
4873 			acts->int_handler(adapter);
4874 		mask |= acts->mask;
4875 	}
4876 	status &= mask;
4877 	if (status)	/* clear processed interrupts */
4878 		t4_write_reg(adapter, reg, status);
4879 	return fatal;
4880 }
4881 
4882 /*
4883  * Interrupt handler for the PCIE module.
4884  */
4885 static void pcie_intr_handler(struct adapter *adapter)
4886 {
4887 	static const struct intr_info sysbus_intr_info[] = {
4888 		{ F_RNPP, "RXNP array parity error", -1, 1 },
4889 		{ F_RPCP, "RXPC array parity error", -1, 1 },
4890 		{ F_RCIP, "RXCIF array parity error", -1, 1 },
4891 		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
4892 		{ F_RFTP, "RXFT array parity error", -1, 1 },
4893 		{ 0 }
4894 	};
4895 	static const struct intr_info pcie_port_intr_info[] = {
4896 		{ F_TPCP, "TXPC array parity error", -1, 1 },
4897 		{ F_TNPP, "TXNP array parity error", -1, 1 },
4898 		{ F_TFTP, "TXFT array parity error", -1, 1 },
4899 		{ F_TCAP, "TXCA array parity error", -1, 1 },
4900 		{ F_TCIP, "TXCIF array parity error", -1, 1 },
4901 		{ F_RCAP, "RXCA array parity error", -1, 1 },
4902 		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
4903 		{ F_RDPE, "Rx data parity error", -1, 1 },
4904 		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
4905 		{ 0 }
4906 	};
4907 	static const struct intr_info pcie_intr_info[] = {
4908 		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
4909 		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
4910 		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
4911 		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
4912 		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
4913 		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
4914 		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
4915 		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
4916 		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
4917 		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
4918 		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
4919 		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
4920 		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
4921 		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
4922 		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
4923 		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
4924 		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
4925 		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
4926 		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
4927 		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
4928 		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
4929 		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
4930 		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
4931 		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
4932 		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
4933 		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
4934 		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
4935 		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
4936 		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
4937 		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
4938 		  0 },
4939 		{ 0 }
4940 	};
4941 
4942 	static struct intr_info t5_pcie_intr_info[] = {
4943 		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
4944 		  -1, 1 },
4945 		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
4946 		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
4947 		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
4948 		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
4949 		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
4950 		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
4951 		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
4952 		  -1, 1 },
4953 		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
4954 		  -1, 1 },
4955 		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
4956 		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
4957 		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
4958 		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
4959 		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
4960 		  -1, 1 },
4961 		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
4962 		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
4963 		{ F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
4964 		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
4965 		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
4966 		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
4967 		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
4968 		{ F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
4969 		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
4970 		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
4971 		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
4972 		  -1, 1 },
4973 		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
4974 		  -1, 1 },
4975 		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
4976 		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
4977 		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
4978 		{ F_READRSPERR, "Outbound read error", -1,
4979 		  0 },
4980 		{ 0 }
4981 	};
4982 
4983 	int fat;
4984 
4985 	if (is_t4(adapter->params.chip))
4986 		fat = t4_handle_intr_status(adapter,
4987 				A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
4988 				sysbus_intr_info) +
4989 			t4_handle_intr_status(adapter,
4990 					A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
4991 					pcie_port_intr_info) +
4992 			t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
4993 					      pcie_intr_info);
4994 	else
4995 		fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
4996 					    t5_pcie_intr_info);
4997 	if (fat)
4998 		t4_fatal_err(adapter);
4999 }
5000 
5001 /*
5002  * TP interrupt handler.
5003  */
5004 static void tp_intr_handler(struct adapter *adapter)
5005 {
5006 	static const struct intr_info tp_intr_info[] = {
5007 		{ 0x3fffffff, "TP parity error", -1, 1 },
5008 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
5009 		{ 0 }
5010 	};
5011 
5012 	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
5013 		t4_fatal_err(adapter);
5014 }
5015 
5016 /*
5017  * SGE interrupt handler.
5018  */
5019 static void sge_intr_handler(struct adapter *adapter)
5020 {
5021 	u32 v = 0, perr;
5022 	u32 err;
5023 
5024 	static const struct intr_info sge_intr_info[] = {
5025 		{ F_ERR_CPL_EXCEED_IQE_SIZE,
5026 		  "SGE received CPL exceeding IQE size", -1, 1 },
5027 		{ F_ERR_INVALID_CIDX_INC,
5028 		  "SGE GTS CIDX increment too large", -1, 0 },
5029 		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
5030 		{ F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
5031 		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
5032 		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
5033 		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
5034 		  0 },
5035 		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
5036 		  0 },
5037 		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
5038 		  0 },
5039 		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
5040 		  0 },
5041 		{ F_ERR_ING_CTXT_PRIO,
5042 		  "SGE too many priority ingress contexts", -1, 0 },
5043 		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
5044 		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
5045 		{ F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 |
5046 		  F_ERR_PCIE_ERROR2 | F_ERR_PCIE_ERROR3,
5047 		  "SGE PCIe error for a DBP thread", -1, 0 },
5048 		{ 0 }
5049 	};
5050 
5051 	static struct intr_info t4t5_sge_intr_info[] = {
5052 		{ F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
5053 		{ F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
5054 		{ F_ERR_EGR_CTXT_PRIO,
5055 		  "SGE too many priority egress contexts", -1, 0 },
5056 		{ 0 }
5057 	};
5058 
5059 	/*
5060  	* For now, treat below interrupts as fatal so that we disable SGE and
5061  	* get better debug */
5062 	static struct intr_info t6_sge_intr_info[] = {
5063 		{ F_FATAL_WRE_LEN,
5064 		  "SGE Actual WRE packet is less than advertized length",
5065 		  -1, 1 },
5066 		{ 0 }
5067 	};
5068 
5069 	perr = t4_read_reg(adapter, A_SGE_INT_CAUSE1);
5070 	if (perr) {
5071 		v |= perr;
5072 		CH_ALERT(adapter, "SGE Cause1 Parity Error %#x\n", perr);
5073 	}
5074 	perr = t4_read_reg(adapter, A_SGE_INT_CAUSE2);
5075 	if (perr) {
5076 		v |= perr;
5077 		CH_ALERT(adapter, "SGE Cause2 Parity Error %#x\n", perr);
5078 	}
5079 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) >= CHELSIO_T5) {
5080 		perr = t4_read_reg(adapter, A_SGE_INT_CAUSE5);
5081 		if (perr) {
5082 			v |= perr;
5083 			CH_ALERT(adapter, "SGE Cause5 Parity Error %#x\n", perr);
5084 		}
5085 	}
5086 
5087 	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
5088 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
5089 		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
5090 					   t4t5_sge_intr_info);
5091 	else
5092 		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
5093 					   t6_sge_intr_info);
5094 
5095 	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
5096 	if (err & F_ERROR_QID_VALID) {
5097 		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
5098 		if (err & F_UNCAPTURED_ERROR)
5099 			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
5100 		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
5101 			     F_UNCAPTURED_ERROR);
5102 	}
5103 
5104 	if (v != 0)
5105 		t4_fatal_err(adapter);
5106 }
5107 
5108 #define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
5109 		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
5110 #define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
5111 		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
5112 
5113 /*
5114  * CIM interrupt handler.
5115  */
static void cim_intr_handler(struct adapter *adapter)
{
	/* Causes decoded from A_CIM_HOST_INT_CAUSE; all are flagged fatal. */
	static const struct intr_info cim_intr_info[] = {
		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ F_TIMER0INT, "CIM TIMER0 interrupt", -1, 1 },
		{ 0 }
	};
	/* Causes decoded from A_CIM_HOST_UPACC_INT_CAUSE (uP access errors). */
	static const struct intr_info cim_upintr_info[] = {
		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};
	u32 val, fw_err;
	int fat;

	/* Report a firmware-detected error, if any, before decoding causes. */
	fw_err = t4_read_reg(adapter, A_PCIE_FW);
	if (fw_err & F_PCIE_FW_ERR)
		t4_report_fw_error(adapter);

	/* When the Firmware detects an internal error which normally wouldn't
	 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order
	 * to make sure the Host sees the Firmware Crash.  So if we have a
	 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0
	 * interrupt.
	 */
	val = t4_read_reg(adapter, A_CIM_HOST_INT_CAUSE);
	if (val & F_TIMER0INT)
		if (!(fw_err & F_PCIE_FW_ERR) ||
		    (G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH))
			t4_write_reg(adapter, A_CIM_HOST_INT_CAUSE,
				     F_TIMER0INT);

	/* Decode both cause registers; any hit in either is fatal. */
	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}
5187 
5188 /*
5189  * ULP RX interrupt handler.
5190  */
5191 static void ulprx_intr_handler(struct adapter *adapter)
5192 {
5193 	static const struct intr_info ulprx_intr_info[] = {
5194 		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
5195 		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
5196 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
5197 		{ 0 }
5198 	};
5199 
5200 	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
5201 		t4_fatal_err(adapter);
5202 }
5203 
5204 /*
5205  * ULP TX interrupt handler.
5206  */
static void ulptx_intr_handler(struct adapter *adapter)
{
	/* The per-channel PBL out-of-bounds causes are logged but not marked
	 * fatal (last field 0); the parity-error summary is fatal (1).
	 */
	static const struct intr_info ulptx_intr_info[] = {
		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
		t4_fatal_err(adapter);
}
5225 
5226 /*
5227  * PM TX interrupt handler.
5228  */
static void pmtx_intr_handler(struct adapter *adapter)
{
	/* Causes decoded from A_PM_TX_INT_CAUSE; all are flagged fatal. */
	static const struct intr_info pmtx_intr_info[] = {
		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ 0xffffff0, "PMTX framing error", -1, 1 },
		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
		  1 },
		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
		t4_fatal_err(adapter);
}
5248 
5249 /*
5250  * PM RX interrupt handler.
5251  */
static void pmrx_intr_handler(struct adapter *adapter)
{
	/* Causes decoded from A_PM_RX_INT_CAUSE; all are flagged fatal. */
	static const struct intr_info pmrx_intr_info[] = {
		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ 0x3ffff0, "PMRX framing error", -1, 1 },
		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
		  1 },
		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
		t4_fatal_err(adapter);
}
5268 
5269 /*
5270  * CPL switch interrupt handler.
5271  */
5272 static void cplsw_intr_handler(struct adapter *adapter)
5273 {
5274 	static const struct intr_info cplsw_intr_info[] = {
5275 		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
5276 		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
5277 		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
5278 		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
5279 		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
5280 		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
5281 		{ 0 }
5282 	};
5283 
5284 	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
5285 		t4_fatal_err(adapter);
5286 }
5287 
5288 /*
5289  * LE interrupt handler.
5290  */
5291 static void le_intr_handler(struct adapter *adap)
5292 {
5293 	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
5294 	static const struct intr_info le_intr_info[] = {
5295 		{ F_LIPMISS, "LE LIP miss", -1, 0 },
5296 		{ F_LIP0, "LE 0 LIP error", -1, 0 },
5297 		{ F_PARITYERR, "LE parity error", -1, 1 },
5298 		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
5299 		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
5300 		{ 0 }
5301 	};
5302 
5303 	static struct intr_info t6_le_intr_info[] = {
5304 		/* log an error for HASHTBLMEMCRCERR and clear the bit */
5305 		{ F_T6_HASHTBLMEMCRCERR, "LE hash table mem crc error", -1, 0 },
5306 		{ F_T6_LIPMISS, "LE LIP miss", -1, 0 },
5307 		{ F_T6_LIP0, "LE 0 LIP error", -1, 0 },
5308 		{ F_TCAMINTPERR, "LE parity error", -1, 1 },
5309 		{ F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
5310 		{ F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
5311 		{ 0 }
5312 	};
5313 
5314 	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
5315 				  (chip_ver <= CHELSIO_T5) ?
5316 				  le_intr_info : t6_le_intr_info))
5317 		t4_fatal_err(adap);
5318 }
5319 
5320 /*
5321  * MPS interrupt handler.
5322  */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	/* Same as mps_tx_intr_info minus F_BUBBLE, which is normal on T6. */
	static const struct intr_info t6_mps_tx_intr_info[] = {
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		/* MPS Tx Bubble is normal for T6 */
		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
		  1 },
		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	/* Decode every MPS sub-block cause register; sum the fatal hits. */
	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
				    is_t6(adapter->params.chip)
				    ? t6_mps_tx_intr_info
				    : mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	/* Clear the top-level MPS cause register and flush the write. */
	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
	t4_read_reg(adapter, A_MPS_INT_CAUSE);	/* flush */
	if (fat)
		t4_fatal_err(adapter);
}
5403 
5404 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
5405 		      F_ECC_UE_INT_CAUSE)
5406 
5407 /*
5408  * EDC/MC interrupt handler.
5409  */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	/* Indexed by memory controller id (EDC0, EDC1, MC/MC0, MC1). */
	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };

	unsigned int addr, cnt_addr, v;

	/* Select the INT_CAUSE / ECC_STATUS register pair for this memory. */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
	} else if (idx == MEM_MC) {
		if (is_t4(adapter->params.chip)) {
			addr = A_MC_INT_CAUSE;
			cnt_addr = A_MC_ECC_STATUS;
		} else {
			/* T5+ use the per-port MC register layout. */
			addr = A_MC_P_INT_CAUSE;
			cnt_addr = A_MC_P_ECC_STATUS;
		}
	} else {
		addr = MC_REG(A_MC_P_INT_CAUSE, 1);
		cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & F_PERR_INT_CAUSE)
		CH_ALERT(adapter, "%s FIFO parity error\n",
			  name[idx]);
	if (v & F_ECC_CE_INT_CAUSE) {
		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));

		if (idx <= MEM_EDC1)
			t4_edc_err_read(adapter, idx);

		/* Write the full-field mask back to the CE count field. */
		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
		CH_WARN_RATELIMIT(adapter,
				  "%u %s correctable ECC data error%s\n",
				  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & F_ECC_UE_INT_CAUSE)
		CH_ALERT(adapter,
			 "%s uncorrectable ECC data error\n", name[idx]);

	/* Clear the handled causes; parity/uncorrectable errors are fatal. */
	t4_write_reg(adapter, addr, v);
	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}
5455 
5456 /*
5457  * MA interrupt handler.
5458  */
static void ma_intr_handler(struct adapter *adapter)
{
	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);

	if (status & F_MEM_PERR_INT_CAUSE) {
		CH_ALERT(adapter,
			  "MA parity error, parity status %#x\n",
			  t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
		/* T5 carries a second parity-status register. */
		if (is_t5(adapter->params.chip))
			CH_ALERT(adapter,
				  "MA parity error, parity status %#x\n",
				  t4_read_reg(adapter,
					      A_MA_PARITY_ERROR_STATUS2));
	}
	if (status & F_MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
		/* Wrap address field is in 16-byte units, hence the << 4. */
		CH_ALERT(adapter, "MA address wrap-around error by "
			  "client %u to address %#x\n",
			  G_MEM_WRAP_CLIENT_NUM(v),
			  G_MEM_WRAP_ADDRESS(v) << 4);
	}
	/* Clear all pending causes; any MA interrupt is treated as fatal. */
	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
	t4_fatal_err(adapter);
}
5483 
5484 /*
5485  * SMB interrupt handler.
5486  */
5487 static void smb_intr_handler(struct adapter *adap)
5488 {
5489 	static const struct intr_info smb_intr_info[] = {
5490 		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
5491 		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
5492 		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
5493 		{ 0 }
5494 	};
5495 
5496 	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
5497 		t4_fatal_err(adap);
5498 }
5499 
5500 /*
5501  * NC-SI interrupt handler.
5502  */
static void ncsi_intr_handler(struct adapter *adap)
{
	/* Causes decoded from A_NCSI_INT_CAUSE; all are flagged fatal. */
	static const struct intr_info ncsi_intr_info[] = {
		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
		t4_fatal_err(adap);
}
5516 
5517 /*
5518  * XGMAC interrupt handler.
5519  */
5520 static void xgmac_intr_handler(struct adapter *adap, int port)
5521 {
5522 	u32 v, int_cause_reg;
5523 
5524 	if (is_t4(adap->params.chip))
5525 		int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
5526 	else
5527 		int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
5528 
5529 	v = t4_read_reg(adap, int_cause_reg);
5530 
5531 	v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
5532 	if (!v)
5533 		return;
5534 
5535 	if (v & F_TXFIFO_PRTY_ERR)
5536 		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
5537 			  port);
5538 	if (v & F_RXFIFO_PRTY_ERR)
5539 		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
5540 			  port);
5541 	t4_write_reg(adap, int_cause_reg, v);
5542 	t4_fatal_err(adap);
5543 }
5544 
5545 /*
5546  * PL Parity Error interrupt handler.
5547  */
5548 static void pl_perr_intr_handler(struct adapter *adap)
5549 {
5550 	static const struct intr_info pl_perr_info[] = {
5551 		{ F_UART, "UART Parity Error", -1, },
5552 		{ F_ULP_TX, "ULP TX Parity Error", -1 },
5553 		{ F_SGE, "SGE Parity Error", -1 },
5554 		{ F_HMA, "HMA Parity Error", -1 },
5555 		{ F_CPL_SWITCH, "CPL Switch Parity Error", -1 },
5556 		{ F_ULP_RX, "ULP RX Parity Error", -1 },
5557 		{ F_PM_RX, "PM RX Parity Error", -1 },
5558 		{ F_PM_TX, "PM TX Parity Error", -1 },
5559 		{ F_MA, "MA Parity Error", -1 },
5560 		{ F_TP, "TP Parity Error", -1 },
5561 		{ F_LE, "LE Parity Error", -1 },
5562 		{ F_EDC1, "EDC1 Parity Error", -1 },
5563 		{ F_EDC0, "EDC0 Parity Error", -1 },
5564 		{ F_MC, "MC Parity Error", -1 },
5565 		{ F_PCIE, "PCIE Parity Error", -1 },
5566 		{ F_PMU, "PMU Parity Error", -1 },
5567 		{ F_XGMAC_KR1, "XGMAC_KR1 Parity Error", -1 },
5568 		{ F_XGMAC_KR0, "XGMAC_KR0 Parity Error", -1 },
5569 		{ F_XGMAC1, "XGMAC1 Parity Error", -1 },
5570 		{ F_XGMAC0, "XGMAC0 Parity Error", -1 },
5571 		{ F_SMB, "SMB Parity Error", -1 },
5572 		{ F_SF, "SF Parity Error", -1 },
5573 		{ F_PL, "PL Parity Error", -1 },
5574 		{ F_NCSI, "NCSI Parity Error", -1 },
5575 		{ F_MPS, "MPS Parity Error", -1 },
5576 		{ F_MI, "MI Parity Error", -1 },
5577 		{ F_DBG, "DBG Parity Error", -1 },
5578 		{ F_I2CM, "I2CM Parity Error", -1 },
5579 		{ F_CIM, "CIM Parity Error", -1 },
5580 	};
5581 
5582 	t4_handle_intr_status(adap, A_PL_PERR_CAUSE, pl_perr_info);
5583 	/* pl_intr_handler() will do the t4_fatal_err(adap) */
5584 }
5585 
5586 /*
5587  * PL interrupt handler.
5588  */
5589 static void pl_intr_handler(struct adapter *adap)
5590 {
5591 	static const struct intr_info pl_intr_info[] = {
5592 		{ F_FATALPERR, "Fatal parity error", -1, 1,
5593 		  pl_perr_intr_handler },
5594 		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
5595 		{ 0 }
5596 	};
5597 
5598 	static struct intr_info t5_pl_intr_info[] = {
5599 		{ F_FATALPERR, "Fatal parity error", -1, 1,
5600 		  pl_perr_intr_handler },
5601 		{ 0 }
5602 	};
5603 
5604 	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
5605 				  is_t4(adap->params.chip) ?
5606 				  pl_intr_info : t5_pl_intr_info))
5607 		t4_fatal_err(adap);
5608 }
5609 
5610 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
5611 
5612 /**
5613  *	t4_slow_intr_handler - control path interrupt handler
5614  *	@adapter: the adapter
5615  *
5616  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
5617  *	The designation 'slow' is because it involves register reads, while
5618  *	data interrupts typically don't involve any MMIOs.
5619  */
int t4_slow_intr_handler(struct adapter *adapter)
{
	/* There are rare cases where a PL_INT_CAUSE bit may end up getting
	 * set when the corresponding PL_INT_ENABLE bit isn't set.  It's
	 * easiest just to mask that case here.
	 */
	u32 raw_cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
	u32 enable = t4_read_reg(adapter, A_PL_INT_ENABLE);
	u32 cause = raw_cause & enable;

	/* Not our interrupt -- tell the caller nothing was handled. */
	if (!(cause & GLBL_INTR_MASK))
		return 0;

	/* Disable all the interrupt(bits) in PL_INT_ENABLE */
	t4_write_reg(adapter, A_PL_INT_ENABLE, 0);
	(void)t4_read_reg(adapter, A_PL_INT_ENABLE); /* flush */

	/* Dispatch each asserted cause bit to its module handler. */
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_MPS)
		mps_intr_handler(adapter);
	if (cause & F_NCSI)
		ncsi_intr_handler(adapter);
	if (cause & F_PL)
		pl_intr_handler(adapter);
	if (cause & F_SMB)
		smb_intr_handler(adapter);
	if (cause & F_MAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & F_MAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & F_MAC2)
		xgmac_intr_handler(adapter, 2);
	if (cause & F_MAC3)
		xgmac_intr_handler(adapter, 3);
	if (cause & F_PCIE)
		pcie_intr_handler(adapter);
	if (cause & F_MC0)
		mem_intr_handler(adapter, MEM_MC);
	if (is_t5(adapter->params.chip) && (cause & F_MC1))
		mem_intr_handler(adapter, MEM_MC1);
	if (cause & F_EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & F_EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & F_LE)
		le_intr_handler(adapter);
	if (cause & F_TP)
		tp_intr_handler(adapter);
	if (cause & F_MA)
		ma_intr_handler(adapter);
	if (cause & F_PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_SGE)
		sge_intr_handler(adapter);
	if (cause & F_ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, A_PL_INT_CAUSE, raw_cause & GLBL_INTR_MASK);

	/* re-enable the interrupts (bits that were disabled
	 * earlier in PL_INT_ENABLE)
	 */
	t4_write_reg(adapter, A_PL_INT_ENABLE, enable);
	(void)t4_read_reg(adapter, A_PL_INT_ENABLE); /* flush */
	return 1;
}
5694 
5695 /**
5696  *	t4_intr_enable - enable interrupts
5697  *	@adapter: the adapter whose interrupts should be enabled
5698  *
5699  *	Enable PF-specific interrupts for the calling function and the top-level
5700  *	interrupt concentrator for global interrupts.  Interrupts are already
5701  *	enabled at each module,	here we just enable the roots of the interrupt
5702  *	hierarchies.
5703  *
5704  *	Note: this function should be called only when the driver manages
5705  *	non PF-specific interrupts from the various HW modules.  Only one PCI
5706  *	function at a time should be doing this.
5707  */
void t4_intr_enable(struct adapter *adapter)
{
	u32 val = 0;
	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	/* The PF field of PL_WHOAMI is decoded differently on T6. */
	u32 pf = (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5
		  ? G_SOURCEPF(whoami)
		  : G_T6_SOURCEPF(whoami));

	/* Chip-specific extra SGE error-interrupt enables. */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
	else
		val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
		     F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
		     F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	/* Route global interrupts to this PF in the top-level interrupt map. */
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
}
5730 
5731 /**
5732  *	t4_intr_disable - disable interrupts
5733  *	@adapter: the adapter whose interrupts should be disabled
5734  *
5735  *	Disable interrupts.  We only disable the top-level interrupt
5736  *	concentrators.  The caller must be a PCI function managing global
5737  *	interrupts.
5738  */
5739 void t4_intr_disable(struct adapter *adapter)
5740 {
5741 	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
5742 	u32 pf = (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5
5743 		  ? G_SOURCEPF(whoami)
5744 		  : G_T6_SOURCEPF(whoami));
5745 
5746 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
5747 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
5748 }
5749 
5750 unsigned int t4_chip_rss_size(struct adapter *adap)
5751 {
5752 	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
5753 		return RSS_NENTRIES;
5754 	else
5755 		return T6_RSS_NENTRIES;
5756 }
5757 
5758 /**
5759  *	t4_config_rss_range - configure a portion of the RSS mapping table
5760  *	@adapter: the adapter
5761  *	@mbox: mbox to use for the FW command
5762  *	@viid: virtual interface whose RSS subtable is to be written
5763  *	@start: start entry in the table to write
5764  *	@n: how many table entries to write
5765  *	@rspq: values for the "response queue" (Ingress Queue) lookup table
5766  *	@nrspq: number of values in @rspq
5767  *
5768  *	Programs the selected part of the VI's RSS mapping table with the
5769  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
5770  *	until the full table range is populated.
5771  *
5772  *	The caller must ensure the values in @rspq are in the range allowed for
5773  *	@viid.
5774  */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;	/* one past the last entry */
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/* Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/* Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/* Advance the table position past the "nq" entries this
		 * command covers, ready for the next loop iteration.
		 */
		start += nq;
		n -= nq;

		/* While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/* Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				/* Wrap to reuse @rspq when it is exhausted. */
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/* Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
5847 
5848 /**
5849  *	t4_config_glbl_rss - configure the global RSS mode
5850  *	@adapter: the adapter
5851  *	@mbox: mbox to use for the FW command
5852  *	@mode: global RSS mode
5853  *	@flags: mode-specific flags
5854  *
5855  *	Sets the global RSS mode.
5856  */
5857 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5858 		       unsigned int flags)
5859 {
5860 	struct fw_rss_glb_config_cmd c;
5861 
5862 	memset(&c, 0, sizeof(c));
5863 	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
5864 				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
5865 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5866 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5867 		c.u.manual.mode_pkd =
5868 			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5869 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5870 		c.u.basicvirtual.mode_keymode =
5871 			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5872 		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
5873 	} else
5874 		return -EINVAL;
5875 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5876 }
5877 
5878 /**
5879  *	t4_config_vi_rss - configure per VI RSS settings
5880  *	@adapter: the adapter
5881  *	@mbox: mbox to use for the FW command
5882  *	@viid: the VI id
5883  *	@flags: RSS flags
5884  *	@defq: id of the default RSS queue for the VI.
5885  *	@skeyidx: RSS secret key table index for non-global mode
5886  *	@skey: RSS vf_scramble key for VI.
5887  *
5888  *	Configures VI-specific RSS properties.
5889  */
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
		     unsigned int flags, unsigned int defq, unsigned int skeyidx,
		     unsigned int skey)
{
	struct fw_rss_vi_config_cmd c;

	/* Build a basicvirtual-mode VI RSS config command. */
	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
	c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
					V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
	c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);

	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}
5909 
/* Read one RSS lookup-table row into @val.  Returns 0 on success or
 * -EAGAIN if the row-valid bit never asserted (see t4_wait_op_done_val()).
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	/* NOTE(review): the 0xfff00000 upper bits appear to arm the lookup
	 * for @row; completion is then polled via F_LKPTBLROWVLD -- confirm
	 * against the TP register documentation.
	 */
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}
5917 
5918 /**
5919  *	t4_read_rss - read the contents of the RSS mapping table
5920  *	@adapter: the adapter
5921  *	@map: holds the contents of the RSS mapping table
5922  *
5923  *	Reads the contents of the RSS hash->queue mapping table.
5924  */
5925 int t4_read_rss(struct adapter *adapter, u16 *map)
5926 {
5927 	u32 val;
5928 	int i, ret, nentries;
5929 
5930 	nentries = t4_chip_rss_size(adapter);
5931 	for (i = 0; i < nentries / 2; ++i) {
5932 		ret = rd_rss_row(adapter, i, &val);
5933 		if (ret)
5934 			return ret;
5935 		*map++ = G_LKPTBLQUEUE0(val);
5936 		*map++ = G_LKPTBLQUEUE1(val);
5937 	}
5938 	return 0;
5939 }
5940 
5941 /**
5942  * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5943  * @adap: the adapter
5944  * @cmd: TP fw ldst address space type
5945  * @vals: where the indirect register values are stored/written
5946  * @nregs: how many indirect registers to read/write
 * @start_index: index of first indirect register to read/write
5948  * @rw: Read (1) or Write (0)
5949  * @sleep_ok: if true we may sleep while awaiting command completion
5950  *
5951  * Access TP indirect registers through LDST
5952  **/
5953 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5954 			    unsigned int nregs, unsigned int start_index,
5955 			    unsigned int rw, bool sleep_ok)
5956 {
5957 	int ret = 0;
5958 	unsigned int i;
5959 	struct fw_ldst_cmd c;
5960 
5961 	for (i = 0; i < nregs; i++) {
5962 		memset(&c, 0, sizeof(c));
5963 		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
5964 						F_FW_CMD_REQUEST |
5965 						(rw ? F_FW_CMD_READ :
5966 						      F_FW_CMD_WRITE) |
5967 						V_FW_LDST_CMD_ADDRSPACE(cmd));
5968 		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5969 
5970 		c.u.addrval.addr = cpu_to_be32(start_index + i);
5971 		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
5972 		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
5973 				      sleep_ok);
5974 		if (ret)
5975 			return ret;
5976 
5977 		if (rw)
5978 			vals[i] = be32_to_cpu(c.u.addrval.val);
5979 	}
5980 	return 0;
5981 }
5982 
5983 /**
5984  * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5985  * @adap: the adapter
5986  * @reg_addr: Address Register
5987  * @reg_data: Data register
5988  * @buff: where the indirect register values are stored/written
5989  * @nregs: how many indirect registers to read/write
5990  * @start_index: index of first indirect register to read/write
5991  * @rw: READ(1) or WRITE(0)
5992  * @sleep_ok: if true we may sleep while awaiting command completion
5993  *
5994  * Read/Write TP indirect registers through LDST if possible.
5995  * Else, use backdoor access
5996  **/
5997 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5998 			      u32 *buff, u32 nregs, u32 start_index, int rw,
5999 			      bool sleep_ok)
6000 {
6001 	int rc = -EINVAL;
6002 	int cmd;
6003 
6004 	switch (reg_addr) {
6005 	case A_TP_PIO_ADDR:
6006 		cmd = FW_LDST_ADDRSPC_TP_PIO;
6007 		break;
6008 	case A_TP_TM_PIO_ADDR:
6009 		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
6010 		break;
6011 	case A_TP_MIB_INDEX:
6012 		cmd = FW_LDST_ADDRSPC_TP_MIB;
6013 		break;
6014 	default:
6015 		goto indirect_access;
6016 	}
6017 
6018 	if (t4_use_ldst(adap))
6019 		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
6020 				      sleep_ok);
6021 
6022 indirect_access:
6023 
6024 	if (rc) {
6025 		if (rw)
6026 			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
6027 					 start_index);
6028 		else
6029 			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
6030 					  start_index);
6031 	}
6032 }
6033 
6034 /**
6035  * t4_tp_pio_read - Read TP PIO registers
6036  * @adap: the adapter
6037  * @buff: where the indirect register values are written
6038  * @nregs: how many indirect registers to read
6039  * @start_index: index of first indirect register to read
6040  * @sleep_ok: if true we may sleep while awaiting command completion
6041  *
6042  * Read TP PIO Registers
6043  **/
6044 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
6045 		    u32 start_index, bool sleep_ok)
6046 {
6047 	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
6048 			  start_index, 1, sleep_ok);
6049 }
6050 
6051 /**
6052  * t4_tp_pio_write - Write TP PIO registers
6053  * @adap: the adapter
6054  * @buff: where the indirect register values are stored
6055  * @nregs: how many indirect registers to write
6056  * @start_index: index of first indirect register to write
6057  * @sleep_ok: if true we may sleep while awaiting command completion
6058  *
6059  * Write TP PIO Registers
6060  **/
6061 void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
6062 		     u32 start_index, bool sleep_ok)
6063 {
6064 	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
6065 			  start_index, 0, sleep_ok);
6066 }
6067 
6068 /**
6069  * t4_tp_tm_pio_read - Read TP TM PIO registers
6070  * @adap: the adapter
6071  * @buff: where the indirect register values are written
6072  * @nregs: how many indirect registers to read
6073  * @start_index: index of first indirect register to read
6074  * @sleep_ok: if true we may sleep while awaiting command completion
6075  *
6076  * Read TP TM PIO Registers
6077  **/
6078 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
6079 		       u32 start_index, bool sleep_ok)
6080 {
6081 	t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
6082 			  nregs, start_index, 1, sleep_ok);
6083 }
6084 
6085 /**
6086  * t4_tp_mib_read - Read TP MIB registers
6087  * @adap: the adapter
6088  * @buff: where the indirect register values are written
6089  * @nregs: how many indirect registers to read
6090  * @start_index: index of first indirect register to read
6091  * @sleep_ok: if true we may sleep while awaiting command completion
6092  *
6093  * Read TP MIB Registers
6094  **/
6095 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
6096 		    bool sleep_ok)
6097 {
6098 	t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
6099 			  start_index, 1, sleep_ok);
6100 }
6101 
6102 /**
6103  *	t4_read_rss_key - read the global RSS key
6104  *	@adap: the adapter
6105  *	@key: 10-entry array holding the 320-bit RSS key
6106  * 	@sleep_ok: if true we may sleep while awaiting command completion
6107  *
6108  *	Reads the global 320-bit RSS key.
6109  */
6110 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
6111 {
6112 	t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
6113 }
6114 
6115 /**
6116  *	t4_write_rss_key - program one of the RSS keys
6117  *	@adap: the adapter
6118  *	@key: 10-entry array holding the 320-bit RSS key
6119  *	@idx: which RSS key to write
6120  * 	@sleep_ok: if true we may sleep while awaiting command completion
6121  *
6122  *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
6123  *	0..15 the corresponding entry in the RSS key table is written,
6124  *	otherwise the global RSS key is written.
6125  */
6126 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
6127 		      bool sleep_ok)
6128 {
6129 	u8 rss_key_addr_cnt = 16;
6130 	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
6131 
6132 	/* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
6133 	 * allows access to key addresses 16-63 by using KeyWrAddrX
6134 	 * as index[5:4](upper 2) into key table
6135 	 */
6136 	if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
6137 	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
6138 		rss_key_addr_cnt = 32;
6139 
6140 	t4_tp_pio_write(adap, (void *)key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
6141 
6142 	if (idx >= 0 && idx < rss_key_addr_cnt) {
6143 		if (rss_key_addr_cnt > 16)
6144 			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
6145 				     vrt | V_KEYWRADDRX(idx >> 4) |
6146 				     V_T6_VFWRADDR(idx) | F_KEYWREN);
6147 		else
6148 			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
6149 				     vrt| V_KEYWRADDR(idx) | F_KEYWREN);
6150 	}
6151 }
6152 
6153 /**
6154  *	t4_read_rss_pf_config - read PF RSS Configuration Table
6155  *	@adapter: the adapter
6156  *	@index: the entry in the PF RSS table to read
6157  *	@valp: where to store the returned value
6158  * 	@sleep_ok: if true we may sleep while awaiting command completion
6159  *
6160  *	Reads the PF RSS Configuration Table at the specified index and returns
6161  *	the value found there.
6162  */
6163 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
6164 			   u32 *valp, bool sleep_ok)
6165 {
6166 	t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
6167 }
6168 
6169 /**
6170  *	t4_write_rss_pf_config - write PF RSS Configuration Table
6171  *	@adapter: the adapter
6172  *	@index: the entry in the VF RSS table to read
6173  *	@val: the value to store
6174  * 	@sleep_ok: if true we may sleep while awaiting command completion
6175  *
6176  *	Writes the PF RSS Configuration Table at the specified index with the
6177  *	specified value.
6178  */
6179 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
6180 			    u32 val, bool sleep_ok)
6181 {
6182 	t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
6183 			sleep_ok);
6184 }
6185 
6186 /**
6187  *	t4_read_rss_vf_config - read VF RSS Configuration Table
6188  *	@adapter: the adapter
6189  *	@index: the entry in the VF RSS table to read
6190  *	@vfl: where to store the returned VFL
6191  *	@vfh: where to store the returned VFH
6192  * 	@sleep_ok: if true we may sleep while awaiting command completion
6193  *
6194  *	Reads the VF RSS Configuration Table at the specified index and returns
6195  *	the (VFL, VFH) values found there.
6196  */
6197 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
6198 			   u32 *vfl, u32 *vfh, bool sleep_ok)
6199 {
6200 	u32 vrt, mask, data;
6201 
6202 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
6203 		mask = V_VFWRADDR(M_VFWRADDR);
6204 		data = V_VFWRADDR(index);
6205 	} else {
6206 		 mask =  V_T6_VFWRADDR(M_T6_VFWRADDR);
6207 		 data = V_T6_VFWRADDR(index);
6208 	}
6209 	/*
6210 	 * Request that the index'th VF Table values be read into VFL/VFH.
6211 	 */
6212 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
6213 	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
6214 	vrt |= data | F_VFRDEN;
6215 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
6216 
6217 	/*
6218 	 * Grab the VFL/VFH values ...
6219 	 */
6220 	t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
6221 	t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
6222 }
6223 
6224 /**
6225  *	t4_read_rss_pf_map - read PF RSS Map
6226  *	@adapter: the adapter
6227  * 	@sleep_ok: if true we may sleep while awaiting command completion
6228  *
6229  *	Reads the PF RSS Map register and returns its value.
6230  */
6231 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
6232 {
6233 	u32 pfmap;
6234 
6235 	t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
6236 
6237 	return pfmap;
6238 }
6239 
6240 /**
6241  *	t4_read_rss_pf_mask - read PF RSS Mask
6242  *	@adapter: the adapter
6243  * 	@sleep_ok: if true we may sleep while awaiting command completion
6244  *
6245  *	Reads the PF RSS Mask register and returns its value.
6246  */
6247 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
6248 {
6249 	u32 pfmask;
6250 
6251 	t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
6252 
6253 	return pfmask;
6254 }
6255 
6256 /**
6257  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
6258  *	@adap: the adapter
6259  *	@v4: holds the TCP/IP counter values
6260  *	@v6: holds the TCP/IPv6 counter values
6261  * 	@sleep_ok: if true we may sleep while awaiting command completion
6262  *
6263  *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
6264  *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
6265  */
6266 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
6267 			 struct tp_tcp_stats *v6, bool sleep_ok)
6268 {
6269 	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
6270 
6271 #define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
6272 #define STAT(x)     val[STAT_IDX(x)]
6273 #define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
6274 
6275 	if (v4) {
6276 		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
6277 			       A_TP_MIB_TCP_OUT_RST, sleep_ok);
6278 		v4->tcp_out_rsts = STAT(OUT_RST);
6279 		v4->tcp_in_segs  = STAT64(IN_SEG);
6280 		v4->tcp_out_segs = STAT64(OUT_SEG);
6281 		v4->tcp_retrans_segs = STAT64(RXT_SEG);
6282 	}
6283 	if (v6) {
6284 		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
6285 			       A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
6286 		v6->tcp_out_rsts = STAT(OUT_RST);
6287 		v6->tcp_in_segs  = STAT64(IN_SEG);
6288 		v6->tcp_out_segs = STAT64(OUT_SEG);
6289 		v6->tcp_retrans_segs = STAT64(RXT_SEG);
6290 	}
6291 #undef STAT64
6292 #undef STAT
6293 #undef STAT_IDX
6294 }
6295 
6296 /**
6297  *	t4_tp_get_err_stats - read TP's error MIB counters
6298  *	@adap: the adapter
6299  *	@st: holds the counter values
6300  * 	@sleep_ok: if true we may sleep while awaiting command completion
6301  *
6302  *	Returns the values of TP's error counters.
6303  */
6304 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
6305 			 bool sleep_ok)
6306 {
6307 	int nchan = adap->params.arch.nchan;
6308 
6309 	t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
6310 		       sleep_ok);
6311 
6312 	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
6313 		       sleep_ok);
6314 
6315 	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
6316 		       sleep_ok);
6317 
6318 	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
6319 		       A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);
6320 
6321 	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
6322 		       A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);
6323 
6324 	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
6325 		       sleep_ok);
6326 
6327 	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
6328 		       A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);
6329 
6330 	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
6331 		       A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);
6332 
6333 	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
6334 		       sleep_ok);
6335 }
6336 
6337 /**
6338  *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
6339  *	@adap: the adapter
6340  *	@st: holds the counter values
6341  * 	@sleep_ok: if true we may sleep while awaiting command completion
6342  *
6343  *	Returns the values of TP's CPL counters.
6344  */
6345 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
6346 			 bool sleep_ok)
6347 {
6348 	int nchan = adap->params.arch.nchan;
6349 
6350 	t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);
6351 
6352 	t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
6353 }
6354 
6355 /**
6356  *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
6357  *	@adap: the adapter
6358  *	@st: holds the counter values
6359  *
6360  *	Returns the values of TP's RDMA counters.
6361  */
6362 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
6363 			  bool sleep_ok)
6364 {
6365 	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
6366 		       sleep_ok);
6367 }
6368 
6369 /**
6370  *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
6371  *	@adap: the adapter
6372  *	@idx: the port index
6373  *	@st: holds the counter values
6374  * 	@sleep_ok: if true we may sleep while awaiting command completion
6375  *
6376  *	Returns the values of TP's FCoE counters for the selected port.
6377  */
6378 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
6379 		       struct tp_fcoe_stats *st, bool sleep_ok)
6380 {
6381 	u32 val[2];
6382 
6383 	t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
6384 		       sleep_ok);
6385 
6386 	t4_tp_mib_read(adap, &st->frames_drop, 1,
6387 		       A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);
6388 
6389 	t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
6390 		       sleep_ok);
6391 
6392 	st->octets_ddp = ((u64)val[0] << 32) | val[1];
6393 }
6394 
6395 /**
6396  *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
6397  *	@adap: the adapter
6398  *	@st: holds the counter values
6399  * 	@sleep_ok: if true we may sleep while awaiting command completion
6400  *
6401  *	Returns the values of TP's counters for non-TCP directly-placed packets.
6402  */
6403 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
6404 		      bool sleep_ok)
6405 {
6406 	u32 val[4];
6407 
6408 	t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);
6409 
6410 	st->frames = val[0];
6411 	st->drops = val[1];
6412 	st->octets = ((u64)val[2] << 32) | val[3];
6413 }
6414 
6415 /**
6416  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
6417  *	@adap: the adapter
6418  *	@mtus: where to store the MTU values
6419  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
6420  *
6421  *	Reads the HW path MTU table.
6422  */
6423 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
6424 {
6425 	u32 v;
6426 	int i;
6427 
6428 	for (i = 0; i < NMTUS; ++i) {
6429 		t4_write_reg(adap, A_TP_MTU_TABLE,
6430 			     V_MTUINDEX(0xffU) | V_MTUVALUE(i));
6431 		v = t4_read_reg(adap, A_TP_MTU_TABLE);
6432 		mtus[i] = G_MTUVALUE(v);
6433 		if (mtu_log)
6434 			mtu_log[i] = G_MTUWIDTH(v);
6435 	}
6436 }
6437 
6438 /**
6439  *	t4_read_cong_tbl - reads the congestion control table
6440  *	@adap: the adapter
6441  *	@incr: where to store the alpha values
6442  *
6443  *	Reads the additive increments programmed into the HW congestion
6444  *	control table.
6445  */
6446 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
6447 {
6448 	unsigned int mtu, w;
6449 
6450 	for (mtu = 0; mtu < NMTUS; ++mtu)
6451 		for (w = 0; w < NCCTRL_WIN; ++w) {
6452 			t4_write_reg(adap, A_TP_CCTRL_TABLE,
6453 				     V_ROWINDEX(0xffffU) | (mtu << 5) | w);
6454 			incr[mtu][w] = (u16)t4_read_reg(adap,
6455 						A_TP_CCTRL_TABLE) & 0x1fff;
6456 		}
6457 }
6458 
6459 /**
6460  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
6461  *	@adap: the adapter
6462  *	@addr: the indirect TP register address
6463  *	@mask: specifies the field within the register to modify
6464  *	@val: new value for the field
6465  *
6466  *	Sets a field of an indirect TP register to the given value.
6467  */
6468 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
6469 			    unsigned int mask, unsigned int val)
6470 {
6471 	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
6472 	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
6473 	t4_write_reg(adap, A_TP_PIO_DATA, val);
6474 }
6475 
6476 /**
6477  *	init_cong_ctrl - initialize congestion control parameters
6478  *	@a: the alpha values for congestion control
6479  *	@b: the beta values for congestion control
6480  *
6481  *	Initialize the congestion control parameters.
6482  */
6483 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
6484 {
6485 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
6486 	a[9] = 2;
6487 	a[10] = 3;
6488 	a[11] = 4;
6489 	a[12] = 5;
6490 	a[13] = 6;
6491 	a[14] = 7;
6492 	a[15] = 8;
6493 	a[16] = 9;
6494 	a[17] = 10;
6495 	a[18] = 14;
6496 	a[19] = 17;
6497 	a[20] = 21;
6498 	a[21] = 25;
6499 	a[22] = 30;
6500 	a[23] = 35;
6501 	a[24] = 45;
6502 	a[25] = 60;
6503 	a[26] = 80;
6504 	a[27] = 100;
6505 	a[28] = 200;
6506 	a[29] = 300;
6507 	a[30] = 400;
6508 	a[31] = 500;
6509 
6510 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
6511 	b[9] = b[10] = 1;
6512 	b[11] = b[12] = 2;
6513 	b[13] = b[14] = b[15] = b[16] = 3;
6514 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
6515 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
6516 	b[28] = b[29] = 6;
6517 	b[30] = b[31] = 7;
6518 }
6519 
6520 /* The minimum additive increment value for the congestion control table */
6521 #define CC_MIN_INCR 2U
6522 
6523 /**
6524  *	t4_load_mtus - write the MTU and congestion control HW tables
6525  *	@adap: the adapter
6526  *	@mtus: the values for the MTU table
6527  *	@alpha: the values for the congestion control alpha parameter
6528  *	@beta: the values for the congestion control beta parameter
6529  *
6530  *	Write the HW MTU table with the supplied MTUs and the high-speed
6531  *	congestion control table with the supplied alpha, beta, and MTUs.
6532  *	We write the two tables together because the additive increments
6533  *	depend on the MTUs.
6534  */
6535 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
6536 		  const unsigned short *alpha, const unsigned short *beta)
6537 {
6538 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
6539 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
6540 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
6541 		28672, 40960, 57344, 81920, 114688, 163840, 229376
6542 	};
6543 
6544 	unsigned int i, w;
6545 
6546 	for (i = 0; i < NMTUS; ++i) {
6547 		unsigned int mtu = mtus[i];
6548 		unsigned int log2 = fls(mtu);
6549 
6550 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
6551 			log2--;
6552 		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
6553 			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
6554 
6555 		for (w = 0; w < NCCTRL_WIN; ++w) {
6556 			unsigned int inc;
6557 
6558 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
6559 				  CC_MIN_INCR);
6560 
6561 			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
6562 				     (w << 16) | (beta[w] << 13) | inc);
6563 		}
6564 	}
6565 }
6566 
6567 /*
6568  * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
6569  * clocks.  The formula is
6570  *
6571  * bytes/s = bytes256 * 256 * ClkFreq / 4096
6572  *
6573  * which is equivalent to
6574  *
6575  * bytes/s = 62.5 * bytes256 * ClkFreq_ms
6576  */
6577 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
6578 {
6579 	u64 v = bytes256 * adap->params.vpd.cclk;
6580 
6581 	return v * 62 + v / 2;
6582 }
6583 
6584 /**
6585  *	t4_get_chan_txrate - get the current per channel Tx rates
6586  *	@adap: the adapter
6587  *	@nic_rate: rates for NIC traffic
6588  *	@ofld_rate: rates for offloaded traffic
6589  *
6590  *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
6591  *	for each channel.
6592  */
6593 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
6594 {
6595 	u32 v;
6596 
6597 	v = t4_read_reg(adap, A_TP_TX_TRATE);
6598 	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
6599 	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
6600 	if (adap->params.arch.nchan == NCHAN) {
6601 		nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
6602 		nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
6603 	}
6604 
6605 	v = t4_read_reg(adap, A_TP_TX_ORATE);
6606 	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
6607 	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
6608 	if (adap->params.arch.nchan == NCHAN) {
6609 		ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
6610 		ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
6611 	}
6612 }
6613 
6614 /**
6615  *	t4_set_trace_filter - configure one of the tracing filters
6616  *	@adap: the adapter
6617  *	@tp: the desired trace filter parameters
6618  *	@idx: which filter to configure
6619  *	@enable: whether to enable or disable the filter
6620  *
6621  *	Configures one of the tracing filters available in HW.  If @enable is
6622  *	%0 @tp is not examined and may be %NULL. The user is responsible to
6623  *	set the single/multiple trace mode by writing to A_MPS_TRC_CFG register
6624  *	by using "cxgbtool iface reg reg_addr=val" command. See t4_sniffer/
6625  *	docs/readme.txt for a complete description of how to setup traceing on
6626  *	T4.
6627  */
6628 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
6629 			int enable)
6630 {
6631 	int i, ofst = idx * 4;
6632 	u32 data_reg, mask_reg, cfg;
6633 
6634 	if (!enable) {
6635 		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
6636 		return 0;
6637 	}
6638 
6639 	/*
6640 	 * TODO - After T4 data book is updated, specify the exact
6641 	 * section below.
6642 	 *
6643 	 * See T4 data book - MPS section for a complete description
6644 	 * of the below if..else handling of A_MPS_TRC_CFG register
6645 	 * value.
6646 	 */
6647 	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
6648 	if (cfg & F_TRCMULTIFILTER) {
6649 		/*
6650 		 * If multiple tracers are enabled, then maximum
6651 		 * capture size is 2.5KB (FIFO size of a single channel)
6652 		 * minus 2 flits for CPL_TRACE_PKT header.
6653 		 */
6654 		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
6655 			return -EINVAL;
6656 	}
6657 	else {
6658 		/*
6659 		 * If multiple tracers are disabled, to avoid deadlocks
6660 		 * maximum packet capture size of 9600 bytes is recommended.
6661 		 * Also in this mode, only trace0 can be enabled and running.
6662 		 */
6663 		if (tp->snap_len > 9600 || idx)
6664 			return -EINVAL;
6665 	}
6666 
6667 	if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
6668 	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
6669 	    tp->min_len > M_TFMINPKTSIZE)
6670 		return -EINVAL;
6671 
6672 	/* stop the tracer we'll be changing */
6673 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
6674 
6675 	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
6676 	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
6677 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
6678 
6679 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
6680 		t4_write_reg(adap, data_reg, tp->data[i]);
6681 		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
6682 	}
6683 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
6684 		     V_TFCAPTUREMAX(tp->snap_len) |
6685 		     V_TFMINPKTSIZE(tp->min_len));
6686 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
6687 		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
6688 		     (is_t4(adap->params.chip) ?
6689 		     V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert) :
6690 		     V_T5_TFPORT(tp->port) | F_T5_TFEN |
6691 		     V_T5_TFINVERTMATCH(tp->invert)));
6692 
6693 	return 0;
6694 }
6695 
6696 /**
6697  *	t4_get_trace_filter - query one of the tracing filters
6698  *	@adap: the adapter
6699  *	@tp: the current trace filter parameters
6700  *	@idx: which trace filter to query
6701  *	@enabled: non-zero if the filter is enabled
6702  *
6703  *	Returns the current settings of one of the HW tracing filters.
6704  */
6705 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
6706 			 int *enabled)
6707 {
6708 	u32 ctla, ctlb;
6709 	int i, ofst = idx * 4;
6710 	u32 data_reg, mask_reg;
6711 
6712 	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
6713 	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
6714 
6715 	if (is_t4(adap->params.chip)) {
6716 		*enabled = !!(ctla & F_TFEN);
6717 		tp->port =  G_TFPORT(ctla);
6718 		tp->invert = !!(ctla & F_TFINVERTMATCH);
6719 	} else {
6720 		*enabled = !!(ctla & F_T5_TFEN);
6721 		tp->port = G_T5_TFPORT(ctla);
6722 		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
6723 	}
6724 	tp->snap_len = G_TFCAPTUREMAX(ctlb);
6725 	tp->min_len = G_TFMINPKTSIZE(ctlb);
6726 	tp->skip_ofst = G_TFOFFSET(ctla);
6727 	tp->skip_len = G_TFLENGTH(ctla);
6728 
6729 	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
6730 	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
6731 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
6732 
6733 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
6734 		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
6735 		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
6736 	}
6737 }
6738 
6739 /**
6740  *	t4_read_tcb - read a hardware TCP Control Block structure
6741  *	@adap: the adapter
6742  *	@win: PCI-E Memory Window to use
6743  *	@tid: the TCB ID
6744  *	@tcb: the buffer to return the TCB in
6745  *
6746  *	Reads the indicated hardware TCP Control Block and returns it in
6747  *	the supplied buffer.  Returns 0 on success.
6748  */
6749 int t4_read_tcb(struct adapter *adap, int win, int tid, u32 tcb[TCB_SIZE/4])
6750 {
6751 	u32 tcb_base = t4_read_reg(adap, A_TP_CMM_TCB_BASE);
6752 	u32 tcb_addr = tcb_base + tid * TCB_SIZE;
6753 	__be32 raw_tcb[TCB_SIZE/4];
6754 	int ret, word;
6755 
6756 	ret = t4_memory_rw_addr(adap, win,
6757 				tcb_addr, sizeof raw_tcb, raw_tcb,
6758 				T4_MEMORY_READ);
6759 	if (ret)
6760 		return ret;
6761 
6762 	for (word = 0; word < 32; word++)
6763 		tcb[word] = be32_to_cpu(raw_tcb[word]);
6764 	return 0;
6765 }
6766 
6767 /**
6768  *	t4_pmtx_get_stats - returns the HW stats from PMTX
6769  *	@adap: the adapter
6770  *	@cnt: where to store the count statistics
6771  *	@cycles: where to store the cycle statistics
6772  *
6773  *	Returns performance statistics from PMTX.
6774  */
6775 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6776 {
6777 	int i;
6778 	u32 data[2];
6779 
6780 	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
6781 		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
6782 		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
6783 		if (is_t4(adap->params.chip)) {
6784 			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
6785 		} else {
6786 			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
6787 					 A_PM_TX_DBG_DATA, data, 2,
6788 					 A_PM_TX_DBG_STAT_MSB);
6789 			cycles[i] = (((u64)data[0] << 32) | data[1]);
6790 		}
6791 	}
6792 }
6793 
6794 /**
6795  *	t4_pmrx_get_stats - returns the HW stats from PMRX
6796  *	@adap: the adapter
6797  *	@cnt: where to store the count statistics
6798  *	@cycles: where to store the cycle statistics
6799  *
6800  *	Returns performance statistics from PMRX.
6801  */
6802 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6803 {
6804 	int i;
6805 	u32 data[2];
6806 
6807 	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
6808 		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
6809 		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
6810 		if (is_t4(adap->params.chip)) {
6811 			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
6812 		} else {
6813 			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
6814 					 A_PM_RX_DBG_DATA, data, 2,
6815 					 A_PM_RX_DBG_STAT_MSB);
6816 			cycles[i] = (((u64)data[0] << 32) | data[1]);
6817 		}
6818 	}
6819 }
6820 
6821 /**
6822  *	compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
6823  *	@adapter: the adapter
6824  *	@pidx: the port index
6825  *
6826  *	Compuytes and returns a bitmap indicating which MPS buffer groups are
6827  *	associated with the given Port.  Bit i is set if buffer group i is
6828  *	used by the Port.
6829  */
6830 static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
6831 					      int pidx)
6832 {
6833 	unsigned int chip_version, nports;
6834 
6835 	chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6836 	nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
6837 
6838 	switch (chip_version) {
6839 	case CHELSIO_T4:
6840 	case CHELSIO_T5:
6841 		switch (nports) {
6842 		case 1: return 0xf;
6843 		case 2: return 3 << (2 * pidx);
6844 		case 4: return 1 << pidx;
6845 		}
6846 		break;
6847 
6848 	case CHELSIO_T6:
6849 		switch (nports) {
6850 		case 2: return 1 << (2 * pidx);
6851 		}
6852 		break;
6853 	}
6854 
6855 	CH_ERR(adapter, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
6856 	       chip_version, nports);
6857 
6858 	return 0;
6859 }
6860 
6861 /**
6862  *	t4_get_mps_bg_map - return the buffer groups associated with a port
6863  *	@adapter: the adapter
6864  *	@pidx: the port index
6865  *
6866  *	Returns a bitmap indicating which MPS buffer groups are associated
6867  *	with the given Port.  Bit i is set if buffer group i is used by the
6868  *	Port.
6869  */
6870 unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
6871 {
6872 	u8 *mps_bg_map;
6873 	unsigned int nports;
6874 
6875 	nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
6876 	if (pidx >= nports) {
6877 		CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n", pidx, nports);
6878 		return 0;
6879 	}
6880 
6881 	/* If we've already retrieved/computed this, just return the result.
6882 	 */
6883 	mps_bg_map = adapter->params.mps_bg_map;
6884 	if (mps_bg_map[pidx])
6885 		return mps_bg_map[pidx];
6886 
6887 	/* Newer Firmware can tell us what the MPS Buffer Group Map is.
6888 	 * If we're talking to such Firmware, let it tell us.  If the new
6889 	 * API isn't supported, revert back to old hardcoded way.  The value
6890 	 * obtained from Firmware is encoded in below format:
6891 	 *
6892 	 * val = (( MPSBGMAP[Port 3] << 24 ) |
6893 	 *        ( MPSBGMAP[Port 2] << 16 ) |
6894 	 *        ( MPSBGMAP[Port 1] <<  8 ) |
6895 	 *        ( MPSBGMAP[Port 0] <<  0 ))
6896 	 */
6897 	if (adapter->flags & FW_OK) {
6898 		u32 param, val;
6899 		int ret;
6900 
6901 		param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
6902 			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MPSBGMAP));
6903 		ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
6904 					 0, 1, &param, &val);
6905 		if (!ret) {
6906 			int p;
6907 
6908 			/* Store the BG Map for all of the Ports in order to
6909 			 * avoid more calls to the Firmware in the future.
6910 			 */
6911 			for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
6912 				mps_bg_map[p] = val & 0xff;
6913 
6914 			return mps_bg_map[pidx];
6915 		}
6916 	}
6917 
6918 	/* Either we're not talking to the Firmware or we're dealing with
6919 	 * older Firmware which doesn't support the new API to get the MPS
6920 	 * Buffer Group Map.  Fall back to computing it ourselves.
6921 	 */
6922 	mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
6923 	return mps_bg_map[pidx];
6924 }
6925 
6926 /**
6927  *      t4_get_tp_e2c_map - return the E2C channel map associated with a port
6928  *      @adapter: the adapter
6929  *      @pidx: the port index
6930  */
6931 unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
6932 {
6933 	unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
6934 	u32 param, val = 0;
6935 	int ret;
6936 
6937 	if (pidx >= nports) {
6938 		CH_WARN(adapter, "TP E2C Channel Port Index %d >= Nports %d\n", pidx, nports);
6939 		return 0;
6940 	}
6941 
6942 	/* FW version >= 1.16.44.0 can determine E2C channel map using
6943 	 * FW_PARAMS_PARAM_DEV_TPCHMAP API.
6944 	 */
6945 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
6946 		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPCHMAP));
6947 	ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
6948 					 0, 1, &param, &val);
6949 	if (!ret)
6950 		return (val >> (8*pidx)) & 0xff;
6951 
6952 	return 0;
6953 }
6954 
6955 /**
6956  *	t4_get_tp_ch_map - return TP ingress channels associated with a port
6957  *	@adapter: the adapter
6958  *	@pidx: the port index
6959  *
6960  *	Returns a bitmap indicating which TP Ingress Channels are associated with
6961  *	a given Port.  Bit i is set if TP Ingress Channel i is used by the Port.
6962  */
6963 unsigned int t4_get_tp_ch_map(struct adapter *adapter, int pidx)
6964 {
6965 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6966 	unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
6967 
6968 	if (pidx >= nports) {
6969 		CH_WARN(adapter, "TP Port Index %d >= Nports %d\n", pidx, nports);
6970 		return 0;
6971 	}
6972 
6973 	switch (chip_version) {
6974 	case CHELSIO_T4:
6975 	case CHELSIO_T5:
6976 		/*
6977 		 * Note that this happens to be the same values as the MPS
6978 		 * Buffer Group Map for these Chips.  But we replicate the code
6979 		 * here because they're really separate concepts.
6980 		 */
6981 		switch (nports) {
6982 		case 1: return 0xf;
6983 		case 2: return 3 << (2 * pidx);
6984 		case 4: return 1 << pidx;
6985 		}
6986 		break;
6987 
6988 	case CHELSIO_T6:
6989 		switch (nports) {
6990 		case 1: return 1 << pidx;
6991 		case 2: return 1 << pidx;
6992 		}
6993 		break;
6994 	}
6995 
6996 	CH_ERR(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n",
6997 	       chip_version, nports);
6998 	return 0;
6999 }
7000 
7001 /**
7002  *      t4_get_port_type_description - return Port Type string description
7003  *      @port_type: firmware Port Type enumeration
7004  */
7005 const char *t4_get_port_type_description(enum fw_port_type port_type)
7006 {
7007 	static const char *const port_type_description[] = {
7008 		"Fiber_XFI",
7009 		"Fiber_XAUI",
7010 		"BT_SGMII",
7011 		"BT_XFI",
7012 		"BT_XAUI",
7013 		"KX4",
7014 		"CX4",
7015 		"KX",
7016 		"KR",
7017 		"SFP",
7018 		"BP_AP",
7019 		"BP4_AP",
7020 		"QSFP_10G",
7021 		"QSA",
7022 		"QSFP",
7023 		"BP40_BA",
7024 		"KR4_100G",
7025 		"CR4_QSFP",
7026 		"CR_QSFP",
7027 		"CR2_QSFP",
7028 		"SFP28",
7029 		"KR_SFP28",
7030 		"KR_XLAUI",
7031 	};
7032 
7033 	if (port_type < ARRAY_SIZE(port_type_description))
7034 		return port_type_description[port_type];
7035 	return "UNKNOWN";
7036 }
7037 
7038 /**
7039  *      t4_get_port_stats_offset - collect port stats relative to a previous
7040  *				   snapshot
7041  *      @adap: The adapter
7042  *      @idx: The port
7043  *      @stats: Current stats to fill
7044  *      @offset: Previous stats snapshot
7045  */
7046 void t4_get_port_stats_offset(struct adapter *adap, int idx,
7047 		struct port_stats *stats,
7048 		struct port_stats *offset)
7049 {
7050 	u64 *s, *o;
7051 	int i;
7052 
7053 	t4_get_port_stats(adap, idx, stats);
7054 	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
7055 			i < (sizeof(struct port_stats)/sizeof(u64)) ;
7056 			i++, s++, o++)
7057 		*s -= *o;
7058 }
7059 
7060 /**
7061  *	t4_get_port_stats - collect port statistics
7062  *	@adap: the adapter
7063  *	@idx: the port index
7064  *	@p: the stats structure to fill
7065  *
7066  *	Collect statistics related to the given port from HW.
7067  */
7068 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
7069 {
7070 	u32 bgmap = t4_get_mps_bg_map(adap, idx);
7071 	u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
7072 
7073 #define GET_STAT(name) \
7074 	t4_read_reg64(adap, \
7075 	(is_t4(adap->params.chip) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
7076 	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
7077 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
7078 
7079 	p->tx_octets		= GET_STAT(TX_PORT_BYTES);
7080 	p->tx_frames		= GET_STAT(TX_PORT_FRAMES);
7081 	p->tx_bcast_frames	= GET_STAT(TX_PORT_BCAST);
7082 	p->tx_mcast_frames	= GET_STAT(TX_PORT_MCAST);
7083 	p->tx_ucast_frames	= GET_STAT(TX_PORT_UCAST);
7084 	p->tx_error_frames	= GET_STAT(TX_PORT_ERROR);
7085 	p->tx_frames_64		= GET_STAT(TX_PORT_64B);
7086 	p->tx_frames_65_127	= GET_STAT(TX_PORT_65B_127B);
7087 	p->tx_frames_128_255	= GET_STAT(TX_PORT_128B_255B);
7088 	p->tx_frames_256_511	= GET_STAT(TX_PORT_256B_511B);
7089 	p->tx_frames_512_1023	= GET_STAT(TX_PORT_512B_1023B);
7090 	p->tx_frames_1024_1518	= GET_STAT(TX_PORT_1024B_1518B);
7091 	p->tx_frames_1519_max	= GET_STAT(TX_PORT_1519B_MAX);
7092 	p->tx_drop		= GET_STAT(TX_PORT_DROP);
7093 	p->tx_pause		= GET_STAT(TX_PORT_PAUSE);
7094 	p->tx_ppp0		= GET_STAT(TX_PORT_PPP0);
7095 	p->tx_ppp1		= GET_STAT(TX_PORT_PPP1);
7096 	p->tx_ppp2		= GET_STAT(TX_PORT_PPP2);
7097 	p->tx_ppp3		= GET_STAT(TX_PORT_PPP3);
7098 	p->tx_ppp4		= GET_STAT(TX_PORT_PPP4);
7099 	p->tx_ppp5		= GET_STAT(TX_PORT_PPP5);
7100 	p->tx_ppp6		= GET_STAT(TX_PORT_PPP6);
7101 	p->tx_ppp7		= GET_STAT(TX_PORT_PPP7);
7102 
7103 	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
7104 		if (stat_ctl & F_COUNTPAUSESTATTX)
7105 			p->tx_frames_64 -= p->tx_pause;
7106 		if (stat_ctl & F_COUNTPAUSEMCTX)
7107 			p->tx_mcast_frames -= p->tx_pause;
7108 	}
7109 
7110 	p->rx_octets		= GET_STAT(RX_PORT_BYTES);
7111 	p->rx_frames		= GET_STAT(RX_PORT_FRAMES);
7112 	p->rx_bcast_frames	= GET_STAT(RX_PORT_BCAST);
7113 	p->rx_mcast_frames	= GET_STAT(RX_PORT_MCAST);
7114 	p->rx_ucast_frames	= GET_STAT(RX_PORT_UCAST);
7115 	p->rx_too_long		= GET_STAT(RX_PORT_MTU_ERROR);
7116 	p->rx_jabber		= GET_STAT(RX_PORT_MTU_CRC_ERROR);
7117 	p->rx_fcs_err		= GET_STAT(RX_PORT_CRC_ERROR);
7118 	p->rx_len_err		= GET_STAT(RX_PORT_LEN_ERROR);
7119 	p->rx_symbol_err	= GET_STAT(RX_PORT_SYM_ERROR);
7120 	p->rx_runt		= GET_STAT(RX_PORT_LESS_64B);
7121 	p->rx_frames_64		= GET_STAT(RX_PORT_64B);
7122 	p->rx_frames_65_127	= GET_STAT(RX_PORT_65B_127B);
7123 	p->rx_frames_128_255	= GET_STAT(RX_PORT_128B_255B);
7124 	p->rx_frames_256_511	= GET_STAT(RX_PORT_256B_511B);
7125 	p->rx_frames_512_1023	= GET_STAT(RX_PORT_512B_1023B);
7126 	p->rx_frames_1024_1518	= GET_STAT(RX_PORT_1024B_1518B);
7127 	p->rx_frames_1519_max	= GET_STAT(RX_PORT_1519B_MAX);
7128 	p->rx_pause		= GET_STAT(RX_PORT_PAUSE);
7129 	p->rx_ppp0		= GET_STAT(RX_PORT_PPP0);
7130 	p->rx_ppp1		= GET_STAT(RX_PORT_PPP1);
7131 	p->rx_ppp2		= GET_STAT(RX_PORT_PPP2);
7132 	p->rx_ppp3		= GET_STAT(RX_PORT_PPP3);
7133 	p->rx_ppp4		= GET_STAT(RX_PORT_PPP4);
7134 	p->rx_ppp5		= GET_STAT(RX_PORT_PPP5);
7135 	p->rx_ppp6		= GET_STAT(RX_PORT_PPP6);
7136 	p->rx_ppp7		= GET_STAT(RX_PORT_PPP7);
7137 
7138 	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
7139 		if (stat_ctl & F_COUNTPAUSESTATRX)
7140 			p->rx_frames_64 -= p->rx_pause;
7141 		if (stat_ctl & F_COUNTPAUSEMCRX)
7142 			p->rx_mcast_frames -= p->rx_pause;
7143 	}
7144 
7145 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
7146 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
7147 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
7148 	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
7149 	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
7150 	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
7151 	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
7152 	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
7153 
7154 #undef GET_STAT
7155 #undef GET_STAT_COM
7156 }
7157 
7158 /**
7159  *	t4_get_lb_stats - collect loopback port statistics
7160  *	@adap: the adapter
7161  *	@idx: the loopback port index
7162  *	@p: the stats structure to fill
7163  *
7164  *	Return HW statistics for the given loopback port.
7165  */
7166 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
7167 {
7168 	u32 bgmap = t4_get_mps_bg_map(adap, idx);
7169 
7170 #define GET_STAT(name) \
7171 	t4_read_reg64(adap, \
7172 	(is_t4(adap->params.chip) ? \
7173 	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
7174 	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
7175 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
7176 
7177 	p->octets	= GET_STAT(BYTES);
7178 	p->frames	= GET_STAT(FRAMES);
7179 	p->bcast_frames	= GET_STAT(BCAST);
7180 	p->mcast_frames	= GET_STAT(MCAST);
7181 	p->ucast_frames	= GET_STAT(UCAST);
7182 	p->error_frames	= GET_STAT(ERROR);
7183 
7184 	p->frames_64		= GET_STAT(64B);
7185 	p->frames_65_127	= GET_STAT(65B_127B);
7186 	p->frames_128_255	= GET_STAT(128B_255B);
7187 	p->frames_256_511	= GET_STAT(256B_511B);
7188 	p->frames_512_1023	= GET_STAT(512B_1023B);
7189 	p->frames_1024_1518	= GET_STAT(1024B_1518B);
7190 	p->frames_1519_max	= GET_STAT(1519B_MAX);
7191 	p->drop			= GET_STAT(DROP_FRAMES);
7192 
7193 	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
7194 	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
7195 	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
7196 	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
7197 	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
7198 	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
7199 	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
7200 	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
7201 
7202 #undef GET_STAT
7203 #undef GET_STAT_COM
7204 }
7205 
/**
 *	t4_mk_filtdelwr - create a delete filter WR
 *	@ftid: the filter ID
 *	@wr: the filter work request to populate
 *	@rqtype: the filter Request Type: 0 => IPv4, 1 => IPv6
 *	@qid: ingress queue to receive the delete notification
 *
 *	Creates a filter work request to delete the supplied filter.  If @qid
 *	is negative the delete notification is suppressed (NOREPLY is set and
 *	no reply ingress queue is filled in).
 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr,
		     int rqtype, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_RQTYPE(rqtype) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
				cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}
7229 
/*
 * INIT_CMD - initialize the common header of a firmware command struct.
 * @var is the command structure, @cmd the FW_xxx_CMD name fragment (e.g.
 * HELLO, BYE, RESET) and @rd_wr is READ or WRITE (token-pasted into
 * F_FW_CMD_READ / F_FW_CMD_WRITE).  Sets the opcode/request bits and the
 * command length in 16-byte units.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
7236 
7237 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
7238 			  u32 addr, u32 val)
7239 {
7240 	u32 ldst_addrspace;
7241 	struct fw_ldst_cmd c;
7242 
7243 	memset(&c, 0, sizeof(c));
7244 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
7245 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7246 					F_FW_CMD_REQUEST |
7247 					F_FW_CMD_WRITE |
7248 					ldst_addrspace);
7249 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7250 	c.u.addrval.addr = cpu_to_be32(addr);
7251 	c.u.addrval.val = cpu_to_be32(val);
7252 
7253 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7254 }
7255 
7256 /**
7257  *	t4_mdio_rd - read a PHY register through MDIO
7258  *	@adap: the adapter
7259  *	@mbox: mailbox to use for the FW command
7260  *	@phy_addr: the PHY address
7261  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
7262  *	@reg: the register to read
7263  *	@valp: where to store the value
7264  *
7265  *	Issues a FW command through the given mailbox to read a PHY register.
7266  */
7267 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
7268 	       unsigned int mmd, unsigned int reg, unsigned int *valp)
7269 {
7270 	int ret;
7271 	u32 ldst_addrspace;
7272 	struct fw_ldst_cmd c;
7273 
7274 	memset(&c, 0, sizeof(c));
7275 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
7276 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7277 					F_FW_CMD_REQUEST | F_FW_CMD_READ |
7278 					ldst_addrspace);
7279 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7280 	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
7281 					 V_FW_LDST_CMD_MMD(mmd));
7282 	c.u.mdio.raddr = cpu_to_be16(reg);
7283 
7284 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7285 	if (ret == 0)
7286 		*valp = be16_to_cpu(c.u.mdio.rval);
7287 	return ret;
7288 }
7289 
7290 /**
7291  *	t4_mdio_wr - write a PHY register through MDIO
7292  *	@adap: the adapter
7293  *	@mbox: mailbox to use for the FW command
7294  *	@phy_addr: the PHY address
7295  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
7296  *	@reg: the register to write
7297  *	@valp: value to write
7298  *
7299  *	Issues a FW command through the given mailbox to write a PHY register.
7300  */
7301 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
7302 	       unsigned int mmd, unsigned int reg, unsigned int val)
7303 {
7304 	u32 ldst_addrspace;
7305 	struct fw_ldst_cmd c;
7306 
7307 	memset(&c, 0, sizeof(c));
7308 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
7309 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7310 					F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7311 					ldst_addrspace);
7312 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7313 	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
7314 					 V_FW_LDST_CMD_MMD(mmd));
7315 	c.u.mdio.raddr = cpu_to_be16(reg);
7316 	c.u.mdio.rval = cpu_to_be16(val);
7317 
7318 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7319 }
7320 
7321 /**
7322  *
7323  *	t4_sge_decode_idma_state - decode the idma state
7324  *	@adap: the adapter
7325  *	@state: the state idma is stuck in
7326  */
7327 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
7328 {
7329 	static const char * const t4_decode[] = {
7330 		"IDMA_IDLE",
7331 		"IDMA_PUSH_MORE_CPL_FIFO",
7332 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7333 		"Not used",
7334 		"IDMA_PHYSADDR_SEND_PCIEHDR",
7335 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7336 		"IDMA_PHYSADDR_SEND_PAYLOAD",
7337 		"IDMA_SEND_FIFO_TO_IMSG",
7338 		"IDMA_FL_REQ_DATA_FL_PREP",
7339 		"IDMA_FL_REQ_DATA_FL",
7340 		"IDMA_FL_DROP",
7341 		"IDMA_FL_H_REQ_HEADER_FL",
7342 		"IDMA_FL_H_SEND_PCIEHDR",
7343 		"IDMA_FL_H_PUSH_CPL_FIFO",
7344 		"IDMA_FL_H_SEND_CPL",
7345 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
7346 		"IDMA_FL_H_SEND_IP_HDR",
7347 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
7348 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
7349 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
7350 		"IDMA_FL_D_SEND_PCIEHDR",
7351 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7352 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
7353 		"IDMA_FL_SEND_PCIEHDR",
7354 		"IDMA_FL_PUSH_CPL_FIFO",
7355 		"IDMA_FL_SEND_CPL",
7356 		"IDMA_FL_SEND_PAYLOAD_FIRST",
7357 		"IDMA_FL_SEND_PAYLOAD",
7358 		"IDMA_FL_REQ_NEXT_DATA_FL",
7359 		"IDMA_FL_SEND_NEXT_PCIEHDR",
7360 		"IDMA_FL_SEND_PADDING",
7361 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
7362 		"IDMA_FL_SEND_FIFO_TO_IMSG",
7363 		"IDMA_FL_REQ_DATAFL_DONE",
7364 		"IDMA_FL_REQ_HEADERFL_DONE",
7365 	};
7366 	static const char * const t5_decode[] = {
7367 		"IDMA_IDLE",
7368 		"IDMA_ALMOST_IDLE",
7369 		"IDMA_PUSH_MORE_CPL_FIFO",
7370 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7371 		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
7372 		"IDMA_PHYSADDR_SEND_PCIEHDR",
7373 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7374 		"IDMA_PHYSADDR_SEND_PAYLOAD",
7375 		"IDMA_SEND_FIFO_TO_IMSG",
7376 		"IDMA_FL_REQ_DATA_FL",
7377 		"IDMA_FL_DROP",
7378 		"IDMA_FL_DROP_SEND_INC",
7379 		"IDMA_FL_H_REQ_HEADER_FL",
7380 		"IDMA_FL_H_SEND_PCIEHDR",
7381 		"IDMA_FL_H_PUSH_CPL_FIFO",
7382 		"IDMA_FL_H_SEND_CPL",
7383 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
7384 		"IDMA_FL_H_SEND_IP_HDR",
7385 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
7386 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
7387 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
7388 		"IDMA_FL_D_SEND_PCIEHDR",
7389 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7390 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
7391 		"IDMA_FL_SEND_PCIEHDR",
7392 		"IDMA_FL_PUSH_CPL_FIFO",
7393 		"IDMA_FL_SEND_CPL",
7394 		"IDMA_FL_SEND_PAYLOAD_FIRST",
7395 		"IDMA_FL_SEND_PAYLOAD",
7396 		"IDMA_FL_REQ_NEXT_DATA_FL",
7397 		"IDMA_FL_SEND_NEXT_PCIEHDR",
7398 		"IDMA_FL_SEND_PADDING",
7399 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
7400 	};
7401 	static const char * const t6_decode[] = {
7402 		"IDMA_IDLE",
7403 		"IDMA_PUSH_MORE_CPL_FIFO",
7404 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7405 		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
7406 		"IDMA_PHYSADDR_SEND_PCIEHDR",
7407 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7408 		"IDMA_PHYSADDR_SEND_PAYLOAD",
7409 		"IDMA_FL_REQ_DATA_FL",
7410 		"IDMA_FL_DROP",
7411 		"IDMA_FL_DROP_SEND_INC",
7412 		"IDMA_FL_H_REQ_HEADER_FL",
7413 		"IDMA_FL_H_SEND_PCIEHDR",
7414 		"IDMA_FL_H_PUSH_CPL_FIFO",
7415 		"IDMA_FL_H_SEND_CPL",
7416 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
7417 		"IDMA_FL_H_SEND_IP_HDR",
7418 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
7419 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
7420 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
7421 		"IDMA_FL_D_SEND_PCIEHDR",
7422 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7423 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
7424 		"IDMA_FL_SEND_PCIEHDR",
7425 		"IDMA_FL_PUSH_CPL_FIFO",
7426 		"IDMA_FL_SEND_CPL",
7427 		"IDMA_FL_SEND_PAYLOAD_FIRST",
7428 		"IDMA_FL_SEND_PAYLOAD",
7429 		"IDMA_FL_REQ_NEXT_DATA_FL",
7430 		"IDMA_FL_SEND_NEXT_PCIEHDR",
7431 		"IDMA_FL_SEND_PADDING",
7432 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
7433 	};
7434 	static const u32 sge_regs[] = {
7435 		A_SGE_DEBUG_DATA_LOW_INDEX_2,
7436 		A_SGE_DEBUG_DATA_LOW_INDEX_3,
7437 		A_SGE_DEBUG_DATA_HIGH_INDEX_10,
7438 	};
7439 	const char **sge_idma_decode;
7440 	int sge_idma_decode_nstates;
7441 	int i;
7442 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
7443 
7444 	/* Select the right set of decode strings to dump depending on the
7445 	 * adapter chip type.
7446 	 */
7447 	switch (chip_version) {
7448 	case CHELSIO_T4:
7449 		sge_idma_decode = (const char **)t4_decode;
7450 		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
7451 		break;
7452 
7453 	case CHELSIO_T5:
7454 		sge_idma_decode = (const char **)t5_decode;
7455 		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
7456 		break;
7457 
7458 	case CHELSIO_T6:
7459 		sge_idma_decode = (const char **)t6_decode;
7460 		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
7461 		break;
7462 
7463 	default:
7464 		CH_ERR(adapter,	"Unsupported chip version %d\n", chip_version);
7465 		return;
7466 	}
7467 
7468 	if (state < sge_idma_decode_nstates)
7469 		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
7470 	else
7471 		CH_WARN(adapter, "idma state %d unknown\n", state);
7472 
7473 	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
7474 		CH_WARN(adapter, "SGE register %#x value %#x\n",
7475 			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
7476 }
7477 
7478 /**
7479  *      t4_sge_ctxt_flush - flush the SGE context cache
7480  *      @adap: the adapter
7481  *      @mbox: mailbox to use for the FW command
7482  *
7483  *      Issues a FW command through the given mailbox to flush the
7484  *      SGE context cache.
7485  */
7486 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
7487 {
7488 	int ret;
7489 	u32 ldst_addrspace;
7490 	struct fw_ldst_cmd c;
7491 
7492 	memset(&c, 0, sizeof(c));
7493 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(ctxt_type == CTXT_EGRESS ?
7494 						 FW_LDST_ADDRSPC_SGE_EGRC :
7495 						 FW_LDST_ADDRSPC_SGE_INGC);
7496 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7497 					F_FW_CMD_REQUEST | F_FW_CMD_READ |
7498 					ldst_addrspace);
7499 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7500 	c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
7501 
7502 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7503 	return ret;
7504 }
7505 
7506 /**
7507  *	t4_read_sge_dbqtimers - reag SGE Doorbell Queue Timer values
7508  *	@adap - the adapter
7509  *	@ndbqtimers: size of the provided SGE Doorbell Queue Timer table
7510  *	@dbqtimers: SGE Doorbell Queue Timer table
7511  *
7512  *	Reads the SGE Doorbell Queue Timer values into the provided table.
7513  *	Returns 0 on success (Firmware and Hardware support this feature),
7514  *	an error on failure.
7515  */
7516 int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
7517 			  u16 *dbqtimers)
7518 {
7519 	int ret, dbqtimerix;
7520 
7521 	ret = 0;
7522 	dbqtimerix = 0;
7523 	while (dbqtimerix < ndbqtimers) {
7524 		int nparams, param;
7525 		u32 params[7], vals[7];
7526 
7527 		nparams = ndbqtimers - dbqtimerix;
7528 		if (nparams > ARRAY_SIZE(params))
7529 			nparams = ARRAY_SIZE(params);
7530 
7531 		for (param = 0; param < nparams; param++)
7532 			params[param] =
7533 			  (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
7534 			   V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DBQ_TIMER) |
7535 			   V_FW_PARAMS_PARAM_Y(dbqtimerix + param));
7536 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
7537 				      nparams, params, vals);
7538 		if (ret)
7539 			break;
7540 
7541 		for (param = 0; param < nparams; param++)
7542 			dbqtimers[dbqtimerix++] = vals[param];
7543 	}
7544 	return ret;
7545 }
7546 
7547 /**
7548  *      t4_fw_hello - establish communication with FW
7549  *      @adap: the adapter
7550  *      @mbox: mailbox to use for the FW command
7551  *      @evt_mbox: mailbox to receive async FW events
7552  *      @master: specifies the caller's willingness to be the device master
7553  *	@state: returns the current device state (if non-NULL)
7554  *
7555  *	Issues a command to establish communication with FW.  Returns either
7556  *	an error (negative integer) or the mailbox of the Master PF.
7557  */
7558 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
7559 		enum dev_master master, enum dev_state *state)
7560 {
7561 	int ret;
7562 	struct fw_hello_cmd c;
7563 	u32 v;
7564 	unsigned int master_mbox;
7565 	int retries = FW_CMD_HELLO_RETRIES;
7566 
7567 retry:
7568 	memset(&c, 0, sizeof(c));
7569 	INIT_CMD(c, HELLO, WRITE);
7570 	c.err_to_clearinit = cpu_to_be32(
7571 		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
7572 		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
7573 		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
7574 					mbox : M_FW_HELLO_CMD_MBMASTER) |
7575 		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
7576 		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
7577 		F_FW_HELLO_CMD_CLEARINIT);
7578 
7579 	/*
7580 	 * Issue the HELLO command to the firmware.  If it's not successful
7581 	 * but indicates that we got a "busy" or "timeout" condition, retry
7582 	 * the HELLO until we exhaust our retry limit.  If we do exceed our
7583 	 * retry limit, check to see if the firmware left us any error
7584 	 * information and report that if so ...
7585 	 */
7586 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7587 	if (ret != FW_SUCCESS) {
7588 		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
7589 			goto retry;
7590 		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
7591 			t4_report_fw_error(adap);
7592 		return ret;
7593 	}
7594 
7595 	v = be32_to_cpu(c.err_to_clearinit);
7596 	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
7597 	if (state) {
7598 		if (v & F_FW_HELLO_CMD_ERR)
7599 			*state = DEV_STATE_ERR;
7600 		else if (v & F_FW_HELLO_CMD_INIT)
7601 			*state = DEV_STATE_INIT;
7602 		else
7603 			*state = DEV_STATE_UNINIT;
7604 	}
7605 
7606 	/*
7607 	 * If we're not the Master PF then we need to wait around for the
7608 	 * Master PF Driver to finish setting up the adapter.
7609 	 *
7610 	 * Note that we also do this wait if we're a non-Master-capable PF and
7611 	 * there is no current Master PF; a Master PF may show up momentarily
7612 	 * and we wouldn't want to fail pointlessly.  (This can happen when an
7613 	 * OS loads lots of different drivers rapidly at the same time).  In
7614 	 * this case, the Master PF returned by the firmware will be
7615 	 * M_PCIE_FW_MASTER so the test below will work ...
7616 	 */
7617 	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
7618 	    master_mbox != mbox) {
7619 		int waiting = FW_CMD_HELLO_TIMEOUT;
7620 
7621 		/*
7622 		 * Wait for the firmware to either indicate an error or
7623 		 * initialized state.  If we see either of these we bail out
7624 		 * and report the issue to the caller.  If we exhaust the
7625 		 * "hello timeout" and we haven't exhausted our retries, try
7626 		 * again.  Otherwise bail with a timeout error.
7627 		 */
7628 		for (;;) {
7629 			u32 pcie_fw;
7630 
7631 			msleep(50);
7632 			waiting -= 50;
7633 
7634 			/*
7635 			 * If neither Error nor Initialialized are indicated
7636 			 * by the firmware keep waiting till we exaust our
7637 			 * timeout ... and then retry if we haven't exhausted
7638 			 * our retries ...
7639 			 */
7640 			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
7641 			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
7642 				if (waiting <= 0) {
7643 					if (retries-- > 0)
7644 						goto retry;
7645 
7646 					return -ETIMEDOUT;
7647 				}
7648 				continue;
7649 			}
7650 
7651 			/*
7652 			 * We either have an Error or Initialized condition
7653 			 * report errors preferentially.
7654 			 */
7655 			if (state) {
7656 				if (pcie_fw & F_PCIE_FW_ERR)
7657 					*state = DEV_STATE_ERR;
7658 				else if (pcie_fw & F_PCIE_FW_INIT)
7659 					*state = DEV_STATE_INIT;
7660 			}
7661 
7662 			/*
7663 			 * If we arrived before a Master PF was selected and
7664 			 * there's not a valid Master PF, grab its identity
7665 			 * for our caller.
7666 			 */
7667 			if (master_mbox == M_PCIE_FW_MASTER &&
7668 			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
7669 				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
7670 			break;
7671 		}
7672 	}
7673 
7674 	return master_mbox;
7675 }
7676 
7677 /**
7678  *	t4_fw_bye - end communication with FW
7679  *	@adap: the adapter
7680  *	@mbox: mailbox to use for the FW command
7681  *
7682  *	Issues a command to terminate communication with FW.
7683  */
7684 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
7685 {
7686 	struct fw_bye_cmd c;
7687 
7688 	memset(&c, 0, sizeof(c));
7689 	INIT_CMD(c, BYE, WRITE);
7690 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7691 }
7692 
7693 /**
7694  *	t4_fw_reset - issue a reset to FW
7695  *	@adap: the adapter
7696  *	@mbox: mailbox to use for the FW command
7697  *	@reset: specifies the type of reset to perform
7698  *
7699  *	Issues a reset command of the specified type to FW.
7700  */
7701 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
7702 {
7703 	struct fw_reset_cmd c;
7704 
7705 	memset(&c, 0, sizeof(c));
7706 	INIT_CMD(c, RESET, WRITE);
7707 	c.val = cpu_to_be32(reset);
7708 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7709 }
7710 
7711 /**
7712  *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
7713  *	@adap: the adapter
7714  *	@mbox: mailbox to use for the FW RESET command (if desired)
7715  *	@force: force uP into RESET even if FW RESET command fails
7716  *
7717  *	Issues a RESET command to firmware (if desired) with a HALT indication
7718  *	and then puts the microprocessor into RESET state.  The RESET command
7719  *	will only be issued if a legitimate mailbox is provided (mbox <=
7720  *	M_PCIE_FW_MASTER).
7721  *
7722  *	This is generally used in order for the host to safely manipulate the
7723  *	adapter without fear of conflicting with whatever the firmware might
7724  *	be doing.  The only way out of this state is to RESTART the firmware
7725  *	...
7726  */
7727 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
7728 {
7729 	int ret = 0;
7730 
7731 	/*
7732 	 * If a legitimate mailbox is provided, issue a RESET command
7733 	 * with a HALT indication.
7734 	 */
7735 	if (mbox <= M_PCIE_FW_MASTER) {
7736 		struct fw_reset_cmd c;
7737 
7738 		memset(&c, 0, sizeof(c));
7739 		INIT_CMD(c, RESET, WRITE);
7740 		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
7741 		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
7742 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7743 	}
7744 
7745 	/*
7746 	 * Normally we won't complete the operation if the firmware RESET
7747 	 * command fails but if our caller insists we'll go ahead and put the
7748 	 * uP into RESET.  This can be useful if the firmware is hung or even
7749 	 * missing ...  We'll have to take the risk of putting the uP into
7750 	 * RESET without the cooperation of firmware in that case.
7751 	 *
7752 	 * We also force the firmware's HALT flag to be on in case we bypassed
7753 	 * the firmware RESET command above or we're dealing with old firmware
7754 	 * which doesn't have the HALT capability.  This will serve as a flag
7755 	 * for the incoming firmware to know that it's coming out of a HALT
7756 	 * rather than a RESET ... if it's new enough to understand that ...
7757 	 */
7758 	if (ret == 0 || force) {
7759 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
7760 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
7761 				 F_PCIE_FW_HALT);
7762 	}
7763 
7764 	/*
7765 	 * And we always return the result of the firmware RESET command
7766 	 * even when we force the uP into RESET ...
7767 	 */
7768 	return ret;
7769 }
7770 
7771 /**
7772  *	t4_fw_restart - restart the firmware by taking the uP out of RESET
7773  *	@adap: the adapter
7774  *	@reset: if we want to do a RESET to restart things
7775  *
7776  *	Restart firmware previously halted by t4_fw_halt().  On successful
7777  *	return the previous PF Master remains as the new PF Master and there
7778  *	is no need to issue a new HELLO command, etc.
7779  *
7780  *	We do this in two ways:
7781  *
7782  *	 1. If we're dealing with newer firmware we'll simply want to take
7783  *	    the chip's microprocessor out of RESET.  This will cause the
7784  *	    firmware to start up from its start vector.  And then we'll loop
7785  *	    until the firmware indicates it's started again (PCIE_FW.HALT
7786  *	    reset to 0) or we timeout.
7787  *
7788  *	 2. If we're dealing with older firmware then we'll need to RESET
7789  *	    the chip since older firmware won't recognize the PCIE_FW.HALT
7790  *	    flag and automatically RESET itself on startup.
7791  */
7792 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
7793 {
7794 	if (reset) {
7795 		/*
7796 		 * Since we're directing the RESET instead of the firmware
7797 		 * doing it automatically, we need to clear the PCIE_FW.HALT
7798 		 * bit.
7799 		 */
7800 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
7801 
7802 		/*
7803 		 * If we've been given a valid mailbox, first try to get the
7804 		 * firmware to do the RESET.  If that works, great and we can
7805 		 * return success.  Otherwise, if we haven't been given a
7806 		 * valid mailbox or the RESET command failed, fall back to
7807 		 * hitting the chip with a hammer.
7808 		 */
7809 		if (mbox <= M_PCIE_FW_MASTER) {
7810 			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
7811 			msleep(100);
7812 			if (t4_fw_reset(adap, mbox,
7813 					F_PIORST | F_PIORSTMODE) == 0)
7814 				return 0;
7815 		}
7816 
7817 		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
7818 		msleep(2000);
7819 	} else {
7820 		int ms;
7821 
7822 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
7823 		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
7824 			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
7825 				return FW_SUCCESS;
7826 			msleep(100);
7827 			ms += 100;
7828 		}
7829 		return -ETIMEDOUT;
7830 	}
7831 	return 0;
7832 }
7833 
7834 /**
7835  *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
7836  *	@adap: the adapter
7837  *	@mbox: mailbox to use for the FW RESET command (if desired)
7838  *	@fw_data: the firmware image to write
7839  *	@size: image size
7840  *	@force: force upgrade even if firmware doesn't cooperate
7841  *
7842  *	Perform all of the steps necessary for upgrading an adapter's
7843  *	firmware image.  Normally this requires the cooperation of the
7844  *	existing firmware in order to halt all existing activities
7845  *	but if an invalid mailbox token is passed in we skip that step
7846  *	(though we'll still put the adapter microprocessor into RESET in
7847  *	that case).
7848  *
7849  *	On successful return the new firmware will have been loaded and
7850  *	the adapter will have been fully RESET losing all previous setup
7851  *	state.  On unsuccessful return the adapter may be completely hosed ...
7852  *	positive errno indicates that the adapter is ~probably~ intact, a
7853  *	negative errno indicates that things are looking bad ...
7854  */
7855 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
7856 		  const u8 *fw_data, unsigned int size, int force)
7857 {
7858 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
7859 	unsigned int bootstrap =
7860 	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
7861 	int reset, ret;
7862 
7863 	if (!t4_fw_matches_chip(adap, fw_hdr))
7864 		return -EINVAL;
7865 
7866 	/* Disable FW_OK flags so that mbox commands with FW_OK flags check
7867 	 * wont be send when we are flashing FW.
7868 	 */
7869 	adap->flags &= ~FW_OK;
7870 
7871 	if (!bootstrap) {
7872 		ret = t4_fw_halt(adap, mbox, force);
7873 		if (ret < 0 && !force)
7874 			goto out;
7875 	}
7876 
7877 	ret = t4_load_fw(adap, fw_data, size, bootstrap);
7878 	if (ret < 0 || bootstrap)
7879 		goto out;
7880 
7881 	/*
7882 	 * If there was a Firmware Configuration File staored in FLASH,
7883 	 * there's a good chance that it won't be compatible with the new
7884 	 * Firmware.  In order to prevent difficult to diagnose adapter
7885 	 * initialization issues, we clear out the Firmware Configuration File
7886 	 * portion of the FLASH .  The user will need to re-FLASH a new
7887 	 * Firmware Configuration File which is compatible with the new
7888 	 * Firmware if that's desired.
7889 	 */
7890 	(void)t4_load_cfg(adap, NULL, 0);
7891 
7892 	/*
7893 	 * Older versions of the firmware don't understand the new
7894 	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
7895 	 * restart.  So for newly loaded older firmware we'll have to do the
7896 	 * RESET for it so it starts up on a clean slate.  We can tell if
7897 	 * the newly loaded firmware will handle this right by checking
7898 	 * its header flags to see if it advertises the capability.
7899 	 */
7900 	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
7901 	ret = t4_fw_restart(adap, mbox, reset);
7902 
7903 	/* Grab potentially new Firmware Device Log parameters so we can see
7904 	 * how helthy the new Firmware is.  It's okay to contact the new
7905 	 * Firmware for these parameters even though, as far as it's
7906 	 * concerned, we've never said "HELLO" to it ...
7907 	 */
7908 	(void)t4_init_devlog_params(adap, 1);
7909 
7910 out:
7911 	adap->flags |= FW_OK;
7912 	return ret;
7913 }
7914 
7915 /**
7916  *	t4_fl_pkt_align - return the fl packet alignment
7917  *	@adap: the adapter
7918  *	is_packed: True when the driver uses packed FLM mode
7919  *
7920  *	T4 has a single field to specify the packing and padding boundary.
7921  *	T5 onwards has separate fields for this and hence the alignment for
7922  *	next packet offset is maximum of these two.
7923  *
7924  */
7925 int t4_fl_pkt_align(struct adapter *adap, bool is_packed)
7926 {
7927 	u32 sge_control, sge_control2;
7928 	unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
7929 
7930 	sge_control = t4_read_reg(adap, A_SGE_CONTROL);
7931 
7932 	/* T4 uses a single control field to specify both the PCIe Padding and
7933 	 * Packing Boundary.  T5 introduced the ability to specify these
7934 	 * separately.  The actual Ingress Packet Data alignment boundary
7935 	 * within Packed Buffer Mode is the maximum of these two
7936 	 * specifications.  (Note that it makes no real practical sense to
7937 	 * have the Pading Boudary be larger than the Packing Boundary but you
7938 	 * could set the chip up that way and, in fact, legacy T4 code would
7939 	 * end doing this because it would initialize the Padding Boundary and
7940 	 * leave the Packing Boundary initialized to 0 (16 bytes).)
7941 	 * Padding Boundary values in T6 starts from 8B,
7942 	 * where as it is 32B for T4 and T5.
7943 	 */
7944 	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
7945 		ingpad_shift = X_INGPADBOUNDARY_SHIFT;
7946 	else
7947 		ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
7948 
7949 	ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
7950 
7951 	fl_align = ingpadboundary;
7952 	if (!is_t4(adap->params.chip) && is_packed) {
7953 		/* T5 has a weird interpretation of one of the PCIe Packing
7954 		 * Boundary values.  No idea why ...
7955 		 */
7956 		sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
7957 		ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
7958 		if (ingpackboundary == X_INGPACKBOUNDARY_16B)
7959 			ingpackboundary = 16;
7960 		else
7961 			ingpackboundary = 1 << (ingpackboundary +
7962 						X_INGPACKBOUNDARY_SHIFT);
7963 
7964 		fl_align = max(ingpadboundary, ingpackboundary);
7965 	}
7966 	return fl_align;
7967 }
7968 
7969 /**
7970  *	t4_fixup_host_params_compat - fix up host-dependent parameters
7971  *	@adap: the adapter
7972  *	@page_size: the host's Base Page Size
7973  *	@cache_line_size: the host's Cache Line Size
7974  *	@chip_compat: maintain compatibility with designated chip
7975  *
7976  *	Various registers in the chip contain values which are dependent on the
7977  *	host's Base Page and Cache Line Sizes.  This function will fix all of
7978  *	those registers with the appropriate values as passed in ...
7979  *
7980  *	@chip_compat is used to limit the set of changes that are made
7981  *	to be compatible with the indicated chip release.  This is used by
7982  *	drivers to maintain compatibility with chip register settings when
7983  *	the drivers haven't [yet] been updated with new chip support.
7984  */
7985 int t4_fixup_host_params_compat(struct adapter *adap,
7986 				unsigned int page_size,
7987 				unsigned int cache_line_size,
7988 				enum chip_type chip_compat)
7989 {
7990 	unsigned int page_shift = fls(page_size) - 1;
7991 	unsigned int sge_hps = page_shift - 10;
7992 	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
7993 	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
7994 	unsigned int fl_align_log = fls(fl_align) - 1;
7995 
7996 	t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
7997 		     V_HOSTPAGESIZEPF0(sge_hps) |
7998 		     V_HOSTPAGESIZEPF1(sge_hps) |
7999 		     V_HOSTPAGESIZEPF2(sge_hps) |
8000 		     V_HOSTPAGESIZEPF3(sge_hps) |
8001 		     V_HOSTPAGESIZEPF4(sge_hps) |
8002 		     V_HOSTPAGESIZEPF5(sge_hps) |
8003 		     V_HOSTPAGESIZEPF6(sge_hps) |
8004 		     V_HOSTPAGESIZEPF7(sge_hps));
8005 
8006 	if (is_t4(adap->params.chip) || is_t4(chip_compat)) {
8007 		t4_set_reg_field(adap, A_SGE_CONTROL,
8008 				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
8009 				 F_EGRSTATUSPAGESIZE,
8010 				 V_INGPADBOUNDARY(fl_align_log -
8011 						  X_INGPADBOUNDARY_SHIFT) |
8012 				 V_EGRSTATUSPAGESIZE(stat_len != 64));
8013 	} else {
8014 		unsigned int pack_align;
8015 		unsigned int ingpad, ingpack;
8016 		unsigned int pcie_cap;
8017 
8018 		/* T5 introduced the separation of the Free List Padding and
8019 		 * Packing Boundaries.  Thus, we can select a smaller Padding
8020 		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
8021 		 * Bandwidth, and use a Packing Boundary which is large enough
8022 		 * to avoid false sharing between CPUs, etc.
8023 		 *
8024 		 * For the PCI Link, the smaller the Padding Boundary the
8025 		 * better.  For the Memory Controller, a smaller Padding
8026 		 * Boundary is better until we cross under the Memory Line
8027 		 * Size (the minimum unit of transfer to/from Memory).  If we
8028 		 * have a Padding Boundary which is smaller than the Memory
8029 		 * Line Size, that'll involve a Read-Modify-Write cycle on the
8030 		 * Memory Controller which is never good.
8031 		 */
8032 
8033 		/* We want the Packing Boundary to be based on the Cache Line
8034 		 * Size in order to help avoid False Sharing performance
8035 		 * issues between CPUs, etc.  We also want the Packing
8036 		 * Boundary to incorporate the PCI-E Maximum Payload Size.  We
8037 		 * get best performance when the Packing Boundary is a
8038 		 * multiple of the Maximum Payload Size.
8039 		 */
8040 		pack_align = fl_align;
8041 		pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
8042 		if (pcie_cap) {
8043 			unsigned int mps, mps_log;
8044 			u16 devctl;
8045 
8046 			/*
8047 			 * The PCIe Device Control Maximum Payload Size field
8048 			 * [bits 7:5] encodes sizes as powers of 2 starting at
8049 			 * 128 bytes.
8050 			 */
8051 			t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
8052 					    &devctl);
8053 			mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
8054 			mps = 1 << mps_log;
8055 			if (mps > pack_align)
8056 				pack_align = mps;
8057 		}
8058 
8059 		/* N.B. T5/T6 have a crazy special interpretation of the "0"
8060 		 * value for the Packing Boundary.  This corresponds to 16
8061 		 * bytes instead of the expected 32 bytes.  So if we want 32
8062 		 * bytes, the best we can really do is 64 bytes ...
8063 		 */
8064 		if (pack_align <= 16) {
8065 			ingpack = X_INGPACKBOUNDARY_16B;
8066 			fl_align = 16;
8067 		} else if (pack_align == 32) {
8068 			ingpack = X_INGPACKBOUNDARY_64B;
8069 			fl_align = 64;
8070 		} else {
8071 			unsigned int pack_align_log = fls(pack_align) - 1;
8072 			ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
8073 			fl_align = pack_align;
8074 		}
8075 
8076 		/* Use the smallest Ingress Padding which isn't smaller than
8077 		 * the Memory Controller Read/Write Size.  We'll take that as
8078 		 * being 8 bytes since we don't know of any system with a
8079 		 * wider Memory Controller Bus Width.
8080 		 */
8081 		if (is_t5(adap->params.chip))
8082 			ingpad = X_INGPADBOUNDARY_32B;
8083 		else
8084 			ingpad = X_T6_INGPADBOUNDARY_8B;
8085 
8086 		t4_set_reg_field(adap, A_SGE_CONTROL,
8087 				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
8088 				 F_EGRSTATUSPAGESIZE,
8089 				 V_INGPADBOUNDARY(ingpad) |
8090 				 V_EGRSTATUSPAGESIZE(stat_len != 64));
8091 		t4_set_reg_field(adap, A_SGE_CONTROL2,
8092 				 V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
8093 				 V_INGPACKBOUNDARY(ingpack));
8094 	}
8095 	/*
8096 	 * Adjust various SGE Free List Host Buffer Sizes.
8097 	 *
8098 	 * This is something of a crock since we're using fixed indices into
8099 	 * the array which are also known by the sge.c code and the T4
8100 	 * Firmware Configuration File.  We need to come up with a much better
8101 	 * approach to managing this array.  For now, the first four entries
8102 	 * are:
8103 	 *
8104 	 *   0: Host Page Size
8105 	 *   1: 64KB
8106 	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
8107 	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
8108 	 *
8109 	 * For the single-MTU buffers in unpacked mode we need to include
8110 	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
8111 	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
8112 	 * Padding boundary.  All of these are accommodated in the Factory
8113 	 * Default Firmware Configuration File but we need to adjust it for
8114 	 * this host's cache line size.
8115 	 */
8116 	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
8117 	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
8118 		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align-1)
8119 		     & ~(fl_align-1));
8120 	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
8121 		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align-1)
8122 		     & ~(fl_align-1));
8123 
8124 	t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));
8125 
8126 	return 0;
8127 }
8128 
8129 /**
8130  *	t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
8131  *	@adap: the adapter
8132  *	@page_size: the host's Base Page Size
8133  *	@cache_line_size: the host's Cache Line Size
8134  *
8135  *	Various registers in T4 contain values which are dependent on the
8136  *	host's Base Page and Cache Line Sizes.  This function will fix all of
8137  *	those registers with the appropriate values as passed in ...
8138  *
8139  *	This routine makes changes which are compatible with T4 chips.
8140  */
8141 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
8142 			 unsigned int cache_line_size)
8143 {
8144 	return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
8145 					   T4_LAST_REV);
8146 }
8147 
8148 /**
8149  *	t4_fw_initialize - ask FW to initialize the device
8150  *	@adap: the adapter
8151  *	@mbox: mailbox to use for the FW command
8152  *
8153  *	Issues a command to FW to partially initialize the device.  This
8154  *	performs initialization that generally doesn't depend on user input.
8155  */
8156 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
8157 {
8158 	struct fw_initialize_cmd c;
8159 
8160 	memset(&c, 0, sizeof(c));
8161 	INIT_CMD(c, INITIALIZE, WRITE);
8162 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8163 }
8164 
8165 /**
8166  *	t4_query_params_rw - query FW or device parameters
8167  *	@adap: the adapter
8168  *	@mbox: mailbox to use for the FW command
8169  *	@pf: the PF
8170  *	@vf: the VF
8171  *	@nparams: the number of parameters
8172  *	@params: the parameter names
8173  *	@val: the parameter values
8174  *	@rw: Write and read flag
8175  *	@sleep_ok: if true, we may sleep awaiting mbox cmd completion
8176  *
8177  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
8178  *	queried at once.
8179  */
8180 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
8181 		       unsigned int vf, unsigned int nparams, const u32 *params,
8182 		       u32 *val, int rw, bool sleep_ok)
8183 {
8184 	int i, ret;
8185 	struct fw_params_cmd c;
8186 	__be32 *p = &c.param[0].mnem;
8187 
8188 	if (nparams > 7)
8189 		return -EINVAL;
8190 
8191 	memset(&c, 0, sizeof(c));
8192 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
8193 				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
8194 				  V_FW_PARAMS_CMD_PFN(pf) |
8195 				  V_FW_PARAMS_CMD_VFN(vf));
8196 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
8197 
8198 	for (i = 0; i < nparams; i++) {
8199 		*p++ = cpu_to_be32(*params++);
8200 		if (rw)
8201 			*p = cpu_to_be32(*(val + i));
8202 		p++;
8203 	}
8204 
8205 	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8206 
8207 	/*
8208 	 * We always copy back the reults, even if there's an error.  We'll
8209 	 * get an error if any of the parameters was unknown to the Firmware,
8210 	 * but there will be results for the others ...  (Older Firmware
8211 	 * stopped at the first unknown parameter; newer Firmware processes
8212 	 * them all and flags the unknown parameters with a return value of
8213 	 * ~0UL.)
8214 	 */
8215 	for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
8216 		*val++ = be32_to_cpu(*p);
8217 
8218 	return ret;
8219 }
8220 
/*
 * Convenience wrapper: read FW/device parameters via t4_query_params_rw()
 * with no write-back (rw=0), sleeping while awaiting the mailbox reply.
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
				  true);
}
8228 
/*
 * Non-sleeping variant of t4_query_params(): identical except that the
 * mailbox completion is awaited without sleeping (sleep_ok=false).
 */
int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
				  false);
}
8236 
8237 /**
8238  *      t4_set_params_timeout - sets FW or device parameters
8239  *      @adap: the adapter
8240  *      @mbox: mailbox to use for the FW command
8241  *      @pf: the PF
8242  *      @vf: the VF
8243  *      @nparams: the number of parameters
8244  *      @params: the parameter names
8245  *      @val: the parameter values
8246  *      @timeout: the timeout time
8247  *
8248  *      Sets the value of FW or device parameters.  Up to 7 parameters can be
8249  *      specified at once.
8250  */
8251 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
8252 			  unsigned int pf, unsigned int vf,
8253 			  unsigned int nparams, const u32 *params,
8254 			  const u32 *val, int timeout)
8255 {
8256 	struct fw_params_cmd c;
8257 	__be32 *p = &c.param[0].mnem;
8258 
8259 	if (nparams > 7)
8260 		return -EINVAL;
8261 
8262 	memset(&c, 0, sizeof(c));
8263 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
8264 				  F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8265 				  V_FW_PARAMS_CMD_PFN(pf) |
8266 				  V_FW_PARAMS_CMD_VFN(vf));
8267 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
8268 
8269 	while (nparams--) {
8270 		*p++ = cpu_to_be32(*params++);
8271 		*p++ = cpu_to_be32(*val++);
8272 	}
8273 
8274 	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
8275 }
8276 
8277 /**
8278  *	t4_set_params - sets FW or device parameters
8279  *	@adap: the adapter
8280  *	@mbox: mailbox to use for the FW command
8281  *	@pf: the PF
8282  *	@vf: the VF
8283  *	@nparams: the number of parameters
8284  *	@params: the parameter names
8285  *	@val: the parameter values
8286  *
8287  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
8288  *	specified at once.
8289  */
8290 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
8291 		  unsigned int vf, unsigned int nparams, const u32 *params,
8292 		  const u32 *val)
8293 {
8294 	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
8295 				     FW_CMD_MAX_TIMEOUT);
8296 }
8297 
8298 /**
8299  *	t4_cfg_pfvf - configure PF/VF resource limits
8300  *	@adap: the adapter
8301  *	@mbox: mailbox to use for the FW command
8302  *	@pf: the PF being configured
8303  *	@vf: the VF being configured
8304  *	@txq: the max number of egress queues
8305  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
8306  *	@rxqi: the max number of interrupt-capable ingress queues
8307  *	@rxq: the max number of interruptless ingress queues
8308  *	@tc: the PCI traffic class
8309  *	@vi: the max number of virtual interfaces
8310  *	@cmask: the channel access rights mask for the PF/VF
8311  *	@pmask: the port access rights mask for the PF/VF
8312  *	@nexact: the maximum number of exact MPS filters
8313  *	@rcaps: read capabilities
8314  *	@wxcaps: write/execute capabilities
8315  *
8316  *	Configures resource limits and capabilities for a physical or virtual
8317  *	function.
8318  */
8319 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
8320 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
8321 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
8322 		unsigned int vi, unsigned int cmask, unsigned int pmask,
8323 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
8324 {
8325 	struct fw_pfvf_cmd c;
8326 
8327 	memset(&c, 0, sizeof(c));
8328 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
8329 				  F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
8330 				  V_FW_PFVF_CMD_VFN(vf));
8331 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
8332 	c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
8333 				     V_FW_PFVF_CMD_NIQ(rxq));
8334 	c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
8335 				    V_FW_PFVF_CMD_PMASK(pmask) |
8336 				    V_FW_PFVF_CMD_NEQ(txq));
8337 	c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
8338 				      V_FW_PFVF_CMD_NVI(vi) |
8339 				      V_FW_PFVF_CMD_NEXACTF(nexact));
8340 	c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
8341 				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
8342 				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
8343 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8344 }
8345 
8346 /**
8347  *	t4_alloc_vi_func - allocate a virtual interface
8348  *	@adap: the adapter
8349  *	@mbox: mailbox to use for the FW command
8350  *	@port: physical port associated with the VI
8351  *	@pf: the PF owning the VI
8352  *	@vf: the VF owning the VI
8353  *	@nmac: number of MAC addresses needed (1 to 5)
8354  *	@mac: the MAC addresses of the VI
8355  *	@rss_size: size of RSS table slice associated with this VI
8356  *	@portfunc: which Port Application Function MAC Address is desired
8357  *	@idstype: Intrusion Detection Type
8358  *
8359  *	Allocates a virtual interface for the given physical port.  If @mac is
8360  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
8361  *	If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
8362  *	@mac should be large enough to hold @nmac Ethernet addresses, they are
8363  *	stored consecutively so the space needed is @nmac * 6 bytes.
8364  *	Returns a negative error number or the non-negative VI id.
8365  */
8366 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
8367 		     unsigned int port, unsigned int pf, unsigned int vf,
8368 		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
8369 		     u8 *vivld, u8 *vin,
8370 		     unsigned int portfunc, unsigned int idstype)
8371 {
8372 	int ret;
8373 	struct fw_vi_cmd c;
8374 
8375 	memset(&c, 0, sizeof(c));
8376 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
8377 				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
8378 				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
8379 	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
8380 	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
8381 				     V_FW_VI_CMD_FUNC(portfunc));
8382 	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
8383 	c.nmac = nmac - 1;
8384 	if(!rss_size)
8385 		c.norss_rsssize = F_FW_VI_CMD_NORSS;
8386 
8387 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8388 	if (ret)
8389 		return ret;
8390 
8391 	if (mac) {
8392 		memcpy(mac, c.mac, sizeof(c.mac));
8393 		switch (nmac) {
8394 		case 5:
8395 			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
8396 			/* FALLTHRU */
8397 		case 4:
8398 			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
8399 			/* FALLTHRU */
8400 		case 3:
8401 			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
8402 			/* FALLTHRU */
8403 		case 2:
8404 			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
8405 		}
8406 	}
8407 	if (rss_size)
8408 		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
8409 
8410 	if (vivld)
8411 		*vivld = G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16));
8412 
8413 	if (vin)
8414 		*vin = G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16));
8415 
8416 	return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
8417 }
8418 
8419 /**
8420  *      t4_alloc_vi - allocate an [Ethernet Function] virtual interface
8421  *      @adap: the adapter
8422  *      @mbox: mailbox to use for the FW command
8423  *      @port: physical port associated with the VI
8424  *      @pf: the PF owning the VI
8425  *      @vf: the VF owning the VI
8426  *      @nmac: number of MAC addresses needed (1 to 5)
8427  *      @mac: the MAC addresses of the VI
8428  *      @rss_size: size of RSS table slice associated with this VI
8429  *
8430  *	backwards compatible and convieniance routine to allocate a Virtual
8431  *	Interface with a Ethernet Port Application Function and Intrustion
8432  *	Detection System disabled.
8433  */
8434 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
8435 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
8436 		unsigned int *rss_size, u8 *vivld, u8 *vin)
8437 {
8438 	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
8439 				vivld, vin, FW_VI_FUNC_ETH, 0);
8440 }
8441 
8442 
8443 /**
8444  * 	t4_free_vi - free a virtual interface
8445  * 	@adap: the adapter
8446  * 	@mbox: mailbox to use for the FW command
8447  * 	@pf: the PF owning the VI
8448  * 	@vf: the VF owning the VI
8449  * 	@viid: virtual interface identifiler
8450  *
8451  * 	Free a previously allocated virtual interface.
8452  */
8453 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
8454 	       unsigned int vf, unsigned int viid)
8455 {
8456 	struct fw_vi_cmd c;
8457 
8458 	memset(&c, 0, sizeof(c));
8459 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
8460 				  F_FW_CMD_REQUEST |
8461 				  F_FW_CMD_EXEC |
8462 				  V_FW_VI_CMD_PFN(pf) |
8463 				  V_FW_VI_CMD_VFN(vf));
8464 	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
8465 	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
8466 
8467 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8468 }
8469 
8470 /**
8471  *	t4_set_rxmode - set Rx properties of a virtual interface
8472  *	@adap: the adapter
8473  *	@mbox: mailbox to use for the FW command
8474  *	@viid: the VI id
8475  *	@mtu: the new MTU or -1
8476  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
8477  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
8478  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
8479  *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
8480  *	@sleep_ok: if true we may sleep while awaiting command completion
8481  *
8482  *	Sets Rx properties of a virtual interface.
8483  */
8484 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
8485 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
8486 		  bool sleep_ok)
8487 {
8488 	struct fw_vi_rxmode_cmd c;
8489 
8490 	/* convert to FW values */
8491 	if (mtu < 0)
8492 		mtu = M_FW_VI_RXMODE_CMD_MTU;
8493 	if (promisc < 0)
8494 		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
8495 	if (all_multi < 0)
8496 		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
8497 	if (bcast < 0)
8498 		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
8499 	if (vlanex < 0)
8500 		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
8501 
8502 	memset(&c, 0, sizeof(c));
8503 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
8504 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8505 				   V_FW_VI_RXMODE_CMD_VIID(viid));
8506 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
8507 	c.mtu_to_vlanexen =
8508 		cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
8509 			    V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
8510 			    V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
8511 			    V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
8512 			    V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
8513 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8514 }
8515 
8516 /**
8517  *	t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
8518  *	@adap: the adapter
8519  *	@viid: the VI id
8520  *	@mac: the MAC address
8521  *	@mask: the mask
8522  *	@vni: the VNI id for the tunnel protocol
8523  *	@vni_mask: mask for the VNI id
8524  *	@dip_hit: to enable DIP match for the MPS entry
8525  *	@lookup_type: MAC address for inner (1) or outer (0) header
8526  *	@sleep_ok: call is allowed to sleep
8527  *
8528  *	Allocates an MPS entry with specified MAC address and VNI value.
8529  *
8530  *	Returns a negative error number or the allocated index for this mac.
8531  */
8532 int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
8533 			    const u8 *addr, const u8 *mask, unsigned int vni,
8534 			    unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
8535 			    bool sleep_ok)
8536 {
8537 	struct fw_vi_mac_cmd c;
8538 	struct fw_vi_mac_vni *p = c.u.exact_vni;
8539 	int ret = 0;
8540 	u32 val;
8541 
8542 	memset(&c, 0, sizeof(c));
8543 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8544 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8545 				   V_FW_VI_MAC_CMD_VIID(viid));
8546 	val = V_FW_CMD_LEN16(1) |
8547 	      V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC_VNI);
8548 	c.freemacs_to_len16 = cpu_to_be32(val);
8549 	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
8550 				      V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
8551 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
8552 	memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));
8553 
8554 	p->lookup_type_to_vni = cpu_to_be32(V_FW_VI_MAC_CMD_VNI(vni) |
8555 					    V_FW_VI_MAC_CMD_DIP_HIT(dip_hit) |
8556 					    V_FW_VI_MAC_CMD_LOOKUP_TYPE(lookup_type));
8557 	p->vni_mask_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_VNI_MASK(vni_mask));
8558 
8559 	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
8560 	if (ret == 0)
8561 		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
8562 	return ret;
8563 }
8564 
8565 /**
8566  *	t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
8567  *	@adap: the adapter
8568  *	@viid: the VI id
8569  *	@mac: the MAC address
8570  *	@mask: the mask
8571  *	@idx: index at which to add this entry
8572  *	@port_id: the port index
8573  *	@lookup_type: MAC address for inner (1) or outer (0) header
8574  *	@sleep_ok: call is allowed to sleep
8575  *
8576  *	Adds the mac entry at the specified index using raw mac interface.
8577  *
8578  *	Returns a negative error number or the allocated index for this mac.
8579  */
8580 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
8581 			  const u8 *addr, const u8 *mask, unsigned int idx,
8582 			  u8 lookup_type, u8 port_id, bool sleep_ok)
8583 {
8584 	int ret = 0;
8585 	struct fw_vi_mac_cmd c;
8586 	struct fw_vi_mac_raw *p = &c.u.raw;
8587 	u32 val;
8588 
8589 	memset(&c, 0, sizeof(c));
8590 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8591 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8592 				   V_FW_VI_MAC_CMD_VIID(viid));
8593 	val = V_FW_CMD_LEN16(1) |
8594 	      V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
8595 	c.freemacs_to_len16 = cpu_to_be32(val);
8596 
8597 	/* Specify that this is an inner mac address */
8598 	p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));
8599 
8600 	/* Lookup Type. Outer header: 0, Inner header: 1 */
8601 	p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
8602 				   V_DATAPORTNUM(port_id));
8603 	/* Lookup mask and port mask */
8604 	p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
8605 				    V_DATAPORTNUM(M_DATAPORTNUM));
8606 
8607 	/* Copy the address and the mask */
8608 	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
8609 	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
8610 
8611 	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
8612 	if (ret == 0) {
8613 		ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
8614 		if (ret != idx)
8615 			ret = -ENOMEM;
8616 	}
8617 
8618 	return ret;
8619 }
8620 
8621 /**
8622  *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
8623  *	@adap: the adapter
8624  *	@mbox: mailbox to use for the FW command
8625  *	@viid: the VI id
8626  *	@free: if true any existing filters for this VI id are first removed
8627  *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
8628  *	@addr: the MAC address(es)
8629  *	@idx: where to store the index of each allocated filter
8630  *	@hash: pointer to hash address filter bitmap
8631  *	@sleep_ok: call is allowed to sleep
8632  *
8633  *	Allocates an exact-match filter for each of the supplied addresses and
8634  *	sets it to the corresponding address.  If @idx is not %NULL it should
8635  *	have at least @naddr entries, each of which will be set to the index of
8636  *	the filter allocated for the corresponding MAC address.  If a filter
8637  *	could not be allocated for an address its index is set to 0xffff.
8638  *	If @hash is not %NULL addresses that fail to allocate an exact filter
8639  *	are hashed and update the hash filter bitmap pointed at by @hash.
8640  *
8641  *	Returns a negative error number or the number of filters allocated.
8642  */
8643 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
8644 		      unsigned int viid, bool free, unsigned int naddr,
8645 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
8646 {
8647 	int offset, ret = 0;
8648 	struct fw_vi_mac_cmd c;
8649 	unsigned int nfilters = 0;
8650 	unsigned int max_naddr = adap->params.arch.mps_tcam_size;
8651 	unsigned int rem = naddr;
8652 
8653 	if (naddr > max_naddr)
8654 		return -EINVAL;
8655 
8656 	for (offset = 0; offset < naddr ; /**/) {
8657 		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
8658 					 ? rem
8659 					 : ARRAY_SIZE(c.u.exact));
8660 		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
8661 						     u.exact[fw_naddr]), 16);
8662 		struct fw_vi_mac_exact *p;
8663 		int i;
8664 
8665 		memset(&c, 0, sizeof(c));
8666 		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8667 					   F_FW_CMD_REQUEST |
8668 					   F_FW_CMD_WRITE |
8669 					   V_FW_CMD_EXEC(free) |
8670 					   V_FW_VI_MAC_CMD_VIID(viid));
8671 		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
8672 						  V_FW_CMD_LEN16(len16));
8673 
8674 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8675 			p->valid_to_idx =
8676 				cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
8677 					    V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
8678 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
8679 		}
8680 
8681 		/*
8682 		 * It's okay if we run out of space in our MAC address arena.
8683 		 * Some of the addresses we submit may get stored so we need
8684 		 * to run through the reply to see what the results were ...
8685 		 */
8686 		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8687 		if (ret && ret != -FW_ENOMEM)
8688 			break;
8689 
8690 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8691 			u16 index = G_FW_VI_MAC_CMD_IDX(
8692 						be16_to_cpu(p->valid_to_idx));
8693 
8694 			if (idx)
8695 				idx[offset+i] = (index >=  max_naddr
8696 						 ? 0xffff
8697 						 : index);
8698 			if (index < max_naddr)
8699 				nfilters++;
8700 			else if (hash)
8701 				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
8702 		}
8703 
8704 		free = false;
8705 		offset += fw_naddr;
8706 		rem -= fw_naddr;
8707 	}
8708 
8709 	if (ret == 0 || ret == -FW_ENOMEM)
8710 		ret = nfilters;
8711 	return ret;
8712 }
8713 
8714 /**
8715  *	t4_free_encap_mac_filt - frees MPS entry at given index
8716  *	@adap: the adapter
8717  *	@viid: the VI id
8718  *	@idx: index of MPS entry to be freed
8719  *	@sleep_ok: call is allowed to sleep
8720  *
8721  *	Frees the MPS entry at supplied index
8722  *
8723  *	Returns a negative error number or zero on success
8724  */
8725 int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
8726 			   int idx, bool sleep_ok)
8727 {
8728 	struct fw_vi_mac_exact *p;
8729 	struct fw_vi_mac_cmd c;
8730 	u8 addr[] = {0,0,0,0,0,0};
8731 	int ret = 0;
8732 	u32 exact;
8733 
8734 	memset(&c, 0, sizeof(c));
8735 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8736 				   F_FW_CMD_REQUEST |
8737 				   F_FW_CMD_WRITE |
8738 				   V_FW_CMD_EXEC(0) |
8739 				   V_FW_VI_MAC_CMD_VIID(viid));
8740 	exact = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC);
8741 	c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
8742 					  exact |
8743 					  V_FW_CMD_LEN16(1));
8744 	p = c.u.exact;
8745 	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
8746 				      V_FW_VI_MAC_CMD_IDX(idx));
8747 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
8748 
8749 	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
8750 	return ret;
8751 }
8752 
8753 /**
8754  *	t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
8755  *	@adap: the adapter
8756  *	@viid: the VI id
8757  *	@addr: the MAC address
8758  *	@mask: the mask
8759  *	@idx: index of the entry in mps tcam
8760  *	@lookup_type: MAC address for inner (1) or outer (0) header
8761  *	@port_id: the port index
8762  *	@sleep_ok: call is allowed to sleep
8763  *
8764  *	Removes the mac entry at the specified index using raw mac interface.
8765  *
8766  *	Returns a negative error number on failure.
8767  */
8768 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
8769 			 const u8 *addr, const u8 *mask, unsigned int idx,
8770 			 u8 lookup_type, u8 port_id, bool sleep_ok)
8771 {
8772 	struct fw_vi_mac_cmd c;
8773 	struct fw_vi_mac_raw *p = &c.u.raw;
8774 	u32 raw;
8775 
8776 	memset(&c, 0, sizeof(c));
8777 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8778 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8779 				   V_FW_CMD_EXEC(0) |
8780 				   V_FW_VI_MAC_CMD_VIID(viid));
8781 	raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
8782 	c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
8783 					  raw |
8784 					  V_FW_CMD_LEN16(1));
8785 
8786 	p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx) |
8787 				     FW_VI_MAC_ID_BASED_FREE);
8788 
8789 	/* Lookup Type. Outer header: 0, Inner header: 1 */
8790 	p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
8791 				   V_DATAPORTNUM(port_id));
8792 	/* Lookup mask and port mask */
8793 	p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
8794 				    V_DATAPORTNUM(M_DATAPORTNUM));
8795 
8796 	/* Copy the address and the mask */
8797 	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
8798 	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
8799 
8800 	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
8801 }
8802 
8803 /**
8804  *	t4_free_mac_filt - frees exact-match filters of given MAC addresses
8805  *	@adap: the adapter
8806  *	@mbox: mailbox to use for the FW command
8807  *	@viid: the VI id
8808  *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
8809  *	@addr: the MAC address(es)
8810  *	@sleep_ok: call is allowed to sleep
8811  *
8812  *	Frees the exact-match filter for each of the supplied addresses
8813  *
8814  *	Returns a negative error number or the number of filters freed.
8815  */
8816 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
8817 		      unsigned int viid, unsigned int naddr,
8818 		      const u8 **addr, bool sleep_ok)
8819 {
8820 	int offset, ret = 0;
8821 	struct fw_vi_mac_cmd c;
8822 	unsigned int nfilters = 0;
8823 	unsigned int max_naddr = is_t4(adap->params.chip) ?
8824 				       NUM_MPS_CLS_SRAM_L_INSTANCES :
8825 				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
8826 	unsigned int rem = naddr;
8827 
8828 	if (naddr > max_naddr)
8829 		return -EINVAL;
8830 
8831 	for (offset = 0; offset < (int)naddr ; /**/) {
8832 		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
8833 					 ? rem
8834 					 : ARRAY_SIZE(c.u.exact));
8835 		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
8836 						     u.exact[fw_naddr]), 16);
8837 		struct fw_vi_mac_exact *p;
8838 		int i;
8839 
8840 		memset(&c, 0, sizeof(c));
8841 		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8842 				     F_FW_CMD_REQUEST |
8843 				     F_FW_CMD_WRITE |
8844 				     V_FW_CMD_EXEC(0) |
8845 				     V_FW_VI_MAC_CMD_VIID(viid));
8846 		c.freemacs_to_len16 =
8847 				cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
8848 					    V_FW_CMD_LEN16(len16));
8849 
8850 		for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
8851 			p->valid_to_idx = cpu_to_be16(
8852 				F_FW_VI_MAC_CMD_VALID |
8853 				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));
8854 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
8855 		}
8856 
8857 		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8858 		if (ret)
8859 			break;
8860 
8861 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8862 			u16 index = G_FW_VI_MAC_CMD_IDX(
8863 						be16_to_cpu(p->valid_to_idx));
8864 
8865 			if (index < max_naddr)
8866 				nfilters++;
8867 		}
8868 
8869 		offset += fw_naddr;
8870 		rem -= fw_naddr;
8871 	}
8872 
8873 	if (ret == 0)
8874 		ret = nfilters;
8875 	return ret;
8876 }
8877 
8878 /**
8879  *	t4_change_mac - modifies the exact-match filter for a MAC address
8880  *	@adap: the adapter
8881  *	@mbox: mailbox to use for the FW command
8882  *	@viid: the VI id
8883  *	@idx: index of existing filter for old value of MAC address, or -1
8884  *	@addr: the new MAC address value
8885  *	@persist: whether a new MAC allocation should be persistent
8886  *	@add_smt: if true also add the address to the HW SMT
8887  *
8888  *	Modifies an exact-match filter and sets it to the new MAC address if
8889  *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
8890  *	latter case the address is added persistently if @persist is %true.
8891  *
8892  *	Note that in general it is not possible to modify the value of a given
8893  *	filter so the generic way to modify an address filter is to free the one
8894  *	being used by the old address value and allocate a new filter for the
8895  *	new address value.
8896  *
8897  *	Returns a negative error number or the index of the filter with the new
8898  *	MAC value.  Note that this index may differ from @idx.
8899  */
8900 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
8901 		  int idx, const u8 *addr, bool persist, u8 *smt_idx)
8902 {
8903 	/* This will add this mac address to the destination TCAM region */
8904 	return t4_add_mac(adap, mbox, viid, idx, addr, persist, smt_idx, 0);
8905 }
8906 
8907 /**
8908  *	t4_set_addr_hash - program the MAC inexact-match hash filter
8909  *	@adap: the adapter
8910  *	@mbox: mailbox to use for the FW command
8911  *	@viid: the VI id
8912  *	@ucast: whether the hash filter should also match unicast addresses
8913  *	@vec: the value to be written to the hash filter
8914  *	@sleep_ok: call is allowed to sleep
8915  *
8916  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
8917  */
8918 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
8919 		     bool ucast, u64 vec, bool sleep_ok)
8920 {
8921 	struct fw_vi_mac_cmd c;
8922 	u32 val;
8923 
8924 	memset(&c, 0, sizeof(c));
8925 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8926 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8927 				   V_FW_VI_ENABLE_CMD_VIID(viid));
8928 	val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
8929 	      V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
8930 	c.freemacs_to_len16 = cpu_to_be32(val);
8931 	c.u.hash.hashvec = cpu_to_be64(vec);
8932 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8933 }
8934 
8935 /**
8936  *      t4_enable_vi_params - enable/disable a virtual interface
8937  *      @adap: the adapter
8938  *      @mbox: mailbox to use for the FW command
8939  *      @viid: the VI id
8940  *      @rx_en: 1=enable Rx, 0=disable Rx
8941  *      @tx_en: 1=enable Tx, 0=disable Tx
8942  *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
8943  *
8944  *      Enables/disables a virtual interface.  Note that setting DCB Enable
8945  *      only makes sense when enabling a Virtual Interface ...
8946  */
8947 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
8948 			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
8949 {
8950 	struct fw_vi_enable_cmd c;
8951 
8952 	memset(&c, 0, sizeof(c));
8953 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
8954 				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8955 				   V_FW_VI_ENABLE_CMD_VIID(viid));
8956 	c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
8957 				     V_FW_VI_ENABLE_CMD_EEN(tx_en) |
8958 				     V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
8959 				     FW_LEN16(c));
8960 	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
8961 }
8962 
8963 /**
8964  *	t4_enable_vi - enable/disable a virtual interface
8965  *	@adap: the adapter
8966  *	@mbox: mailbox to use for the FW command
8967  *	@viid: the VI id
8968  *	@rx_en: 1=enable Rx, 0=disable Rx
8969  *	@tx_en: 1=enable Tx, 0=disable Tx
8970  *
8971  *	Enables/disables a virtual interface.  Note that setting DCB Enable
8972  *	only makes sense when enabling a Virtual Interface ...
8973  */
8974 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
8975 		 bool rx_en, bool tx_en)
8976 {
8977 	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
8978 }
8979 
8980 /**
8981  *	t4_enable_pi_params - enable/disable a Port's Virtual Interface
8982  *      @adap: the adapter
8983  *      @mbox: mailbox to use for the FW command
8984  *      @pi: the Port Information structure
8985  *      @rx_en: 1=enable Rx, 0=disable Rx
8986  *      @tx_en: 1=enable Tx, 0=disable Tx
8987  *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
8988  *
8989  *      Enables/disables a Port's Virtual Interface.  Note that setting DCB
8990  *	Enable only makes sense when enabling a Virtual Interface ...
8991  *	If the Virtual Interface enable/disable operation is successful,
8992  *	we notify the OS-specific code of a potential Link Status change
8993  *	via the OS Contract API t4_os_link_changed().
8994  */
8995 int t4_enable_pi_params(struct adapter *adap, unsigned int mbox,
8996 			struct port_info *pi,
8997 			bool rx_en, bool tx_en, bool dcb_en)
8998 {
8999 	int ret = t4_enable_vi_params(adap, mbox, pi->viid,
9000 				      rx_en, tx_en, dcb_en);
9001 	if (ret)
9002 		return ret;
9003 	t4_os_link_changed(adap, pi->port_id,
9004 			   rx_en && tx_en && pi->link_cfg.link_ok);
9005 	return 0;
9006 }
9007 
9008 /**
9009  *	t4_identify_port - identify a VI's port by blinking its LED
9010  *	@adap: the adapter
9011  *	@mbox: mailbox to use for the FW command
9012  *	@viid: the VI id
9013  *	@nblinks: how many times to blink LED at 2.5 Hz
9014  *
9015  *	Identifies a VI's port by blinking its LED.
9016  */
9017 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
9018 		     unsigned int nblinks)
9019 {
9020 	struct fw_vi_enable_cmd c;
9021 
9022 	memset(&c, 0, sizeof(c));
9023 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
9024 				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
9025 				   V_FW_VI_ENABLE_CMD_VIID(viid));
9026 	c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
9027 	c.blinkdur = cpu_to_be16(nblinks);
9028 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9029 }
9030 
9031 /**
9032  *	t4_iq_stop - stop an ingress queue and its FLs
9033  *	@adap: the adapter
9034  *	@mbox: mailbox to use for the FW command
9035  *	@pf: the PF owning the queues
9036  *	@vf: the VF owning the queues
9037  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
9038  *	@iqid: ingress queue id
9039  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
9040  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
9041  *
9042  *	Stops an ingress queue and its associated FLs, if any.  This causes
9043  *	any current or future data/messages destined for these queues to be
9044  *	tossed.
9045  */
9046 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
9047 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
9048 	       unsigned int fl0id, unsigned int fl1id)
9049 {
9050 	struct fw_iq_cmd c;
9051 
9052 	memset(&c, 0, sizeof(c));
9053 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
9054 				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
9055 				  V_FW_IQ_CMD_VFN(vf));
9056 	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
9057 	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
9058 	c.iqid = cpu_to_be16(iqid);
9059 	c.fl0id = cpu_to_be16(fl0id);
9060 	c.fl1id = cpu_to_be16(fl1id);
9061 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9062 }
9063 
9064 /**
9065  *	t4_iq_free - free an ingress queue and its FLs
9066  *	@adap: the adapter
9067  *	@mbox: mailbox to use for the FW command
9068  *	@pf: the PF owning the queues
9069  *	@vf: the VF owning the queues
9070  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
9071  *	@iqid: ingress queue id
9072  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
9073  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
9074  *
9075  *	Frees an ingress queue and its associated FLs, if any.
9076  */
9077 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
9078 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
9079 	       unsigned int fl0id, unsigned int fl1id)
9080 {
9081 	struct fw_iq_cmd c;
9082 
9083 	memset(&c, 0, sizeof(c));
9084 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
9085 				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
9086 				  V_FW_IQ_CMD_VFN(vf));
9087 	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
9088 	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
9089 	c.iqid = cpu_to_be16(iqid);
9090 	c.fl0id = cpu_to_be16(fl0id);
9091 	c.fl1id = cpu_to_be16(fl1id);
9092 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9093 }
9094 
9095 /**
9096  *	t4_eth_eq_free - free an Ethernet egress queue
9097  *	@adap: the adapter
9098  *	@mbox: mailbox to use for the FW command
9099  *	@pf: the PF owning the queue
9100  *	@vf: the VF owning the queue
9101  *	@eqid: egress queue id
9102  *
9103  *	Frees an Ethernet egress queue.
9104  */
9105 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
9106 		   unsigned int vf, unsigned int eqid)
9107 {
9108 	struct fw_eq_eth_cmd c;
9109 
9110 	memset(&c, 0, sizeof(c));
9111 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
9112 				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
9113 				  V_FW_EQ_ETH_CMD_PFN(pf) |
9114 				  V_FW_EQ_ETH_CMD_VFN(vf));
9115 	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
9116 	c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
9117 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9118 }
9119 
9120 /**
9121  *	t4_ctrl_eq_free - free a control egress queue
9122  *	@adap: the adapter
9123  *	@mbox: mailbox to use for the FW command
9124  *	@pf: the PF owning the queue
9125  *	@vf: the VF owning the queue
9126  *	@eqid: egress queue id
9127  *
9128  *	Frees a control egress queue.
9129  */
9130 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
9131 		    unsigned int vf, unsigned int eqid)
9132 {
9133 	struct fw_eq_ctrl_cmd c;
9134 
9135 	memset(&c, 0, sizeof(c));
9136 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
9137 				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
9138 				  V_FW_EQ_CTRL_CMD_PFN(pf) |
9139 				  V_FW_EQ_CTRL_CMD_VFN(vf));
9140 	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
9141 	c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
9142 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9143 }
9144 
9145 /**
9146  *	t4_ofld_eq_free - free an offload egress queue
9147  *	@adap: the adapter
9148  *	@mbox: mailbox to use for the FW command
9149  *	@pf: the PF owning the queue
9150  *	@vf: the VF owning the queue
9151  *	@eqid: egress queue id
9152  *
9153  *	Frees a control egress queue.
9154  */
9155 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
9156 		    unsigned int vf, unsigned int eqid)
9157 {
9158 	struct fw_eq_ofld_cmd c;
9159 
9160 	memset(&c, 0, sizeof(c));
9161 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
9162 				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
9163 				  V_FW_EQ_OFLD_CMD_PFN(pf) |
9164 				  V_FW_EQ_OFLD_CMD_VFN(vf));
9165 	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
9166 	c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
9167 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
9168 }
9169 
9170 /**
9171  *	t4_link_down_rc_str - return a string for a Link Down Reason Code
9172  *	@link_down_rc: Link Down Reason Code
9173  *
9174  *	Returns a string representation of the Link Down Reason Code.
9175  */
9176 const char *t4_link_down_rc_str(unsigned char link_down_rc)
9177 {
9178 	static const char * const reason[] = {
9179 		"Link Down",
9180 		"Remote Fault",
9181 		"Auto-negotiation Failure",
9182 		"Reserved",
9183 		"Insufficient Airflow",
9184 		"Unable To Determine Reason",
9185 		"No RX Signal Detected",
9186 		"Reserved",
9187 	};
9188 
9189 	if (link_down_rc >= ARRAY_SIZE(reason))
9190 		return "Bad Reason Code";
9191 
9192 	return reason[link_down_rc];
9193 }
9194 
9195 /**
9196  * Return the highest speed set in the port capabilities, in Mb/s.
9197  */
9198 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
9199 {
9200 	#define TEST_SPEED_RETURN(__caps_speed, __speed) \
9201 		do { \
9202 			if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
9203 				return __speed; \
9204 		} while (0)
9205 
9206 	TEST_SPEED_RETURN(400G, 400000);
9207 	TEST_SPEED_RETURN(200G, 200000);
9208 	TEST_SPEED_RETURN(100G, 100000);
9209 	TEST_SPEED_RETURN(50G,   50000);
9210 	TEST_SPEED_RETURN(40G,   40000);
9211 	TEST_SPEED_RETURN(25G,   25000);
9212 	TEST_SPEED_RETURN(10G,   10000);
9213 	TEST_SPEED_RETURN(1G,     1000);
9214 	TEST_SPEED_RETURN(100M,    100);
9215 
9216 	#undef TEST_SPEED_RETURN
9217 
9218 	return 0;
9219 }
9220 
9221 /**
9222  *	fwcap_to_fwspeed - return highest speed in Port Capabilities
9223  *	@acaps: advertised Port Capabilities
9224  *
9225  *	Get the highest speed for the port from the advertised Port
9226  *	Capabilities.  It will be either the highest speed from the list of
9227  *	speeds or whatever user has set using ethtool.
9228  */
9229 static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
9230 {
9231 	#define TEST_SPEED_RETURN(__caps_speed) \
9232 		do { \
9233 			if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
9234 				return FW_PORT_CAP32_SPEED_##__caps_speed; \
9235 		} while (0)
9236 
9237 	TEST_SPEED_RETURN(400G);
9238 	TEST_SPEED_RETURN(200G);
9239 	TEST_SPEED_RETURN(100G);
9240 	TEST_SPEED_RETURN(50G);
9241 	TEST_SPEED_RETURN(40G);
9242 	TEST_SPEED_RETURN(25G);
9243 	TEST_SPEED_RETURN(10G);
9244 	TEST_SPEED_RETURN(1G);
9245 	TEST_SPEED_RETURN(100M);
9246 
9247 	#undef TEST_SPEED_RETURN
9248 
9249 	return 0;
9250 }
9251 
9252 /**
9253  *	lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
9254  *	@lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
9255  *
9256  *	Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
9257  *	32-bit Port Capabilities value.
9258  */
9259 static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
9260 {
9261 	fw_port_cap32_t linkattr = 0;
9262 
9263 	/*
9264 	 * Unfortunately the format of the Link Status in the old
9265 	 * 16-bit Port Information message isn't the same as the
9266 	 * 16-bit Port Capabilities bitfield used everywhere else ...
9267 	 */
9268 	if (lstatus & F_FW_PORT_CMD_RXPAUSE)
9269 		linkattr |= FW_PORT_CAP32_FC_RX;
9270 	if (lstatus & F_FW_PORT_CMD_TXPAUSE)
9271 		linkattr |= FW_PORT_CAP32_FC_TX;
9272 	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
9273 		linkattr |= FW_PORT_CAP32_SPEED_100M;
9274 	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
9275 		linkattr |= FW_PORT_CAP32_SPEED_1G;
9276 	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
9277 		linkattr |= FW_PORT_CAP32_SPEED_10G;
9278 	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
9279 		linkattr |= FW_PORT_CAP32_SPEED_25G;
9280 	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
9281 		linkattr |= FW_PORT_CAP32_SPEED_40G;
9282 	if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
9283 		linkattr |= FW_PORT_CAP32_SPEED_100G;
9284 
9285 	return linkattr;
9286 }
9287 
9288 /**
9289  *	t4_handle_get_port_info - process a FW reply message
9290  *	@pi: the port info
9291  *	@rpl: start of the FW message
9292  *
9293  *	Processes a GET_PORT_INFO FW reply message.
9294  */
9295 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
9296 {
9297 	const struct fw_port_cmd *cmd = (const void *)rpl;
9298 	int action = G_FW_PORT_CMD_ACTION(be32_to_cpu(cmd->action_to_len16));
9299 	struct adapter *adapter = pi->adapter;
9300 	struct link_config *lc = &pi->link_cfg;
9301 	int link_ok, linkdnrc;
9302 	enum fw_port_type port_type;
9303 	enum fw_port_module_type mod_type;
9304 	unsigned int speed, fc, fec;
9305 	fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
9306 
9307 	/*
9308 	 * Extract the various fields from the Port Information message.
9309 	 */
9310 	switch (action) {
9311 	case FW_PORT_ACTION_GET_PORT_INFO: {
9312 		u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
9313 
9314 		link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS) != 0;
9315 		linkdnrc = G_FW_PORT_CMD_LINKDNRC(lstatus);
9316 		port_type = G_FW_PORT_CMD_PTYPE(lstatus);
9317 		mod_type = G_FW_PORT_CMD_MODTYPE(lstatus);
9318 		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
9319 		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
9320 		lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));
9321 		linkattr = lstatus_to_fwcap(lstatus);
9322 		break;
9323 	}
9324 
9325 	case FW_PORT_ACTION_GET_PORT_INFO32: {
9326 		u32 lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
9327 
9328 		link_ok = (lstatus32 & F_FW_PORT_CMD_LSTATUS32) != 0;
9329 		linkdnrc = G_FW_PORT_CMD_LINKDNRC32(lstatus32);
9330 		port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
9331 		mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus32);
9332 		pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
9333 		acaps = be32_to_cpu(cmd->u.info32.acaps32);
9334 		lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
9335 		linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
9336 		break;
9337 	}
9338 
9339 	default:
9340 		CH_ERR(adapter, "Handle Port Information: Bad Command/Action %#x\n",
9341 		       be32_to_cpu(cmd->action_to_len16));
9342 		return;
9343 	}
9344 
9345 	fec = fwcap_to_cc_fec(linkattr);
9346 	fc = fwcap_to_cc_pause(linkattr);
9347 	speed = fwcap_to_speed(linkattr);
9348 
9349 	/*
9350 	 * Reset state for communicating new Transceiver Module status and
9351 	 * whether the OS-dependent layer wants us to redo the current
9352 	 * "sticky" L1 Configure Link Parameters.
9353 	 */
9354 	lc->new_module = false;
9355 	lc->redo_l1cfg = false;
9356 
9357 	if (mod_type != pi->mod_type) {
9358 		/*
9359 		 * With the newer SFP28 and QSFP28 Transceiver Module Types,
9360 		 * various fundamental Port Capabilities which used to be
9361 		 * immutable can now change radically.  We can now have
9362 		 * Speeds, Auto-Negotiation, Forward Error Correction, etc.
9363 		 * all change based on what Transceiver Module is inserted.
9364 		 * So we need to record the Physical "Port" Capabilities on
9365 		 * every Transceiver Module change.
9366 		 */
9367 		lc->pcaps = pcaps;
9368 
9369 		/*
9370 		 * When a new Transceiver Module is inserted, the Firmware
9371 		 * will examine its i2c EPROM to determine its type and
9372 		 * general operating parameters including things like Forward
9373 		 * Error Control, etc.  Various IEEE 802.3 standards dictate
9374 		 * how to interpret these i2c values to determine default
9375 		 * "sutomatic" settings.  We record these for future use when
9376 		 * the user explicitly requests these standards-based values.
9377 		 */
9378 		lc->def_acaps = acaps;
9379 
9380 		/*
9381 		 * Some versions of the early T6 Firmware "cheated" when
9382 		 * handling different Transceiver Modules by changing the
9383 		 * underlaying Port Type reported to the Host Drivers.  As
9384 		 * such we need to capture whatever Port Type the Firmware
9385 		 * sends us and record it in case it's different from what we
9386 		 * were told earlier.  Unfortunately, since Firmware is
9387 		 * forever, we'll need to keep this code here forever, but in
9388 		 * later T6 Firmware it should just be an assignment of the
9389 		 * same value already recorded.
9390 		 */
9391 		pi->port_type = port_type;
9392 
9393 		/*
9394 		 * Record new Module Type information.
9395 		 */
9396 		pi->mod_type = mod_type;
9397 
9398 		/*
9399 		 * Let the OS-dependent layer know if we have a new
9400 		 * Transceiver Module inserted.
9401 		 */
9402 		lc->new_module = t4_is_inserted_mod_type(mod_type);
9403 
9404 		t4_os_portmod_changed(adapter, pi->port_id);
9405 	}
9406 
9407 	if (link_ok != lc->link_ok || speed != lc->speed ||
9408 	    fc != lc->fc || fec != lc->fec) {	/* something changed */
9409 		if (!link_ok && lc->link_ok) {
9410 			lc->link_down_rc = linkdnrc;
9411 			CH_WARN_RATELIMIT(adapter,
9412 				"Port %d link down, reason: %s\n",
9413 				pi->tx_chan, t4_link_down_rc_str(linkdnrc));
9414 		}
9415 		lc->link_ok = link_ok;
9416 		lc->speed = speed;
9417 		lc->fc = fc;
9418 		lc->fec = fec;
9419 
9420 		lc->lpacaps = lpacaps;
9421 		lc->acaps = acaps & ADVERT_MASK;
9422 
9423 		/* If we're not physically capable of Auto-Negotiation, note
9424 		 * this as Auto-Negotiation disabled.  Otherwise, we track
9425 		 * what Auto-Negotiation settings we have.  Note parallel
9426 		 * structure in t4_link_l1cfg_core() and init_link_config().
9427 		 */
9428 		if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
9429 			lc->autoneg = AUTONEG_DISABLE;
9430 		} else if (lc->acaps & FW_PORT_CAP32_ANEG) {
9431 			lc->autoneg = AUTONEG_ENABLE;
9432 		} else {
9433 			/* When Autoneg is disabled, user needs to set
9434 			 * single speed.
9435 			 * Similar to cxgb4_ethtool.c: set_link_ksettings
9436 			 */
9437 			lc->acaps = 0;
9438 			lc->speed_caps = fwcap_to_fwspeed(acaps);
9439 			lc->autoneg = AUTONEG_DISABLE;
9440 		}
9441 
9442 		t4_os_link_changed(adapter, pi->port_id, link_ok);
9443 	}
9444 
9445 	/*
9446 	 * If we have a new Transceiver Module and the OS-dependent code has
9447 	 * told us that it wants us to redo whatever "sticky" L1 Configuration
9448 	 * Link Parameters are set, do that now.
9449 	 */
9450 	if (lc->new_module && lc->redo_l1cfg) {
9451 		struct link_config old_lc;
9452 		int ret;
9453 
9454 		/*
9455 		 * Save the current L1 Configuration and restore it if an
9456 		 * error occurs.  We probably should fix the l1_cfg*()
9457 		 * routines not to change the link_config when an error
9458 		 * occurs ...
9459 		 */
9460 		old_lc = *lc;
9461 		ret = t4_link_l1cfg_ns(adapter, adapter->mbox, pi->lport, lc);
9462 		if (ret) {
9463 			*lc = old_lc;
9464 			CH_WARN(adapter,
9465 				"Attempt to update new Transceiver Module settings failed\n");
9466 		}
9467 	}
9468 	lc->new_module = false;
9469 	lc->redo_l1cfg = false;
9470 }
9471 
9472 /**
9473  *	t4_update_port_info - retrieve and update port information if changed
9474  *	@pi: the port_info
9475  *
9476  *	We issue a Get Port Information Command to the Firmware and, if
9477  *	successful, we check to see if anything is different from what we
9478  *	last recorded and update things accordingly.
9479  */
9480 int t4_update_port_info(struct port_info *pi)
9481 {
9482 	unsigned int fw_caps = pi->adapter->params.fw_caps_support;
9483 	struct fw_port_cmd port_cmd;
9484 	int ret;
9485 
9486 	memset(&port_cmd, 0, sizeof port_cmd);
9487 	port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
9488 					    F_FW_CMD_REQUEST | F_FW_CMD_READ |
9489 					    V_FW_PORT_CMD_PORTID(pi->lport));
9490 	port_cmd.action_to_len16 = cpu_to_be32(
9491 		V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
9492 				     ? FW_PORT_ACTION_GET_PORT_INFO
9493 				     : FW_PORT_ACTION_GET_PORT_INFO32) |
9494 		FW_LEN16(port_cmd));
9495 	ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
9496 			 &port_cmd, sizeof(port_cmd), &port_cmd);
9497 	if (ret)
9498 		return ret;
9499 
9500 	t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
9501 	return 0;
9502 }
9503 
9504 /**
9505  *	t4_get_link_params - retrieve basic link parameters for given port
9506  *	@pi: the port
9507  *	@link_okp: value return pointer for link up/down
9508  *	@speedp: value return pointer for speed (Mb/s)
9509  *	@mtup: value return pointer for mtu
9510  *
9511  *	Retrieves basic link parameters for a port: link up/down, speed (Mb/s),
9512  *	and MTU for a specified port.  A negative error is returned on
9513  *	failure; 0 on success.
9514  */
9515 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
9516 		       unsigned int *speedp, unsigned int *mtup)
9517 {
9518 	unsigned int fw_caps = pi->adapter->params.fw_caps_support;
9519 	struct fw_port_cmd port_cmd;
9520 	unsigned int action, link_ok, mtu;
9521 	fw_port_cap32_t linkattr;
9522 	int ret;
9523 
9524 	memset(&port_cmd, 0, sizeof port_cmd);
9525 	port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
9526 					    F_FW_CMD_REQUEST | F_FW_CMD_READ |
9527 					    V_FW_PORT_CMD_PORTID(pi->tx_chan));
9528 	action = (fw_caps == FW_CAPS16
9529 		  ? FW_PORT_ACTION_GET_PORT_INFO
9530 		  : FW_PORT_ACTION_GET_PORT_INFO32);
9531 	port_cmd.action_to_len16 = cpu_to_be32(
9532 		V_FW_PORT_CMD_ACTION(action) |
9533 		FW_LEN16(port_cmd));
9534 	ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
9535 			 &port_cmd, sizeof(port_cmd), &port_cmd);
9536 	if (ret)
9537 		return ret;
9538 
9539 	if (action == FW_PORT_ACTION_GET_PORT_INFO) {
9540 		u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype);
9541 
9542 		link_ok = !!(lstatus & F_FW_PORT_CMD_LSTATUS);
9543 		linkattr = lstatus_to_fwcap(lstatus);
9544 		mtu = be16_to_cpu(port_cmd.u.info.mtu);;
9545 	} else {
9546 		u32 lstatus32 = be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32);
9547 
9548 		link_ok = !!(lstatus32 & F_FW_PORT_CMD_LSTATUS32);
9549 		linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32);
9550 		mtu = G_FW_PORT_CMD_MTU32(
9551 			be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
9552 	}
9553 
9554 	*link_okp = link_ok;
9555 	*speedp = fwcap_to_speed(linkattr);
9556 	*mtup = mtu;
9557 
9558 	return 0;
9559 }
9560 
9561 /**
9562  *      t4_handle_fw_rpl - process a FW reply message
9563  *      @adap: the adapter
9564  *      @rpl: start of the FW message
9565  *
9566  *      Processes a FW message, such as link state change messages.
9567  */
9568 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
9569 {
9570 	u8 opcode = *(const u8 *)rpl;
9571 
9572 	/*
9573 	 * This might be a port command ... this simplifies the following
9574 	 * conditionals ...  We can get away with pre-dereferencing
9575 	 * action_to_len16 because it's in the first 16 bytes and all messages
9576 	 * will be at least that long.
9577 	 */
9578 	const struct fw_port_cmd *p = (const void *)rpl;
9579 	unsigned int action =
9580 		G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
9581 
9582 	if (opcode == FW_PORT_CMD &&
9583 	    (action == FW_PORT_ACTION_GET_PORT_INFO ||
9584 	     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
9585 		int i;
9586 		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
9587 		struct port_info *pi = NULL;
9588 
9589 		for_each_port(adap, i) {
9590 			pi = adap2pinfo(adap, i);
9591 			if (pi->lport == chan)
9592 				break;
9593 		}
9594 
9595 		t4_handle_get_port_info(pi, rpl);
9596 	} else {
9597 		CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
9598 		return -EINVAL;
9599 	}
9600 	return 0;
9601 }
9602 
9603 /**
9604  *	get_pci_mode - determine a card's PCI mode
9605  *	@adapter: the adapter
9606  *	@p: where to store the PCI settings
9607  *
9608  *	Determines a card's PCI mode and associated parameters, such as speed
9609  *	and width.
9610  */
9611 static void get_pci_mode(struct adapter *adapter,
9612 				   struct pci_params *p)
9613 {
9614 	u16 val;
9615 	u32 pcie_cap;
9616 
9617 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
9618 	if (pcie_cap) {
9619 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
9620 		p->speed = val & PCI_EXP_LNKSTA_CLS;
9621 		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
9622 	}
9623 }
9624 
9625 /**
9626  *	init_link_config - initialize a link's SW state
9627  *	@lc: pointer to structure holding the link state
9628  *	@pcaps: link Port Capabilities
9629  *	@acaps: link current Advertised Port Capabilities
9630  *
9631  *	Initializes the SW state maintained for each link, including the link's
9632  *	capabilities and default speed/flow-control/autonegotiation settings.
9633  */
9634 static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
9635 			     fw_port_cap32_t acaps)
9636 {
9637 	lc->pcaps = pcaps;
9638 	lc->def_acaps = acaps;
9639 	lc->lpacaps = 0;
9640 	lc->speed_caps = 0;
9641 	lc->speed = 0;
9642 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
9643 
9644 	/*
9645 	 * For Forward Error Control, we default to whatever the Firmware
9646 	 * tells us the Link is currently advertising.
9647 	 */
9648 	lc->requested_fec = FEC_AUTO;
9649 	lc->fec = fwcap_to_cc_fec(lc->def_acaps);
9650 
9651 	/* If the Port is capable of Auto-Negtotiation, initialize it as
9652 	 * "enabled" and copy over all of the Physical Port Capabilities
9653 	 * to the Advertised Port Capabilities.  Otherwise mark it as
9654 	 * Auto-Negotiate disabled and select the highest supported speed
9655 	 * for the link.  Note parallel structure in t4_link_l1cfg_core()
9656 	 * and t4_handle_get_port_info().
9657 	 */
9658 	if (lc->pcaps & FW_PORT_CAP32_ANEG) {
9659 		lc->acaps = lc->pcaps & ADVERT_MASK;
9660 		lc->autoneg = AUTONEG_ENABLE;
9661 		lc->requested_fc |= PAUSE_AUTONEG;
9662 	} else {
9663 		lc->acaps = 0;
9664 		lc->autoneg = AUTONEG_DISABLE;
9665 		lc->speed_caps = fwcap_to_fwspeed(acaps);
9666 	}
9667 }
9668 
9669 /**
9670  *	t4_wait_dev_ready - wait till to reads of registers work
9671  *
9672  *	Right after the device is RESET is can take a small amount of time
9673  *	for it to respond to register reads.  Until then, all reads will
9674  *	return either 0xff...ff or 0xee...ee.  Return an error if reads
9675  *	don't work within a reasonable time frame.
9676  */
9677 int t4_wait_dev_ready(struct adapter *adapter)
9678 {
9679 	u32 whoami;
9680 
9681 	whoami = t4_read_reg(adapter, A_PL_WHOAMI);
9682 	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
9683 		return 0;
9684 
9685 	msleep(500);
9686 	whoami = t4_read_reg(adapter, A_PL_WHOAMI);
9687 	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
9688 		return 0;
9689 
9690 	CH_ERR(adapter, "Device didn't become ready for access, "
9691 	       "whoami = %#x\n", whoami);
9692 	return -EIO;
9693 }
9694 
/* Describes one explicitly-supported serial Flash part. */
struct flash_desc {
	u32 vendor_and_model_id;	/* raw JEDEC Read ID response */
	u32 size_mb;	/* total part size in bytes (despite the name --
			 * see the 4MB entry initialized with 4 << 20) */
};
9699 
/**
 *	t4_get_flash_params - read the serial Flash ID and decode its size
 *	@adapter: the adapter
 *
 *	Issues a SPI Read ID command to the board's serial Flash, decodes
 *	the part size from the returned JEDEC ID, and records the size and
 *	64KB-sector count in adapter->params.sf_size / sf_nsec.  Returns 0
 *	on success or a negative error if the ID could not be read.
 */
int t4_get_flash_params(struct adapter *adapter)
{
	/*
	 * Table for non-standard supported Flash parts.  Note, all Flash
	 * parts must have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x00150201, 4 << 20 },	/* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 flashid = 0;
	unsigned int part, manufacturer;
	unsigned int density, size = 0;


	/*
	 * Issue a Read ID Command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.  There's a newer Query
	 * Command which can retrieve detailed geometry information but many
	 * Flash parts don't support it.
	 */
	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &flashid);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return ret;

	/*
	 * Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			adapter->params.sf_size =
				supported_flash[part].size_mb;
			adapter->params.sf_nsec =
				adapter->params.sf_size / SF_SEC_SIZE;
			goto found;
		}

	/*
	 * Decode Flash part size.  The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command.  The only thing that
	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result.  After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: { /* Micron/Numonix */
		/*
		 * This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14: size = 1 << 20; break; /*   1MB */
		case 0x15: size = 1 << 21; break; /*   2MB */
		case 0x16: size = 1 << 22; break; /*   4MB */
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		case 0x19: size = 1 << 25; break; /*  32MB */
		case 0x20: size = 1 << 26; break; /*  64MB */
		case 0x21: size = 1 << 27; break; /* 128MB */
		case 0x22: size = 1 << 28; break; /* 256MB */
		}
		break;
	}

	case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
		/*
		 * This Density -> Size decoding table is taken from ISSI
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x16: size = 1 << 25; break; /*  32MB */
		case 0x17: size = 1 << 26; break; /*  64MB */
		}
		break;
	}

	case 0xc2: { /* Macronix */
		/*
		 * This Density -> Size decoding table is taken from Macronix
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;
	}

	case 0xef: { /* Winbond */
		/*
		 * This Density -> Size decoding table is taken from Winbond
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;
	}
	}

	/*
	 * If we didn't recognize the FLASH part, that's no real issue: the
	 * Hardware/Software contract says that Hardware will _*ALWAYS*_
	 * use a FLASH part which is at least 4MB in size and has 64KB
	 * sectors.  The unrecognized FLASH part is likely to be much larger
	 * than 4MB, but that's all we really need.
	 */
	if (size == 0) {
		CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
		size = 1 << 22;
	}

	/*
	 * Store decoded Flash size and fall through into vetting code.
	 */
	adapter->params.sf_size = size;
	adapter->params.sf_nsec = size / SF_SEC_SIZE;

 found:
	/*
	 * We should ~probably~ reject adapters with FLASHes which are too
	 * small but we have some legacy FPGAs with small FLASHes that we'd
	 * still like to use.  So instead we emit a scary message ...
	 */
	if (adapter->params.sf_size < FLASH_MIN_SIZE)
		CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			flashid, adapter->params.sf_size, FLASH_MIN_SIZE);

	return 0;
}
9842 
9843 static void set_pcie_completion_timeout(struct adapter *adapter,
9844 						  u8 range)
9845 {
9846 	u16 val;
9847 	u32 pcie_cap;
9848 
9849 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
9850 	if (pcie_cap) {
9851 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
9852 		val &= 0xfff0;
9853 		val |= range ;
9854 		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
9855 	}
9856 }
9857 
9858 /**
9859  *	t4_get_chip_type - Determine chip type from device ID
9860  *	@adap: the adapter
9861  *	@ver: adapter version
9862  */
9863 enum chip_type t4_get_chip_type(struct adapter *adap, int ver)
9864 {
9865 	enum chip_type chip = 0;
9866 	u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV));
9867 
9868 	/* Retrieve adapter's device ID */
9869 	switch (ver) {
9870 		case CHELSIO_T4_FPGA:
9871 			chip |= CHELSIO_CHIP_FPGA;
9872 			/*FALLTHROUGH*/
9873 		case CHELSIO_T4:
9874 			chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
9875 			break;
9876 		case CHELSIO_T5_FPGA:
9877 			chip |= CHELSIO_CHIP_FPGA;
9878 			/*FALLTHROUGH*/
9879 		case CHELSIO_T5:
9880 			chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
9881 			break;
9882 		case CHELSIO_T6_FPGA:
9883 			chip |= CHELSIO_CHIP_FPGA;
9884 			/*FALLTHROUGH*/
9885 		case CHELSIO_T6:
9886 			chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
9887 			break;
9888 		default:
9889 			CH_ERR(adap, "Device %d is not supported\n",
9890 			       adap->params.pci.device_id);
9891 			return -EINVAL;
9892 	}
9893 
9894 	/* T4A1 chip is no longer supported */
9895 	if (chip == T4_A1) {
9896 		CH_ALERT(adap, "T4 rev 1 chip is no longer supported\n");
9897 		return -EINVAL;
9898 	}
9899 	return chip;
9900 }
9901 
9902 /**
9903  *	t4_prep_pf - prepare SW and HW for PF operation
9904  *	@adapter: the adapter
9905  *
9906  *	Initialize adapter SW state for the various HW modules, set initial
9907  *	values for some adapter tunables on each PF.
9908  */
9909 int t4_prep_pf(struct adapter *adapter)
9910 {
9911 	int ret, ver;
9912 
9913 	ret = t4_wait_dev_ready(adapter);
9914 	if (ret < 0)
9915 		return ret;
9916 
9917 	get_pci_mode(adapter, &adapter->params.pci);
9918 
9919 
9920 	/* Retrieve adapter's device ID
9921 	 */
9922 	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &adapter->params.pci.device_id);
9923 	t4_os_pci_read_cfg2(adapter, PCI_VENDOR_ID, &adapter->params.pci.vendor_id);
9924 
9925 	ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
9926 	adapter->params.chip = t4_get_chip_type(adapter, ver);
9927 	if (is_t4(adapter->params.chip)) {
9928 		adapter->params.arch.sge_fl_db = F_DBPRIO;
9929 		adapter->params.arch.mps_tcam_size =
9930 				 NUM_MPS_CLS_SRAM_L_INSTANCES;
9931 		adapter->params.arch.mps_rplc_size = 128;
9932 		adapter->params.arch.nchan = NCHAN;
9933 		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
9934 		adapter->params.arch.vfcount = 128;
9935 		/* Congestion map is for 4 channels so that
9936 		 * MPS can have 4 priority per port.
9937 		 */
9938 		adapter->params.arch.cng_ch_bits_log = 2;
9939 	} else if (is_t5(adapter->params.chip)) {
9940 		adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
9941 		adapter->params.arch.mps_tcam_size =
9942 				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
9943 		adapter->params.arch.mps_rplc_size = 128;
9944 		adapter->params.arch.nchan = NCHAN;
9945 		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
9946 		adapter->params.arch.vfcount = 128;
9947 		adapter->params.arch.cng_ch_bits_log = 2;
9948 	} else if (is_t6(adapter->params.chip)) {
9949 		adapter->params.arch.sge_fl_db = 0;
9950 		adapter->params.arch.mps_tcam_size =
9951 				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
9952 		adapter->params.arch.mps_rplc_size = 256;
9953 		adapter->params.arch.nchan = 2;
9954 		adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
9955 		adapter->params.arch.vfcount = 256;
9956 		/* Congestion map will be for 2 channels so that
9957 		 * MPS can have 8 priority per port.
9958 		 */
9959 		adapter->params.arch.cng_ch_bits_log = 3;
9960 	} else {
9961 		CH_ERR(adapter, "Device %d is not supported\n",
9962 			adapter->params.pci.device_id);
9963 		return -EINVAL;
9964 	}
9965 
9966 	adapter->params.pci.vpd_cap_addr =
9967 		t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
9968 
9969 	if (is_fpga(adapter->params.chip)) {
9970 		/* FPGA */
9971 		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
9972 	} else {
9973 		/* ASIC */
9974 		adapter->params.cim_la_size = CIMLA_SIZE;
9975 	}
9976 
9977 	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
9978 
9979 	/*
9980 	 * Default port and clock for debugging in case we can't reach FW.
9981 	 */
9982 	adapter->params.nports = 1;
9983 	adapter->params.portvec = 1;
9984 	adapter->params.vpd.cclk = 50000;
9985 
9986 	/* Set pci completion timeout value to 4 seconds. */
9987 	set_pcie_completion_timeout(adapter, 0xd);
9988 	return 0;
9989 }
9990 
9991 /**
9992  *      t4_prep_master_pf - prepare SW for master PF operations
9993  *      @adapter: the adapter
9994  *
9995  */
9996 int t4_prep_master_pf(struct adapter *adapter)
9997 {
9998 	int ret;
9999 
10000 	ret = t4_prep_pf(adapter);
10001 	if (ret < 0)
10002 		return ret;
10003 
10004 	ret = t4_get_flash_params(adapter);
10005 	if (ret < 0) {
10006 		CH_ERR(adapter,
10007 		       "Unable to retrieve Flash parameters ret = %d\n", -ret);
10008 		return ret;
10009 	}
10010 
10011 	return 0;
10012 }
10013 
10014 /**
10015  *      t4_prep_adapter - prepare SW and HW for operation
10016  *      @adapter: the adapter
10017  *      @reset: if true perform a HW reset
10018  *
10019  *      Initialize adapter SW state for the various HW modules, set initial
10020  *      values for some adapter tunables.
10021  */
10022 int t4_prep_adapter(struct adapter *adapter, bool reset)
10023 {
10024 	return t4_prep_master_pf(adapter);
10025 }
10026 
10027 /**
10028  *	t4_shutdown_adapter - shut down adapter, host & wire
10029  *	@adapter: the adapter
10030  *
10031  *	Perform an emergency shutdown of the adapter and stop it from
10032  *	continuing any further communication on the ports or DMA to the
10033  *	host.  This is typically used when the adapter and/or firmware
10034  *	have crashed and we want to prevent any further accidental
10035  *	communication with the rest of the world.  This will also force
10036  *	the port Link Status to go down -- if register writes work --
10037  *	which should help our peers figure out that we're down.
10038  */
10039 int t4_shutdown_adapter(struct adapter *adapter)
10040 {
10041 	int port;
10042 
10043 	t4_intr_disable(adapter);
10044 	t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
10045 	for_each_port(adapter, port) {
10046 		u32 a_port_cfg = is_t4(adapter->params.chip) ?
10047 				 PORT_REG(port, A_XGMAC_PORT_CFG) :
10048 				 T5_PORT_REG(port, A_MAC_PORT_CFG);
10049 
10050 		t4_write_reg(adapter, a_port_cfg,
10051 			     t4_read_reg(adapter, a_port_cfg)
10052 			     & ~V_SIGNAL_DET(1));
10053 	}
10054 	t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
10055 
10056 	return 0;
10057 }
10058 
10059 /**
10060  *	t4_bar2_sge_qregs - return BAR2 SGE Queue register information
10061  *	@adapter: the adapter
10062  *	@qid: the Queue ID
10063  *	@qtype: the Ingress or Egress type for @qid
10064  *	@user: true if this request is for a user mode queue
10065  *	@pbar2_qoffset: BAR2 Queue Offset
10066  *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
10067  *
10068  *	Returns the BAR2 SGE Queue Registers information associated with the
10069  *	indicated Absolute Queue ID.  These are passed back in return value
10070  *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
10071  *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
10072  *
10073  *	This may return an error which indicates that BAR2 SGE Queue
10074  *	registers aren't available.  If an error is not returned, then the
10075  *	following values are returned:
10076  *
10077  *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
10078  *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
10079  *
10080  *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
10081  *	require the "Inferred Queue ID" ability may be used.  E.g. the
10082  *	Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
10083  *	then these "Inferred Queue ID" register may not be used.
10084  */
10085 int t4_bar2_sge_qregs(struct adapter *adapter,
10086 		      unsigned int qid,
10087 		      enum t4_bar2_qtype qtype,
10088 		      int user,
10089 		      u64 *pbar2_qoffset,
10090 		      unsigned int *pbar2_qid)
10091 {
10092 	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
10093 	u64 bar2_page_offset, bar2_qoffset;
10094 	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
10095 
10096 	/* T4 doesn't support BAR2 SGE Queue registers for kernel
10097 	 * mode queues.
10098 	 */
10099 	if (!user && is_t4(adapter->params.chip))
10100 		return -EINVAL;
10101 
10102 	/* Get our SGE Page Size parameters.
10103 	 */
10104 	page_shift = adapter->params.sge.hps + 10;
10105 	page_size = 1 << page_shift;
10106 
10107 	/* Get the right Queues per Page parameters for our Queue.
10108 	 */
10109 	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
10110 		     ? adapter->params.sge.eq_qpp
10111 		     : adapter->params.sge.iq_qpp);
10112 	qpp_mask = (1 << qpp_shift) - 1;
10113 
10114 	/* Calculate the basics of the BAR2 SGE Queue register area:
10115 	 *  o The BAR2 page the Queue registers will be in.
10116 	 *  o The BAR2 Queue ID.
10117 	 *  o The BAR2 Queue ID Offset into the BAR2 page.
10118 	 */
10119 	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
10120 	bar2_qid = qid & qpp_mask;
10121 	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
10122 
10123 	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
10124 	 * hardware will infer the Absolute Queue ID simply from the writes to
10125 	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
10126 	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
10127 	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
10128 	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
10129 	 * from the BAR2 Page and BAR2 Queue ID.
10130 	 *
10131 	 * One important censequence of this is that some BAR2 SGE registers
10132 	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
10133 	 * there.  But other registers synthesize the SGE Queue ID purely
10134 	 * from the writes to the registers -- the Write Combined Doorbell
10135 	 * Buffer is a good example.  These BAR2 SGE Registers are only
10136 	 * available for those BAR2 SGE Register areas where the SGE Absolute
10137 	 * Queue ID can be inferred from simple writes.
10138 	 */
10139 	bar2_qoffset = bar2_page_offset;
10140 	bar2_qinferred = (bar2_qid_offset < page_size);
10141 	if (bar2_qinferred) {
10142 		bar2_qoffset += bar2_qid_offset;
10143 		bar2_qid = 0;
10144 	}
10145 
10146 	*pbar2_qoffset = bar2_qoffset;
10147 	*pbar2_qid = bar2_qid;
10148 	return 0;
10149 }
10150 
10151 /**
10152  *	t4_init_devlog_params - initialize adapter->params.devlog
10153  *	@adap: the adapter
10154  *	@fw_attach: whether we can talk to the firmware
10155  *
10156  *	Initialize various fields of the adapter's Firmware Device Log
10157  *	Parameters structure.
10158  */
10159 int t4_init_devlog_params(struct adapter *adap, int fw_attach)
10160 {
10161 	struct devlog_params *dparams = &adap->params.devlog;
10162 	u32 pf_dparams;
10163 	unsigned int devlog_meminfo;
10164 	struct fw_devlog_cmd devlog_cmd;
10165 	int ret;
10166 
10167 	/* If we're dealing with newer firmware, the Device Log Paramerters
10168 	 * are stored in a designated register which allows us to access the
10169 	 * Device Log even if we can't talk to the firmware.
10170 	 */
10171 	pf_dparams =
10172 		t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
10173 	if (pf_dparams) {
10174 		unsigned int nentries, nentries128;
10175 
10176 		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
10177 		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
10178 
10179 		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
10180 		nentries = (nentries128 + 1) * 128;
10181 		dparams->size = nentries * sizeof(struct fw_devlog_e);
10182 
10183 		return 0;
10184 	}
10185 
10186 	/*
10187 	 * For any failing returns ...
10188 	 */
10189 	memset(dparams, 0, sizeof *dparams);
10190 
10191 	/*
10192 	 * If we can't talk to the firmware, there's really nothing we can do
10193 	 * at this point.
10194 	 */
10195 	if (!fw_attach)
10196 		return -ENXIO;
10197 
10198 	/* Otherwise, ask the firmware for it's Device Log Parameters.
10199 	 */
10200 	memset(&devlog_cmd, 0, sizeof devlog_cmd);
10201 	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
10202 					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
10203 	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
10204 	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
10205 			 &devlog_cmd);
10206 	if (ret)
10207 		return ret;
10208 
10209 	devlog_meminfo =
10210 		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
10211 	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
10212 	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
10213 	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
10214 
10215 	return 0;
10216 }
10217 
10218 /**
10219  *	t4_init_sge_params - initialize adap->params.sge
10220  *	@adapter: the adapter
10221  *
10222  *	Initialize various fields of the adapter's SGE Parameters structure.
10223  */
10224 int t4_init_sge_params(struct adapter *adapter)
10225 {
10226 	struct sge_params *sge_params = &adapter->params.sge;
10227 	u32 hps, qpp;
10228 	unsigned int s_hps, s_qpp;
10229 
10230 	/* Extract the SGE Page Size for our PF.
10231 	 */
10232 	hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
10233 	s_hps = (S_HOSTPAGESIZEPF0 +
10234 		 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf);
10235 	sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
10236 
10237 	/* Extract the SGE Egress and Ingess Queues Per Page for our PF.
10238 	 */
10239 	s_qpp = (S_QUEUESPERPAGEPF0 +
10240 		(S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
10241 	qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
10242 	sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
10243 	qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
10244 	sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
10245 
10246 	return 0;
10247 }
10248 
10249 /**
10250  *      t4_init_tp_params - initialize adap->params.tp
10251  *      @adap: the adapter
10252  * 	@sleep_ok: if true we may sleep while awaiting command completion
10253  *
10254  *      Initialize various fields of the adapter's TP Parameters structure.
10255  */
10256 int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
10257 {
10258 	u32 param, val, v;
10259 	int chan, ret;
10260 
10261 	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
10262 	adap->params.tp.tre = G_TIMERRESOLUTION(v);
10263 	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
10264 
10265 	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
10266 	for (chan = 0; chan < NCHAN; chan++)
10267 		adap->params.tp.tx_modq[chan] = chan;
10268 
10269 	/* Cache the adapter's Compressed Filter Mode/Mask and global Ingress
10270 	 * Configuration.
10271 	 */
10272 
10273 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
10274 		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
10275 		 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK));
10276 
10277 	/* Read current value */
10278 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
10279 			      &param, &val);
10280 	if (ret == 0) {
10281 		CH_INFO(adap,
10282 			 "Current filter mode/mask 0x%x:0x%x\n",
10283 			 G_FW_PARAMS_PARAM_FILTER_MODE(val),
10284 			 G_FW_PARAMS_PARAM_FILTER_MASK(val));
10285 		adap->params.tp.vlan_pri_map = G_FW_PARAMS_PARAM_FILTER_MODE(val);
10286 		adap->params.tp.filter_mask = G_FW_PARAMS_PARAM_FILTER_MASK(val);
10287 	} else {
10288 		CH_WARN(adap,
10289 			 "Reading filter mode/mask not supported via fw api, "
10290 			 "falling back to older indirect-reg-read \n");
10291 
10292 		/* Incase of older-fw (which doesn't expose the api
10293 		 * FW_PARAM_DEV_FILTER_MODE_MASK) and newer-driver (which uses
10294 		 * the fw api) combination, fall-back to older method of reading
10295 		 * the filter mode from indirect-register
10296 		 */
10297 		t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
10298 			       A_TP_VLAN_PRI_MAP, sleep_ok);
10299 
10300 		/* With the older-fw and newer-driver combination we might run
10301 		 * into an issue when user wants to use hash filter region but
10302 		 * the filter_mask is zero, in this case filter_mask validation
10303 		 * is tough. To avoid that we set the filter_mask same as filter
10304 		 * mode, which will behave exactly as the older way of ignoring
10305 		 * the filter mask validation.
10306 		 */
10307 		adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map;
10308 	}
10309 
10310 	t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
10311 		       A_TP_INGRESS_CONFIG, sleep_ok);
10312 
10313 	/* For T6, cache the adapter's compressed error vector
10314 	 * and passing outer header info for encapsulated packets.
10315 	 */
10316 	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
10317 		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
10318 		adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
10319 	}
10320 
10321 	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
10322 	 * shift positions of several elements of the Compressed Filter Tuple
10323 	 * for this adapter which we need frequently ...
10324 	 */
10325 	adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
10326 	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
10327 	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
10328 	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
10329 	adap->params.tp.tos_shift = t4_filter_field_shift(adap, F_TOS);
10330 	adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
10331 	adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
10332 								F_ETHERTYPE);
10333 	adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
10334 								F_MACMATCH);
10335 	adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
10336 								F_MPSHITTYPE);
10337 	adap->params.tp.frag_shift = t4_filter_field_shift(adap,
10338 							   F_FRAGMENTATION);
10339 	return 0;
10340 }
10341 
10342 /**
10343  *      t4_filter_field_shift - calculate filter field shift
10344  *      @adap: the adapter
10345  *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
10346  *
10347  *      Return the shift position of a filter field within the Compressed
10348  *      Filter Tuple.  The filter field is specified via its selection bit
10349  *      within TP_VLAN_PRI_MAL (filter mode).  E.g. F_VLAN.
10350  */
10351 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
10352 {
10353 	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
10354 	unsigned int sel;
10355 	int field_shift;
10356 
10357 	if ((filter_mode & filter_sel) == 0)
10358 		return -1;
10359 
10360 	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
10361 		switch (filter_mode & sel) {
10362 		case F_FCOE:
10363 			field_shift += W_FT_FCOE;
10364 			break;
10365 		case F_PORT:
10366 			field_shift += W_FT_PORT;
10367 			break;
10368 		case F_VNIC_ID:
10369 			field_shift += W_FT_VNIC_ID;
10370 			break;
10371 		case F_VLAN:
10372 			field_shift += W_FT_VLAN;
10373 			break;
10374 		case F_TOS:
10375 			field_shift += W_FT_TOS;
10376 			break;
10377 		case F_PROTOCOL:
10378 			field_shift += W_FT_PROTOCOL;
10379 			break;
10380 		case F_ETHERTYPE:
10381 			field_shift += W_FT_ETHERTYPE;
10382 			break;
10383 		case F_MACMATCH:
10384 			field_shift += W_FT_MACMATCH;
10385 			break;
10386 		case F_MPSHITTYPE:
10387 			field_shift += W_FT_MPSHITTYPE;
10388 			break;
10389 		case F_FRAGMENTATION:
10390 			field_shift += W_FT_FRAGMENTATION;
10391 			break;
10392 		}
10393 	}
10394 	return field_shift;
10395 }
10396 
10397 /**
10398  *	t4_create_filter_info - return Compressed Filter Value/Mask tuple
10399  *	@adapter: the adapter
10400  *	@filter_value: Filter Value return value pointer
10401  *	@filter_mask: Filter Mask return value pointer
10402  *	@fcoe: FCoE filter selection
10403  *	@port: physical port filter selection
10404  *	@vnic: Virtual NIC ID filter selection
10405  *	@vlan: VLAN ID filter selection
10406  *	@vlan_pcp: VLAN Priority Code Point
10407  *	@vlan_dei: VLAN Drop Eligibility Indicator
10408  *	@tos: Type Of Server filter selection
10409  *	@protocol: IP Protocol filter selection
10410  *	@ethertype: Ethernet Type filter selection
10411  *	@macmatch: MPS MAC Index filter selection
10412  *	@matchtype: MPS Hit Type filter selection
10413  *	@frag: IP Fragmentation filter selection
10414  *
10415  *	Construct a Compressed Filter Value/Mask tuple based on a set of
10416  *	"filter selection" values.  For each passed filter selection value
10417  *	which is greater than or equal to 0, we put that value into the
10418  *	constructed Filter Value and the appropriate mask into the Filter
10419  *	Mask.  If a filter selections is specified which is not currently
10420  *	configured into the hardware, an error will be returned.  Otherwise
10421  *	the constructed FIlter Value/Mask tuple will be returned via the
10422  *	specified return value pointers and success will be returned.
10423  *
10424  *	All filter selection values and the returned Filter Value/Mask values
10425  *	are in Host-Endian format.
10426  */
10427 int t4_create_filter_info(const struct adapter *adapter,
10428 			  u64 *filter_value, u64 *filter_mask,
10429 			  int fcoe, int port, int vnic,
10430 			  int vlan, int vlan_pcp, int vlan_dei,
10431 			  int tos, int protocol, int ethertype,
10432 			  int macmatch, int matchtype, int frag)
10433 {
10434 	const struct tp_params *tp = &adapter->params.tp;
10435 	u64 v, m;
10436 
10437 	/*
10438 	 * If any selected filter field isn't enabled, return an error.
10439 	 */
10440 	#define BAD_FILTER(__field) \
10441 		((__field) >= 0 && tp->__field##_shift < 0)
10442 	if (BAD_FILTER(fcoe)       ||
10443 	    BAD_FILTER(port)       ||
10444 	    BAD_FILTER(vnic)       ||
10445 	    BAD_FILTER(vlan)       ||
10446 	    BAD_FILTER(tos)        ||
10447 	    BAD_FILTER(protocol)   ||
10448 	    BAD_FILTER(ethertype)  ||
10449 	    BAD_FILTER(macmatch)   ||
10450 	    BAD_FILTER(matchtype) ||
10451 	    BAD_FILTER(frag))
10452 		return -EINVAL;
10453 	#undef BAD_FILTER
10454 
10455 	/*
10456 	 * We have to have VLAN ID selected if we want to also select on
10457 	 * either the Priority Code Point or Drop Eligibility Indicator
10458 	 * fields.
10459 	 */
10460 	if ((vlan_pcp >= 0 || vlan_dei >= 0) && vlan < 0)
10461 		return -EINVAL;
10462 
10463 	/*
10464 	 * Construct Filter Value and Mask.
10465 	 */
10466 	v = m = 0;
10467 	#define SET_FILTER_FIELD(__field, __width) \
10468 	do { \
10469 		if ((__field) >= 0) { \
10470 			const int shift = tp->__field##_shift; \
10471 			\
10472 			v |= (__field) << shift; \
10473 			m |= ((1ULL << (__width)) - 1) << shift; \
10474 		} \
10475 	} while (0)
10476 	SET_FILTER_FIELD(fcoe,      W_FT_FCOE);
10477 	SET_FILTER_FIELD(port,      W_FT_PORT);
10478 	SET_FILTER_FIELD(tos,       W_FT_TOS);
10479 	SET_FILTER_FIELD(protocol,  W_FT_PROTOCOL);
10480 	SET_FILTER_FIELD(ethertype, W_FT_ETHERTYPE);
10481 	SET_FILTER_FIELD(macmatch,  W_FT_MACMATCH);
10482 	SET_FILTER_FIELD(matchtype, W_FT_MPSHITTYPE);
10483 	SET_FILTER_FIELD(frag,      W_FT_FRAGMENTATION);
10484 	#undef SET_FILTER_FIELD
10485 
10486 	/*
10487 	 * We handle VNIC ID and VLANs separately because they're slightly
10488 	 * different than the rest of the fields.  Both require that a
10489 	 * corresponding "valid" bit be set in the Filter Value and Mask.
10490 	 * These bits are in the top bit of the field.  Additionally, we can
10491 	 * select the Priority Code Point and Drop Eligibility Indicator
10492 	 * fields for VLANs as an option.  Remember that the format of a VLAN
10493 	 * Tag is:
10494 	 *
10495 	 * bits: 3  1      12
10496 	 *     +---+-+------------+
10497 	 *     |PCP|D|   VLAN ID  |
10498 	 *     +---+-+------------+
10499 	 */
10500 	if (vnic >= 0) {
10501 		v |= ((1ULL << (W_FT_VNIC_ID-1)) | vnic) << tp->vnic_shift;
10502 		m |= ((1ULL << W_FT_VNIC_ID) - 1) << tp->vnic_shift;
10503 	}
10504 	if (vlan >= 0) {
10505 		v |= ((1ULL << (W_FT_VLAN-1)) | vlan)  << tp->vlan_shift;
10506 		m |= ((1ULL << (W_FT_VLAN-1)) | 0xfff) << tp->vlan_shift;
10507 
10508 		if (vlan_dei >= 0) {
10509 			v |= vlan_dei << (tp->vlan_shift + 12);
10510 			m |= 0x7      << (tp->vlan_shift + 12);
10511 		}
10512 		if (vlan_pcp >= 0) {
10513 			v |= vlan_pcp << (tp->vlan_shift + 13);
10514 			m |= 0x7      << (tp->vlan_shift + 13);
10515 		}
10516 	}
10517 
10518 	/*
10519 	 * Pass back computed Filter Value and Mask; return success.
10520 	 */
10521 	*filter_value = v;
10522 	*filter_mask = m;
10523 	return 0;
10524 }
10525 
10526 int t4_init_rss_mode(struct adapter *adap, int mbox)
10527 {
10528 	int i, ret;
10529 	struct fw_rss_vi_config_cmd rvc;
10530 
10531 	memset(&rvc, 0, sizeof(rvc));
10532 
10533 	for_each_port(adap, i) {
10534 		struct port_info *p = adap2pinfo(adap, i);
10535 		rvc.op_to_viid =
10536 			cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
10537 				    F_FW_CMD_REQUEST | F_FW_CMD_READ |
10538 				    V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
10539 		rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
10540 		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
10541 		if (ret)
10542 			return ret;
10543 		p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
10544 	}
10545 	return 0;
10546 }
10547 
10548 static int t4_init_portmirror(struct port_info *pi, int mbox,
10549 		       int port, int pf, int vf)
10550 {
10551 	struct adapter *adapter = pi->adapter;
10552 	int ret;
10553 	u8 vivld = 0, vin = 0;
10554 
10555 	ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, NULL, NULL,
10556 			  &vivld, &vin);
10557 	if (ret < 0)
10558 		return ret;
10559 
10560 	pi->viid_mirror = ret;
10561 
10562 	/* If fw supports returning the VIN as part of FW_VI_CMD,
10563 	 * save the returned values.
10564 	 */
10565 	if (adapter->params.viid_smt_extn_support) {
10566 		pi->vivld_mirror = vivld;
10567 		pi->vin_mirror = vin;
10568 	} else {
10569 		/* Retrieve the values from VIID */
10570 		pi->vivld_mirror = G_FW_VIID_VIVLD(pi->viid_mirror);
10571 		pi->vin_mirror = G_FW_VIID_VIN(pi->viid_mirror);
10572 	}
10573 
10574 	CH_INFO(pi->adapter, "Port %d Traffic Mirror PF = %u; VF = %u\n",
10575 		port, pf, pi->vin_mirror);
10576 	return 0;
10577 }
10578 
10579 int t4_mirror_init(struct adapter *adap, int mbox, int pf, int vf,
10580 		   bool enable_ringbb)
10581 {
10582 	int ret, i, j = 0;
10583 
10584 	for_each_port(adap, i) {
10585 		struct port_info *pi = adap2pinfo(adap, i);
10586 
10587 		/* We want mirroring only on Port0 for ringbackbone
10588 		 * configuration.
10589 		 */
10590 		if (enable_ringbb && i)
10591 			break;
10592 		while ((adap->params.portvec & (1 << j)) == 0)
10593 			j++;
10594 
10595 		ret = t4_init_portmirror(pi, mbox, j, pf, vf);
10596 		if (ret)
10597 			return ret;
10598 		j++;
10599 	}
10600 	return 0;
10601 }
10602 
10603 /**
10604  *	t4_init_portinfo_viid - allocate a virtual interface and initialize
10605  *	port_info
10606  *	@pi: the port_info
10607  *	@mbox: mailbox to use for the FW command
10608  *	@port: physical port associated with the VI
10609  *	@pf: the PF owning the VI
10610  *	@vf: the VF owning the VI
10611  *	@mac: the MAC address of the VI
10612  *	@alloc_vi: Indicator to alloc VI
10613  *
10614  *	Allocates a virtual interface for the given physical port.  If @mac is
10615  *	not %NULL it contains the MAC address of the VI as assigned by FW.
10616  *	@mac should be large enough to hold an Ethernet address.
10617  *	Returns < 0 on error.
10618  */
10619 int t4_init_portinfo_viid(struct port_info *pi, int mbox,
10620 		     int port, int pf, int vf, u8 mac[], bool alloc_vi)
10621 {
10622 	struct adapter *adapter = pi->adapter;
10623 	unsigned int fw_caps = adapter->params.fw_caps_support;
10624 	struct fw_port_cmd cmd;
10625 	unsigned int rss_size;
10626 	enum fw_port_type port_type;
10627 	int mdio_addr;
10628 	fw_port_cap32_t pcaps, acaps;
10629 	int ret;
10630 
10631 	/*
10632 	 * If we haven't yet determined whether we're talking to Firmware
10633 	 * which knows the new 32-bit Port Capabilities, it's time to find
10634 	 * out now.  This will also tell new Firmware to send us Port Status
10635 	 * Updates using the new 32-bit Port Capabilities version of the
10636 	 * Port Information message.
10637 	 */
10638 	if (fw_caps == FW_CAPS_UNKNOWN) {
10639 		u32 param, val;
10640 
10641 		param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
10642 			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
10643 		val = 1;
10644 		ret = t4_set_params(adapter, mbox, pf, vf, 1, &param, &val);
10645 		fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
10646 		adapter->params.fw_caps_support = fw_caps;
10647 	}
10648 
10649 	memset(&cmd, 0, sizeof(cmd));
10650 	cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
10651 				       F_FW_CMD_REQUEST | F_FW_CMD_READ |
10652 				       V_FW_PORT_CMD_PORTID(port));
10653 	cmd.action_to_len16 = cpu_to_be32(
10654 		V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
10655 				     ? FW_PORT_ACTION_GET_PORT_INFO
10656 				     : FW_PORT_ACTION_GET_PORT_INFO32) |
10657 		FW_LEN16(cmd));
10658 	ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
10659 	if (ret)
10660 		return ret;
10661 
10662 	/*
10663 	 * Extract the various fields from the Port Information message.
10664 	 */
10665 	if (fw_caps == FW_CAPS16) {
10666 		u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype);
10667 
10668 		port_type = G_FW_PORT_CMD_PTYPE(lstatus);
10669 		mdio_addr = ((lstatus & F_FW_PORT_CMD_MDIOCAP)
10670 			     ? G_FW_PORT_CMD_MDIOADDR(lstatus)
10671 			     : -1);
10672 		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap));
10673 		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap));
10674 	} else {
10675 		u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
10676 
10677 		port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
10678 		mdio_addr = ((lstatus32 & F_FW_PORT_CMD_MDIOCAP32)
10679 			     ? G_FW_PORT_CMD_MDIOADDR32(lstatus32)
10680 			     : -1);
10681 		pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
10682 		acaps = be32_to_cpu(cmd.u.info32.acaps32);
10683 	}
10684 
10685 	if (alloc_vi) {
10686 		u8 vivld = 0, vin = 0;
10687 
10688 		ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac,
10689 				  &rss_size, &vivld, &vin);
10690 		if (ret < 0)
10691 			return ret;
10692 
10693 		pi->viid = ret;
10694 		pi->rss_size = rss_size;
10695 
10696 		/* If fw supports returning the VIN as part of FW_VI_CMD,
10697 		 * save the returned values.
10698 		 */
10699 		if (adapter->params.viid_smt_extn_support) {
10700 			pi->vivld = vivld;
10701 			pi->vin = vin;
10702 		} else {
10703 			/* Retrieve the values from VIID */
10704 			pi->vivld = G_FW_VIID_VIVLD(pi->viid);
10705 			pi->vin = G_FW_VIID_VIN(pi->viid);
10706 		}
10707 	}
10708 
10709 	pi->tx_chan = port;
10710 	pi->lport = port;
10711 	pi->rx_chan = port;
10712 	pi->rx_cchan = t4_get_tp_e2c_map(pi->adapter, port);
10713 
10714 	pi->port_type = port_type;
10715 	pi->mdio_addr = mdio_addr;
10716 	pi->mod_type = FW_PORT_MOD_TYPE_NA;
10717 
10718 	init_link_config(&pi->link_cfg, pcaps, acaps);
10719 	return 0;
10720 }
10721 
10722 /**
10723  *	t4_init_portinfo - allocate a virtual interface and initialize port_info
10724  *	@pi: the port_info
10725  *	@mbox: mailbox to use for the FW command
10726  *	@port: physical port associated with the VI
10727  *	@pf: the PF owning the VI
10728  *	@vf: the VF owning the VI
10729  *	@mac: the MAC address of the VI
10730  *
10731  *	Allocates a virtual interface for the given physical port.  If @mac is
10732  *	not %NULL it contains the MAC address of the VI as assigned by FW.
10733  *	@mac should be large enough to hold an Ethernet address.
10734  *	Returns < 0 on error.
10735  */
10736 int t4_init_portinfo(struct port_info *pi, int mbox,
10737 		     int port, int pf, int vf, u8 mac[])
10738 {
10739 	return t4_init_portinfo_viid(pi, mbox, port, pf, vf, mac, true);
10740 }
10741 
10742 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
10743 {
10744 	u8 addr[6];
10745 	int ret, i, j = 0;
10746 
10747 	for_each_port(adap, i) {
10748 		struct port_info *pi = adap2pinfo(adap, i);
10749 
10750 		while ((adap->params.portvec & (1 << j)) == 0)
10751 			j++;
10752 
10753 		ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
10754 		if (ret)
10755 			return ret;
10756 
10757 		t4_os_set_hw_addr(adap, i, addr);
10758 		j++;
10759 	}
10760 	return 0;
10761 }
10762 
10763 /**
10764  *	t4_read_cimq_cfg - read CIM queue configuration
10765  *	@adap: the adapter
10766  *	@base: holds the queue base addresses in bytes
10767  *	@size: holds the queue sizes in bytes
10768  *	@thres: holds the queue full thresholds in bytes
10769  *
10770  *	Returns the current configuration of the CIM queues, starting with
10771  *	the IBQs, then the OBQs.
10772  */
10773 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
10774 {
10775 	unsigned int i, v;
10776 	int cim_num_obq = is_t4(adap->params.chip) ?
10777 				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
10778 
10779 	for (i = 0; i < CIM_NUM_IBQ; i++) {
10780 		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
10781 			     V_QUENUMSELECT(i));
10782 		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
10783 		/* value is in 256-byte units */
10784 		*base++ = G_CIMQBASE(v) * 256;
10785 		*size++ = G_CIMQSIZE(v) * 256;
10786 		*thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
10787 	}
10788 	for (i = 0; i < cim_num_obq; i++) {
10789 		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
10790 			     V_QUENUMSELECT(i));
10791 		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
10792 		/* value is in 256-byte units */
10793 		*base++ = G_CIMQBASE(v) * 256;
10794 		*size++ = G_CIMQSIZE(v) * 256;
10795 	}
10796 }
10797 
10798 /**
10799  *	t4_read_cim_ibq - read the contents of a CIM inbound queue
10800  *	@adap: the adapter
10801  *	@qid: the queue index
10802  *	@data: where to store the queue contents
10803  *	@n: capacity of @data in 32-bit words
10804  *
10805  *	Reads the contents of the selected CIM queue starting at address 0 up
10806  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
10807  *	error and the number of 32-bit words actually read on success.
10808  */
10809 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
10810 {
10811 	int i, err, attempts;
10812 	unsigned int addr;
10813 	const unsigned int nwords = CIM_IBQ_SIZE * 4;
10814 
10815 	if (qid > 5 || (n & 3))
10816 		return -EINVAL;
10817 
10818 	addr = qid * nwords;
10819 	if (n > nwords)
10820 		n = nwords;
10821 
10822 	/* It might take 3-10ms before the IBQ debug read access is allowed.
10823 	 * Wait for 1 Sec with a delay of 1 usec.
10824 	 */
10825 	attempts = 1000000;
10826 
10827 	for (i = 0; i < n; i++, addr++) {
10828 		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
10829 			     F_IBQDBGEN);
10830 		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
10831 				      attempts, 1);
10832 		if (err)
10833 			return err;
10834 		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
10835 	}
10836 	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
10837 	return i;
10838 }
10839 
10840 /**
10841  *	t4_read_cim_obq - read the contents of a CIM outbound queue
10842  *	@adap: the adapter
10843  *	@qid: the queue index
10844  *	@data: where to store the queue contents
10845  *	@n: capacity of @data in 32-bit words
10846  *
10847  *	Reads the contents of the selected CIM queue starting at address 0 up
10848  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
10849  *	error and the number of 32-bit words actually read on success.
10850  */
10851 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
10852 {
10853 	int i, err;
10854 	unsigned int addr, v, nwords;
10855 	int cim_num_obq = is_t4(adap->params.chip) ?
10856 				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
10857 
10858 	if ((qid > (cim_num_obq - 1)) || (n & 3))
10859 		return -EINVAL;
10860 
10861 	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
10862 		     V_QUENUMSELECT(qid));
10863 	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
10864 
10865 	addr = G_CIMQBASE(v) * 64;    /* muliple of 256 -> muliple of 4 */
10866 	nwords = G_CIMQSIZE(v) * 64;  /* same */
10867 	if (n > nwords)
10868 		n = nwords;
10869 
10870 	for (i = 0; i < n; i++, addr++) {
10871 		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
10872 			     F_OBQDBGEN);
10873 		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
10874 				      2, 1);
10875 		if (err)
10876 			return err;
10877 		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
10878 	}
10879 	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
10880 	return i;
10881 }
10882 
10883 /**
10884  *	t4_cim_read - read a block from CIM internal address space
10885  *	@adap: the adapter
10886  *	@addr: the start address within the CIM address space
10887  *	@n: number of words to read
10888  *	@valp: where to store the result
10889  *
10890  *	Reads a block of 4-byte words from the CIM intenal address space.
10891  */
10892 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
10893 		unsigned int *valp)
10894 {
10895 	int ret = 0;
10896 
10897 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
10898 		return -EBUSY;
10899 
10900 	for ( ; !ret && n--; addr += 4) {
10901 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
10902 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
10903 				      0, 5, 2);
10904 		if (!ret)
10905 			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
10906 	}
10907 	return ret;
10908 }
10909 
10910 /**
10911  *	t4_cim_write - write a block into CIM internal address space
10912  *	@adap: the adapter
10913  *	@addr: the start address within the CIM address space
10914  *	@n: number of words to write
10915  *	@valp: set of values to write
10916  *
10917  *	Writes a block of 4-byte words into the CIM intenal address space.
10918  */
10919 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
10920 		 const unsigned int *valp)
10921 {
10922 	int ret = 0;
10923 
10924 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
10925 		return -EBUSY;
10926 
10927 	for ( ; !ret && n--; addr += 4) {
10928 		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
10929 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
10930 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
10931 				      0, 5, 2);
10932 	}
10933 	return ret;
10934 }
10935 
/*
 * Convenience wrapper: write the single 4-byte word @val at CIM internal
 * address @addr.  Returns the t4_cim_write() result (0 on success).
 */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
10941 
10942 /**
10943  *	t4_cim_read_la - read CIM LA capture buffer
10944  *	@adap: the adapter
10945  *	@la_buf: where to store the LA data
10946  *	@wrptr: the HW write pointer within the capture buffer
10947  *
10948  *	Reads the contents of the CIM LA buffer with the most recent entry at
10949  *	the end	of the returned data and with the entry at @wrptr first.
10950  *	We try to leave the LA in the running state we find it in.
10951  */
10952 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
10953 {
10954 	int i, ret;
10955 	unsigned int cfg, val, idx;
10956 
10957 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
10958 	if (ret)
10959 		return ret;
10960 
10961 	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
10962 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
10963 		if (ret)
10964 			return ret;
10965 	}
10966 
10967 	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
10968 	if (ret)
10969 		goto restart;
10970 
10971 	idx = G_UPDBGLAWRPTR(val);
10972 	if (wrptr)
10973 		*wrptr = idx;
10974 
10975 	for (i = 0; i < adap->params.cim_la_size; i++) {
10976 		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
10977 				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
10978 		if (ret)
10979 			break;
10980 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
10981 		if (ret)
10982 			break;
10983 		if (val & F_UPDBGLARDEN) {
10984 			ret = -ETIMEDOUT;
10985 			break;
10986 		}
10987 		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
10988 		if (ret)
10989 			break;
10990 
10991 		/* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
10992 		 * identify the 32-bit portion of the full 312-bit data
10993 		 */
10994 		if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
10995 			idx = (idx & 0xff0) + 0x10;
10996 		else
10997 			idx++;
10998 		/* address can't exceed 0xfff */
10999 		idx &= M_UPDBGLARDPTR;
11000 	}
11001 restart:
11002 	if (cfg & F_UPDBGLAEN) {
11003 		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
11004 				      cfg & ~F_UPDBGLARDEN);
11005 		if (!ret)
11006 			ret = r;
11007 	}
11008 	return ret;
11009 }
11010 
11011 /**
11012  *	t4_tp_read_la - read TP LA capture buffer
11013  *	@adap: the adapter
11014  *	@la_buf: where to store the LA data
11015  *	@wrptr: the HW write pointer within the capture buffer
11016  *
11017  *	Reads the contents of the TP LA buffer with the most recent entry at
11018  *	the end	of the returned data and with the entry at @wrptr first.
11019  *	We leave the LA in the running state we find it in.
11020  */
11021 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
11022 {
11023 	bool last_incomplete;
11024 	unsigned int i, cfg, val, idx;
11025 
11026 	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
11027 	if (cfg & F_DBGLAENABLE)			/* freeze LA */
11028 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
11029 			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
11030 
11031 	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
11032 	idx = G_DBGLAWPTR(val);
11033 	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
11034 	if (last_incomplete)
11035 		idx = (idx + 1) & M_DBGLARPTR;
11036 	if (wrptr)
11037 		*wrptr = idx;
11038 
11039 	val &= 0xffff;
11040 	val &= ~V_DBGLARPTR(M_DBGLARPTR);
11041 	val |= adap->params.tp.la_mask;
11042 
11043 	for (i = 0; i < TPLA_SIZE; i++) {
11044 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
11045 		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
11046 		idx = (idx + 1) & M_DBGLARPTR;
11047 	}
11048 
11049 	/* Wipe out last entry if it isn't valid */
11050 	if (last_incomplete)
11051 		la_buf[TPLA_SIZE - 1] = ~0ULL;
11052 
11053 	if (cfg & F_DBGLAENABLE)		/* restore running state */
11054 		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
11055 			     cfg | adap->params.tp.la_mask);
11056 }
11057 
/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
 * state for more than the Warning Threshold then we'll issue a warning about
 * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
 * appears to be hung every Warning Repeat second till the situation clears.
 * If the situation clears, we'll note that as well.
 */
#define SGE_IDMA_WARN_THRESH 1		/* seconds in same state before warning */
#define SGE_IDMA_WARN_REPEAT 300	/* seconds between repeated warnings */
11067 
11068 /**
11069  *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
11070  *	@adapter: the adapter
11071  *	@idma: the adapter IDMA Monitor state
11072  *
11073  *	Initialize the state of an SGE Ingress DMA Monitor.
11074  */
11075 void t4_idma_monitor_init(struct adapter *adapter,
11076 			  struct sge_idma_monitor_state *idma)
11077 {
11078 	/* Initialize the state variables for detecting an SGE Ingress DMA
11079 	 * hang.  The SGE has internal counters which count up on each clock
11080 	 * tick whenever the SGE finds its Ingress DMA State Engines in the
11081 	 * same state they were on the previous clock tick.  The clock used is
11082 	 * the Core Clock so we have a limit on the maximum "time" they can
11083 	 * record; typically a very small number of seconds.  For instance,
11084 	 * with a 600MHz Core Clock, we can only count up to a bit more than
11085 	 * 7s.  So we'll synthesize a larger counter in order to not run the
11086 	 * risk of having the "timers" overflow and give us the flexibility to
11087 	 * maintain a Hung SGE State Machine of our own which operates across
11088 	 * a longer time frame.
11089 	 */
11090 	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
11091 	idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
11092 }
11093 
11094 /**
11095  *	t4_idma_monitor - monitor SGE Ingress DMA state
11096  *	@adapter: the adapter
11097  *	@idma: the adapter IDMA Monitor state
11098  *	@hz: number of ticks/second
11099  *	@ticks: number of ticks since the last IDMA Monitor call
11100  */
11101 void t4_idma_monitor(struct adapter *adapter,
11102 		     struct sge_idma_monitor_state *idma,
11103 		     int hz, int ticks)
11104 {
11105 	int i, idma_same_state_cnt[2];
11106 
11107 	 /* Read the SGE Debug Ingress DMA Same State Count registers.  These
11108 	  * are counters inside the SGE which count up on each clock when the
11109 	  * SGE finds its Ingress DMA State Engines in the same states they
11110 	  * were in the previous clock.  The counters will peg out at
11111 	  * 0xffffffff without wrapping around so once they pass the 1s
11112 	  * threshold they'll stay above that till the IDMA state changes.
11113 	  */
11114 	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
11115 	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
11116 	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
11117 
11118 	for (i = 0; i < 2; i++) {
11119 		u32 debug0, debug11;
11120 
11121 		/* If the Ingress DMA Same State Counter ("timer") is less
11122 		 * than 1s, then we can reset our synthesized Stall Timer and
11123 		 * continue.  If we have previously emitted warnings about a
11124 		 * potential stalled Ingress Queue, issue a note indicating
11125 		 * that the Ingress Queue has resumed forward progress.
11126 		 */
11127 		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
11128 			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
11129 				CH_WARN(adapter, "SGE idma%d, queue %u, "
11130 					"resumed after %d seconds\n",
11131 					i, idma->idma_qid[i],
11132 					idma->idma_stalled[i]/hz);
11133 			idma->idma_stalled[i] = 0;
11134 			continue;
11135 		}
11136 
11137 		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
11138 		 * domain.  The first time we get here it'll be because we
11139 		 * passed the 1s Threshold; each additional time it'll be
11140 		 * because the RX Timer Callback is being fired on its regular
11141 		 * schedule.
11142 		 *
11143 		 * If the stall is below our Potential Hung Ingress Queue
11144 		 * Warning Threshold, continue.
11145 		 */
11146 		if (idma->idma_stalled[i] == 0) {
11147 			idma->idma_stalled[i] = hz;
11148 			idma->idma_warn[i] = 0;
11149 		} else {
11150 			idma->idma_stalled[i] += ticks;
11151 			idma->idma_warn[i] -= ticks;
11152 		}
11153 
11154 		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
11155 			continue;
11156 
11157 		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
11158 		 */
11159 		if (idma->idma_warn[i] > 0)
11160 			continue;
11161 		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;
11162 
11163 		/* Read and save the SGE IDMA State and Queue ID information.
11164 		 * We do this every time in case it changes across time ...
11165 		 * can't be too careful ...
11166 		 */
11167 		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
11168 		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
11169 		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
11170 
11171 		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
11172 		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
11173 		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
11174 
11175 		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
11176 			" state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
11177 			i, idma->idma_qid[i], idma->idma_state[i],
11178 			idma->idma_stalled[i]/hz,
11179 			debug0, debug11);
11180 		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
11181 	}
11182 }
11183 
11184 /**
11185  *     t4_set_vf_mac - Set MAC address for the specified VF
11186  *     @adapter: The adapter
11187  *     @vf: one of the VFs instantiated by the specified PF
11188  *     @naddr: the number of MAC addresses
11189  *     @addr: the MAC address(es) to be set to the specified VF
11190  */
11191 int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
11192 		      unsigned int naddr, u8 *addr)
11193 {
11194 	struct fw_acl_mac_cmd cmd;
11195 
11196 	memset(&cmd, 0, sizeof(cmd));
11197 	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_MAC_CMD) |
11198 				    F_FW_CMD_REQUEST |
11199 				    F_FW_CMD_WRITE |
11200 				    V_FW_ACL_MAC_CMD_PFN(adapter->pf) |
11201 				    V_FW_ACL_MAC_CMD_VFN(vf));
11202 
11203 	/* Note: Do not enable the ACL */
11204 	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
11205 	cmd.nmac = naddr;
11206 
11207 	switch (adapter->pf) {
11208 	case 3:
11209 		memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
11210 		break;
11211 	case 2:
11212 		memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
11213 		break;
11214 	case 1:
11215 		memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
11216 		break;
11217 	case 0:
11218 		memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
11219 		break;
11220 	}
11221 
11222 	return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
11223 }
11224 
11225 /* Code which cannot be pushed to kernel.org e.g., cxgbtool ioctl helper
11226  * functions
11227  */
11228 
11229 /**
11230  *	t4_read_pace_tbl - read the pace table
11231  *	@adap: the adapter
11232  *	@pace_vals: holds the returned values
11233  *
11234  *	Returns the values of TP's pace table in microseconds.
11235  */
11236 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
11237 {
11238 	unsigned int i, v;
11239 
11240 	for (i = 0; i < NTX_SCHED; i++) {
11241 		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
11242 		v = t4_read_reg(adap, A_TP_PACE_TABLE);
11243 		pace_vals[i] = dack_ticks_to_usec(adap, v);
11244 	}
11245 }
11246 
11247 /**
11248  *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
11249  *	@adap: the adapter
11250  *	@sched: the scheduler index
11251  *	@kbps: the byte rate in Kbps
11252  *	@ipg: the interpacket delay in tenths of nanoseconds
11253  * 	@sleep_ok: if true we may sleep while awaiting command completion
11254  *
11255  *	Return the current configuration of a HW Tx scheduler.
11256  */
11257 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
11258 		     unsigned int *ipg, bool sleep_ok)
11259 {
11260 	unsigned int v, addr, bpt, cpt;
11261 
11262 	if (kbps) {
11263 		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
11264 		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
11265 		if (sched & 1)
11266 			v >>= 16;
11267 		bpt = (v >> 8) & 0xff;
11268 		cpt = v & 0xff;
11269 		if (!cpt)
11270 			*kbps = 0;	/* scheduler disabled */
11271 		else {
11272 			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
11273 			*kbps = (v * bpt) / 125;
11274 		}
11275 	}
11276 	if (ipg) {
11277 		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
11278 		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
11279 		if (sched & 1)
11280 			v >>= 16;
11281 		v &= 0xffff;
11282 		*ipg = (10000 * v) / core_ticks_per_usec(adap);
11283 	}
11284 }
11285 
11286 /**
11287  *	t4_load_cfg - download config file
11288  *	@adap: the adapter
11289  *	@cfg_data: the cfg text file to write
11290  *	@size: text file size
11291  *
11292  *	Write the supplied config text file to the card's serial flash.
11293  */
11294 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
11295 {
11296 	int ret, i, n, cfg_addr;
11297 	unsigned int addr;
11298 	unsigned int flash_cfg_start_sec;
11299 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
11300 
11301 	cfg_addr = t4_flash_cfg_addr(adap);
11302 	if (cfg_addr < 0)
11303 		return cfg_addr;
11304 
11305 	addr = cfg_addr;
11306 	flash_cfg_start_sec = addr / SF_SEC_SIZE;
11307 
11308 	if (size > FLASH_CFG_MAX_SIZE) {
11309 		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
11310 		       FLASH_CFG_MAX_SIZE);
11311 		return -EFBIG;
11312 	}
11313 
11314 	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
11315 			 sf_sec_size);
11316 	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
11317 				     flash_cfg_start_sec + i - 1);
11318 	/*
11319 	 * If size == 0 then we're simply erasing the FLASH sectors associated
11320 	 * with the on-adapter Firmware Configuration File.
11321 	 */
11322 	if (ret || size == 0)
11323 		goto out;
11324 
11325 	/* this will write to the flash up to SF_PAGE_SIZE at a time */
11326 	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
11327 		if ( (size - i) <  SF_PAGE_SIZE)
11328 			n = size - i;
11329 		else
11330 			n = SF_PAGE_SIZE;
11331 		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
11332 		if (ret)
11333 			goto out;
11334 
11335 		addr += SF_PAGE_SIZE;
11336 		cfg_data += SF_PAGE_SIZE;
11337 	}
11338 
11339 out:
11340 	if (ret)
11341 		CH_ERR(adap, "config file %s failed %d\n",
11342 		       (size == 0 ? "clear" : "download"), ret);
11343 	return ret;
11344 }
11345 
11346 /**
11347  *	t5_fw_init_extern_mem - initialize the external memory
11348  *	@adap: the adapter
11349  *
11350  *	Initializes the external memory on T5.
11351  */
11352 int t5_fw_init_extern_mem(struct adapter *adap)
11353 {
11354 	u32 params[1], val[1];
11355 	int ret;
11356 
11357 	if (!is_t5(adap->params.chip))
11358 		return 0;
11359 
11360 	val[0] = 0xff; /* Initialize all MCs */
11361 	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
11362 			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
11363 	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
11364 			FW_CMD_MAX_TIMEOUT);
11365 
11366 	return ret;
11367 }
11368 
/* BIOS boot headers */

/*
 * Standard PCI Expansion ROM header.  Multi-byte fields are stored as
 * byte arrays because the on-ROM layout is little-endian and unaligned.
 */
typedef struct pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	reserved[22]; /* Reserved per processor Architecture data */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
11375 
/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	size512; /* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];
	u8	cksum; /* Checksum computed on the entire Image */
	u8	reserved[16]; /* Reserved */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
11385 
/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2]; /* ROM signature. The value 0xaa55 */
	u8	initialization_size[2]; /* Units 512. Includes this header */
	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
	u8	efi_machine_type[2]; /* Machine type from EFI image header */
	u8	compression_type[2]; /* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8]; /* Reserved */
	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
11404 
/* PCI Data Structure Format */
typedef struct pcir_data_structure { /* PCI Data Structure */
	u8	signature[4]; /* Signature. The string "PCIR" */
	u8	vendor_id[2]; /* Vendor Identification */
	u8	device_id[2]; /* Device Identification */
	u8	vital_product[2]; /* Pointer to Vital Product Data */
	u8	length[2]; /* PCIR Data Structure Length */
	u8	revision; /* PCIR Data Structure Revision */
	u8	class_code[3]; /* Class Code */
	u8	image_length[2]; /* Image Length. Multiple of 512B */
	u8	code_revision[2]; /* Revision Level of Code/Data */
	u8	code_type; /* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator; /* Indicator. Identifies the last image in the ROM */
	u8	reserved[2]; /* Reserved */
} pcir_data_t; /* PCI_DATA_STRUCTURE */
11428 
/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1024 chunks of 512B = 512KB */
	VENDOR_ID = 0x1425, /* Chelsio PCI Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* "PCIR" as a little-endian u32 */
};
11439 
11440 /*
11441  *	modify_device_id - Modifies the device ID of the Boot BIOS image
11442  *	@adatper: the device ID to write.
11443  *	@boot_data: the boot image to modify.
11444  *
11445  *	Write the supplied device ID to the boot BIOS image.
11446  */
11447 static void modify_device_id(int device_id, u8 *boot_data)
11448 {
11449 	legacy_pci_exp_rom_header_t *header;
11450 	pcir_data_t *pcir_header;
11451 	u32 cur_header = 0;
11452 
11453 	/*
11454 	 * Loop through all chained images and change the device ID's
11455 	 */
11456 	while (1) {
11457 		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
11458 		pcir_header = (pcir_data_t *) &boot_data[cur_header +
11459 			      le16_to_cpu(*(u16*)header->pcir_offset)];
11460 
11461 		/*
11462 		 * Only modify the Device ID if code type is Legacy or HP.
11463 		 * 0x00: Okay to modify
11464 		 * 0x01: FCODE. Do not be modify
11465 		 * 0x03: Okay to modify
11466 		 * 0x04-0xFF: Do not modify
11467 		 */
11468 		if (pcir_header->code_type == 0x00) {
11469 			u8 csum = 0;
11470 			int i;
11471 
11472 			/*
11473 			 * Modify Device ID to match current adatper
11474 			 */
11475 			*(u16*) pcir_header->device_id = device_id;
11476 
11477 			/*
11478 			 * Set checksum temporarily to 0.
11479 			 * We will recalculate it later.
11480 			 */
11481 			header->cksum = 0x0;
11482 
11483 			/*
11484 			 * Calculate and update checksum
11485 			 */
11486 			for (i = 0; i < (header->size512 * 512); i++)
11487 				csum += (u8)boot_data[cur_header + i];
11488 
11489 			/*
11490 			 * Invert summed value to create the checksum
11491 			 * Writing new checksum value directly to the boot data
11492 			 */
11493 			boot_data[cur_header + 7] = -csum;
11494 
11495 		} else if (pcir_header->code_type == 0x03) {
11496 
11497 			/*
11498 			 * Modify Device ID to match current adatper
11499 			 */
11500 			*(u16*) pcir_header->device_id = device_id;
11501 
11502 		}
11503 
11504 
11505 		/*
11506 		 * Check indicator element to identify if this is the last
11507 		 * image in the ROM.
11508 		 */
11509 		if (pcir_header->indicator & 0x80)
11510 			break;
11511 
11512 		/*
11513 		 * Move header pointer up to the next image in the ROM.
11514 		 */
11515 		cur_header += header->size512 * 512;
11516 	}
11517 }
11518 
11519 #ifdef CHELSIO_T4_DIAGS
11520 /*
11521  *	t4_earse_sf - Erase entire serial Flash region
11522  *	@adapter: the adapter
11523  *
11524  *	Clears the entire serial flash region.
11525  */
11526 int t4_erase_sf(struct adapter *adap)
11527 {
11528 	unsigned int nsectors;
11529 	int ret;
11530 
11531 	nsectors = FLASH_END_SEC;
11532 	if (nsectors > adap->params.sf_nsec)
11533 		nsectors = adap->params.sf_nsec;
11534 
11535 	// Erase all sectors of flash before and including the FW.
11536 	// Flash layout is in t4_hw.h.
11537 	ret = t4_flash_erase_sectors(adap, 0, nsectors - 1);
11538 	if (ret)
11539 		CH_ERR(adap, "Erasing serial flash failed, error %d\n", ret);
11540 	return ret;
11541 }
11542 #endif
11543 
11544 /*
11545  *	t4_load_boot - download boot flash
11546  *	@adapter: the adapter
11547  *	@boot_data: the boot image to write
11548  *	@boot_addr: offset in flash to write boot_data
11549  *	@size: image size
11550  *
11551  *	Write the supplied boot image to the card's serial flash.
11552  *	The boot image has the following sections: a 28-byte header and the
11553  *	boot image.
11554  */
11555 int t4_load_boot(struct adapter *adap, u8 *boot_data,
11556 		 unsigned int boot_addr, unsigned int size)
11557 {
11558 	pci_exp_rom_header_t *header;
11559 	int pcir_offset ;
11560 	pcir_data_t *pcir_header;
11561 	int ret, addr;
11562 	uint16_t device_id;
11563 	unsigned int i;
11564 	unsigned int boot_sector = (boot_addr * 1024 );
11565 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
11566 
11567 	/*
11568 	 * Make sure the boot image does not encroach on the firmware region
11569 	 */
11570 	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
11571 		CH_ERR(adap, "boot image encroaching on firmware region\n");
11572 		return -EFBIG;
11573 	}
11574 
11575 	/*
11576 	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
11577 	 * and Boot configuration data sections. These 3 boot sections span
11578 	 * sectors 0 to 7 in flash and live right before the FW image location.
11579 	 */
11580 	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
11581 			sf_sec_size);
11582 	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
11583 				     (boot_sector >> 16) + i - 1);
11584 
11585 	/*
11586 	 * If size == 0 then we're simply erasing the FLASH sectors associated
11587 	 * with the on-adapter option ROM file
11588 	 */
11589 	if (ret || (size == 0))
11590 		goto out;
11591 
11592 	/* Get boot header */
11593 	header = (pci_exp_rom_header_t *)boot_data;
11594 	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
11595 	/* PCIR Data Structure */
11596 	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
11597 
11598 	/*
11599 	 * Perform some primitive sanity testing to avoid accidentally
11600 	 * writing garbage over the boot sectors.  We ought to check for
11601 	 * more but it's not worth it for now ...
11602 	 */
11603 	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
11604 		CH_ERR(adap, "boot image too small/large\n");
11605 		return -EFBIG;
11606 	}
11607 
11608 #ifndef CHELSIO_T4_DIAGS
11609 	/*
11610 	 * Check BOOT ROM header signature
11611 	 */
11612 	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
11613 		CH_ERR(adap, "Boot image missing signature\n");
11614 		return -EINVAL;
11615 	}
11616 
11617 	/*
11618 	 * Check PCI header signature
11619 	 */
11620 	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
11621 		CH_ERR(adap, "PCI header missing signature\n");
11622 		return -EINVAL;
11623 	}
11624 
11625 	/*
11626 	 * Check Vendor ID matches Chelsio ID
11627 	 */
11628 	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
11629 		CH_ERR(adap, "Vendor ID missing signature\n");
11630 		return -EINVAL;
11631 	}
11632 #endif
11633 
11634 	/*
11635 	 * Retrieve adapter's device ID
11636 	 */
11637 	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
11638 	/* Want to deal with PF 0 so I strip off PF 4 indicator */
11639 	device_id = device_id & 0xf0ff;
11640 
11641 	/*
11642 	 * Check PCIE Device ID
11643 	 */
11644 	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
11645 		/*
11646 		 * Change the device ID in the Boot BIOS image to match
11647 		 * the Device ID of the current adapter.
11648 		 */
11649 		modify_device_id(device_id, boot_data);
11650 	}
11651 
11652 	/*
11653 	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
11654 	 * we finish copying the rest of the boot image. This will ensure
11655 	 * that the BIOS boot header will only be written if the boot image
11656 	 * was written in full.
11657 	 */
11658 	addr = boot_sector;
11659 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
11660 		addr += SF_PAGE_SIZE;
11661 		boot_data += SF_PAGE_SIZE;
11662 		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
11663 		if (ret)
11664 			goto out;
11665 	}
11666 
11667 	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
11668 			     (const u8 *)header, 0);
11669 
11670 out:
11671 	if (ret)
11672 		CH_ERR(adap, "boot image download failed, error %d\n", ret);
11673 	return ret;
11674 }
11675 
11676 /*
11677  *	t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
11678  *	@adapter: the adapter
11679  *
11680  *	Return the address within the flash where the OptionROM Configuration
11681  *	is stored, or an error if the device FLASH is too small to contain
11682  *	a OptionROM Configuration.
11683  */
11684 static int t4_flash_bootcfg_addr(struct adapter *adapter)
11685 {
11686 	/*
11687 	 * If the device FLASH isn't large enough to hold a Firmware
11688 	 * Configuration File, return an error.
11689 	 */
11690 	if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
11691 		return -ENOSPC;
11692 
11693 	return FLASH_BOOTCFG_START;
11694 }
11695 
11696 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
11697 {
11698 	int ret, i, n, cfg_addr;
11699 	unsigned int addr;
11700 	unsigned int flash_cfg_start_sec;
11701 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
11702 
11703 	cfg_addr = t4_flash_bootcfg_addr(adap);
11704 	if (cfg_addr < 0)
11705 		return cfg_addr;
11706 
11707 	addr = cfg_addr;
11708 	flash_cfg_start_sec = addr / SF_SEC_SIZE;
11709 
11710 	if (size > FLASH_BOOTCFG_MAX_SIZE) {
11711 		CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
11712 			FLASH_BOOTCFG_MAX_SIZE);
11713 		return -EFBIG;
11714 	}
11715 
11716 	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
11717 			 sf_sec_size);
11718 	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
11719 					flash_cfg_start_sec + i - 1);
11720 
11721 	/*
11722 	 * If size == 0 then we're simply erasing the FLASH sectors associated
11723 	 * with the on-adapter OptionROM Configuration File.
11724 	 */
11725 	if (ret || size == 0)
11726 		goto out;
11727 
11728 	/* this will write to the flash up to SF_PAGE_SIZE at a time */
11729 	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
11730 		if ( (size - i) <  SF_PAGE_SIZE)
11731 			n = size - i;
11732 		else
11733 			n = SF_PAGE_SIZE;
11734 		ret = t4_write_flash(adap, addr, n, cfg_data, 0);
11735 		if (ret)
11736 			goto out;
11737 
11738 		addr += SF_PAGE_SIZE;
11739 		cfg_data += SF_PAGE_SIZE;
11740 	}
11741 
11742 out:
11743 	if (ret)
11744 		CH_ERR(adap, "boot config data %s failed %d\n",
11745 				(size == 0 ? "clear" : "download"), ret);
11746 	return ret;
11747 }
11748 
11749 /**
11750  * t4_read_bootcfg - read the current (boot)OptionROM configuration from FLASH
11751  * @adap: the adapter
11752  * @cfg_data: where to store the read OptionROM configuration data
11753  *
11754  * Read the current OptionROM configuration from FLASH and write to the
11755  * buffer @cfg_data supplied.
11756  */
11757 int t4_read_bootcfg(struct adapter *adap, u8 *cfg_data, unsigned int size)
11758 {
11759 	u32 *ptr = (u32 *)cfg_data;
11760 	int i, n, cfg_addr;
11761 	int ret = 0;
11762 
11763 	if (size > FLASH_BOOTCFG_MAX_SIZE) {
11764 		CH_ERR(adap, "bootcfg file too big, max is %u bytes\n",
11765 			FLASH_BOOTCFG_MAX_SIZE);
11766 		return -EINVAL;
11767 	}
11768 
11769 	cfg_addr = t4_flash_bootcfg_addr(adap);
11770 	if (cfg_addr < 0)
11771 		return cfg_addr;
11772 
11773 	size = size / sizeof (u32);
11774 	for (i = 0; i < size; i += SF_PAGE_SIZE) {
11775 		if ( (size - i) <  SF_PAGE_SIZE)
11776 			n = size - i;
11777 		else
11778 			n = SF_PAGE_SIZE;
11779 
11780 		ret = t4_read_flash(adap, cfg_addr, n, ptr, 0);
11781 		if (ret)
11782 			goto out;
11783 
11784 		cfg_addr += (n*4);
11785 		ptr += n;
11786 	}
11787 
11788 out:
11789 	return ret;
11790 }
11791 
11792 /**
11793  *	t4_set_filter_mode - configure the optional components of filter tuples
11794  *	@adap: the adapter
11795  *	@mode_map: a bitmap selcting which optional filter components to enable
11796  * 	@sleep_ok: if true we may sleep while awaiting command completion
11797  *
11798  *	Sets the filter mode by selecting the optional components to enable
11799  *	in filter tuples.  Returns 0 on success and a negative error if the
11800  *	requested mode needs more bits than are available for optional
11801  *	components.
11802  */
11803 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
11804 		       bool sleep_ok)
11805 {
11806 	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
11807 
11808 	int i, nbits = 0;
11809 
11810 	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
11811 		if (mode_map & (1 << i))
11812 			nbits += width[i];
11813 	if (nbits > FILTER_OPT_LEN)
11814 		return -EINVAL;
11815 
11816 	t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);
11817 
11818 	return 0;
11819 }
11820 
11821 /**
11822  *	t4_clr_port_stats - clear port statistics
11823  *	@adap: the adapter
11824  *	@idx: the port index
11825  *
11826  *	Clear HW statistics for the given port.
11827  */
11828 void t4_clr_port_stats(struct adapter *adap, int idx)
11829 {
11830 	unsigned int i;
11831 	u32 bgmap = t4_get_mps_bg_map(adap, idx);
11832 	u32 port_base_addr;
11833 
11834 	if (is_t4(adap->params.chip))
11835 		port_base_addr = PORT_BASE(idx);
11836 	else
11837 		port_base_addr = T5_PORT_BASE(idx);
11838 
11839 	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
11840 			i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
11841 		t4_write_reg(adap, port_base_addr + i, 0);
11842 	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
11843 			i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
11844 		t4_write_reg(adap, port_base_addr + i, 0);
11845 	for (i = 0; i < 4; i++)
11846 		if (bgmap & (1 << i)) {
11847 			t4_write_reg(adap,
11848 			A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
11849 			t4_write_reg(adap,
11850 			A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
11851 		}
11852 }
11853 
11854 /**
11855  *	t4_i2c_io - read/write I2C data from adapter
11856  *	@adap: the adapter
11857  *	@port: Port number if per-port device; <0 if not
11858  *	@devid: per-port device ID or absolute device ID
11859  *	@offset: byte offset into device I2C space
11860  *	@len: byte length of I2C space data
11861  *	@buf: buffer in which to return I2C data for read
11862  *	      buffer which holds the I2C data for write
11863  *	@write: if true, do a write; else do a read
11864  *	Reads/Writes the I2C data from/to the indicated device and location.
11865  */
11866 int t4_i2c_io(struct adapter *adap, unsigned int mbox,
11867 	      int port, unsigned int devid,
11868 	      unsigned int offset, unsigned int len,
11869 	      u8 *buf, bool write)
11870 {
11871 	struct fw_ldst_cmd ldst_cmd, ldst_rpl;
11872 	unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
11873 	int ret = 0;
11874 
11875 	if (len > I2C_PAGE_SIZE)
11876 		return -EINVAL;
11877 
11878 	/* Dont allow reads that spans multiple pages */
11879 	if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
11880 		return -EINVAL;
11881 
11882 	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
11883 	ldst_cmd.op_to_addrspace =
11884 		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
11885 			    F_FW_CMD_REQUEST |
11886 			    (write ? F_FW_CMD_WRITE : F_FW_CMD_READ) |
11887 			    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C));
11888 	ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
11889 	ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
11890 	ldst_cmd.u.i2c.did = devid;
11891 
11892 	while (len > 0) {
11893 		unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
11894 
11895 		ldst_cmd.u.i2c.boffset = offset;
11896 		ldst_cmd.u.i2c.blen = i2c_len;
11897 
11898 		if (write)
11899 			memcpy(ldst_cmd.u.i2c.data, buf, i2c_len);
11900 
11901 		ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
11902 				 write ? NULL : &ldst_rpl);
11903 		if (ret)
11904 			break;
11905 
11906 		if (!write)
11907 			memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
11908 		offset += i2c_len;
11909 		buf += i2c_len;
11910 		len -= i2c_len;
11911 	}
11912 
11913 	return ret;
11914 }
11915 
11916 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
11917 	      int port, unsigned int devid,
11918 	      unsigned int offset, unsigned int len,
11919 	      u8 *buf)
11920 {
11921 	return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, false);
11922 }
11923 
11924 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
11925 	      int port, unsigned int devid,
11926 	      unsigned int offset, unsigned int len,
11927 	      u8 *buf)
11928 {
11929 	return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, true);
11930 }
11931 
11932 /**
11933  * 	t4_sge_ctxt_rd - read an SGE context through FW
11934  * 	@adap: the adapter
11935  * 	@mbox: mailbox to use for the FW command
11936  * 	@cid: the context id
11937  * 	@ctype: the context type
11938  * 	@data: where to store the context data
11939  *
11940  * 	Issues a FW command through the given mailbox to read an SGE context.
11941  */
11942 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
11943 		   enum ctxt_type ctype, u32 *data)
11944 {
11945 	int ret;
11946 	struct fw_ldst_cmd c;
11947 
11948 	if (ctype == CTXT_EGRESS)
11949 		ret = FW_LDST_ADDRSPC_SGE_EGRC;
11950 	else if (ctype == CTXT_INGRESS)
11951 		ret = FW_LDST_ADDRSPC_SGE_INGC;
11952 	else if (ctype == CTXT_FLM)
11953 		ret = FW_LDST_ADDRSPC_SGE_FLMC;
11954 	else
11955 		ret = FW_LDST_ADDRSPC_SGE_CONMC;
11956 
11957 	memset(&c, 0, sizeof(c));
11958 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
11959 					F_FW_CMD_REQUEST | F_FW_CMD_READ |
11960 					V_FW_LDST_CMD_ADDRSPACE(ret));
11961 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
11962 	c.u.idctxt.physid = cpu_to_be32(cid);
11963 
11964 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
11965 	if (ret == 0) {
11966 		data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
11967 		data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
11968 		data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
11969 		data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
11970 		data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
11971 		data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
11972 	}
11973 	return ret;
11974 }
11975 
11976 /**
11977  * 	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
11978  * 	@adap: the adapter
11979  * 	@cid: the context id
11980  * 	@ctype: the context type
11981  * 	@data: where to store the context data
11982  *
11983  * 	Reads an SGE context directly, bypassing FW.  This is only for
11984  * 	debugging when FW is unavailable.
11985  */
11986 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
11987 		      u32 *data)
11988 {
11989 	int i, ret;
11990 
11991 	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
11992 	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
11993 	if (!ret)
11994 		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
11995 			*data++ = t4_read_reg(adap, i);
11996 	return ret;
11997 }
11998 
11999 int t4_sched_config(struct adapter *adapter, int type, int minmaxen)
12000 {
12001 	struct fw_sched_cmd cmd;
12002 
12003 	memset(&cmd, 0, sizeof(cmd));
12004 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
12005 				      F_FW_CMD_REQUEST |
12006 				      F_FW_CMD_WRITE);
12007 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
12008 
12009 	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
12010 	cmd.u.config.type = type;
12011 	cmd.u.config.minmaxen = minmaxen;
12012 
12013 	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
12014 			       NULL, 1);
12015 }
12016 
12017 int t4_sched_params(struct adapter *adapter,
12018 		    int channel, int cls,
12019 		    int level, int mode, int type,
12020 		    int rateunit, int ratemode,
12021 		    int minrate, int maxrate, int weight,
12022 		    int pktsize, int burstsize)
12023 {
12024 	struct fw_sched_cmd cmd;
12025 
12026 	memset(&cmd, 0, sizeof(cmd));
12027 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
12028 				      F_FW_CMD_REQUEST |
12029 				      F_FW_CMD_WRITE);
12030 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
12031 
12032 	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
12033 	cmd.u.params.type = type;
12034 	cmd.u.params.level = level;
12035 	cmd.u.params.mode = mode;
12036 	cmd.u.params.ch = channel;
12037 	cmd.u.params.cl = cls;
12038 	cmd.u.params.unit = rateunit;
12039 	cmd.u.params.rate = ratemode;
12040 	cmd.u.params.min = cpu_to_be32(minrate);
12041 	cmd.u.params.max = cpu_to_be32(maxrate);
12042 	cmd.u.params.weight = cpu_to_be16(weight);
12043 	cmd.u.params.pktsize = cpu_to_be16(pktsize);
12044 	cmd.u.params.burstsize = cpu_to_be16(burstsize);
12045 
12046 	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
12047 			       NULL, 1);
12048 }
12049 
12050 int t4_read_sched_params(struct adapter *adapter,
12051 		    int channel, int cls,
12052 		    int *level, int *mode, int *type,
12053 		    int *rateunit, int *ratemode,
12054 		    int *minrate, int *maxrate, int *weight,
12055 		    int *pktsize, int *burstsize)
12056 {
12057 	struct fw_sched_cmd cmd;
12058 	int ret = 0;
12059 
12060 	memset(&cmd, 0, sizeof(cmd));
12061 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
12062 				      F_FW_CMD_REQUEST |
12063 				      F_FW_CMD_READ);
12064 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
12065 	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
12066 	cmd.u.params.ch = channel;
12067 	cmd.u.params.cl = cls;
12068 
12069 	ret = t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
12070 			      &cmd, 1);
12071 	if (ret)
12072 		goto out;
12073 
12074 	*level = cmd.u.params.level;
12075 	*mode = cmd.u.params.mode;
12076 	*type = cmd.u.params.type;
12077 	*rateunit = cmd.u.params.unit;
12078 	*ratemode = cmd.u.params.rate;
12079 	*minrate = be32_to_cpu(cmd.u.params.min);
12080 	*maxrate = be32_to_cpu(cmd.u.params.max);
12081 	*weight = be16_to_cpu(cmd.u.params.weight);
12082 	*pktsize = be16_to_cpu(cmd.u.params.pktsize);
12083 	*burstsize = be16_to_cpu(cmd.u.params.burstsize);
12084 
12085 out:
12086 	return ret;
12087 }
12088 
12089 /*
12090  *	t4_config_watchdog - configure (enable/disable) a watchdog timer
12091  *	@adapter: the adapter
12092  * 	@mbox: mailbox to use for the FW command
12093  * 	@pf: the PF owning the queue
12094  * 	@vf: the VF owning the queue
12095  *	@timeout: watchdog timeout in ms
12096  *	@action: watchdog timer / action
12097  *
12098  *	There are separate watchdog timers for each possible watchdog
12099  *	action.  Configure one of the watchdog timers by setting a non-zero
12100  *	timeout.  Disable a watchdog timer by using a timeout of zero.
12101  */
12102 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
12103 		       unsigned int pf, unsigned int vf,
12104 		       unsigned int timeout, unsigned int action)
12105 {
12106 	struct fw_watchdog_cmd wdog;
12107 	unsigned int ticks;
12108 
12109 	/*
12110 	 * The watchdog command expects a timeout in units of 10ms so we need
12111 	 * to convert it here (via rounding) and force a minimum of one 10ms
12112 	 * "tick" if the timeout is non-zero but the convertion results in 0
12113 	 * ticks.
12114 	 */
12115 	ticks = (timeout + 5)/10;
12116 	if (timeout && !ticks)
12117 		ticks = 1;
12118 
12119 	memset(&wdog, 0, sizeof wdog);
12120 	wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
12121 				     F_FW_CMD_REQUEST |
12122 				     F_FW_CMD_WRITE |
12123 				     V_FW_PARAMS_CMD_PFN(pf) |
12124 				     V_FW_PARAMS_CMD_VFN(vf));
12125 	wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
12126 	wdog.timeout = cpu_to_be32(ticks);
12127 	wdog.action = cpu_to_be32(action);
12128 
12129 	return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
12130 }
12131 
12132 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
12133 {
12134 	struct fw_devlog_cmd devlog_cmd;
12135 	int ret;
12136 
12137 	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
12138 	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
12139 					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
12140 	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
12141 	ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
12142 			 sizeof(devlog_cmd), &devlog_cmd);
12143 	if (ret)
12144 		return ret;
12145 
12146 	*level = devlog_cmd.level;
12147 	return 0;
12148 }
12149 
12150 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
12151 {
12152 	struct fw_devlog_cmd devlog_cmd;
12153 
12154 	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
12155 	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
12156 					     F_FW_CMD_REQUEST |
12157 					     F_FW_CMD_WRITE);
12158 	devlog_cmd.level = level;
12159 	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
12160 	return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
12161 			  sizeof(devlog_cmd), &devlog_cmd);
12162 }
12163 
12164 int t4_configure_add_smac(struct adapter *adap)
12165 {
12166 	unsigned int param, val;
12167 	int ret = 0;
12168 
12169 	adap->params.smac_add_support = 0;
12170 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
12171 		  V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_ADD_SMAC));
12172 	/* Query FW to check if FW supports adding source mac address
12173 	 * to TCAM feature or not.
12174 	 * If FW returns 1, driver can use this feature and driver need to send
12175 	 * FW_PARAMS_PARAM_DEV_ADD_SMAC write command with value 1 to
12176 	 * enable adding smac to TCAM.
12177 	 */
12178 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
12179 	if (ret)
12180 		return ret;
12181 
12182 	if (val == 1) {
12183 		ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
12184 				    &param, &val);
12185 		if (!ret)
12186 			/* Firmware allows adding explicit TCAM entries.
12187 			 * Save this internally.
12188 			 */
12189 			adap->params.smac_add_support = 1;
12190 	}
12191 
12192 	return ret;
12193 }
12194 
12195 int t4_configure_ringbb(struct adapter *adap)
12196 {
12197 	unsigned int param, val;
12198 	int ret = 0;
12199 
12200 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
12201 		  V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RING_BACKBONE));
12202 	/* Query FW to check if FW supports ring switch feature or not.
12203 	 * If FW returns 1, driver can use this feature and driver need to send
12204 	 * FW_PARAMS_PARAM_DEV_RING_BACKBONE write command with value 1 to
12205 	 * enable the ring backbone configuration.
12206 	 */
12207 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
12208 	if (ret < 0) {
12209 		CH_ERR(adap, "Querying FW using Ring backbone params command failed, err=%d\n",
12210 			ret);
12211 		goto out;
12212 	}
12213 
12214 	if (val != 1) {
12215 		CH_ERR(adap, "FW doesnot support ringbackbone features\n");
12216 		goto out;
12217 	}
12218 
12219 	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
12220 	if (ret < 0) {
12221 		CH_ERR(adap, "Could not set Ringbackbone, err= %d\n",
12222 			ret);
12223 		goto out;
12224 	}
12225 
12226 out:
12227 	return ret;
12228 }
12229 
12230 /*
12231  *	t4_set_vlan_acl - Set a VLAN id for the specified VF
12232  *	@adapter: the adapter
12233  *	@mbox: mailbox to use for the FW command
12234  *	@vf: one of the VFs instantiated by the specified PF
12235  *	@vlan: The vlanid to be set
12236  *
12237  */
12238 int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
12239 		    u16 vlan)
12240 {
12241 	struct fw_acl_vlan_cmd vlan_cmd;
12242 	unsigned int enable;
12243 
12244 	enable = (vlan ? F_FW_ACL_VLAN_CMD_EN : 0);
12245 	memset(&vlan_cmd, 0, sizeof(vlan_cmd));
12246 	vlan_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_VLAN_CMD) |
12247 					 F_FW_CMD_REQUEST |
12248 					 F_FW_CMD_WRITE |
12249 					 F_FW_CMD_EXEC |
12250 					 V_FW_ACL_VLAN_CMD_PFN(adap->pf) |
12251 					 V_FW_ACL_VLAN_CMD_VFN(vf));
12252 	vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
12253 	/* Drop all packets that donot match vlan id */
12254 	vlan_cmd.dropnovlan_fm = (enable
12255 				  ? (F_FW_ACL_VLAN_CMD_DROPNOVLAN |
12256 				     F_FW_ACL_VLAN_CMD_FM)
12257 				  : 0);
12258 	if (enable != 0) {
12259 		vlan_cmd.nvlan = 1;
12260 		vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
12261 	}
12262 
12263 	return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
12264 }
12265 
12266 /**
12267  *	t4_del_mac - Removes the exact-match filter for a MAC address
12268  *	@adap: the adapter
12269  *	@mbox: mailbox to use for the FW command
12270  *	@viid: the VI id
12271  *	@addr: the MAC address value
12272  *	@smac: if true, delete from only the smac region of MPS
12273  *
12274  *	Modifies an exact-match filter and sets it to the new MAC address if
12275  *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
12276  *	latter case the address is added persistently if @persist is %true.
12277  *
12278  *	Returns a negative error number or the index of the filter with the new
12279  *	MAC value.  Note that this index may differ from @idx.
12280  */
12281 int t4_del_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
12282 	       const u8 *addr, bool smac)
12283 {
12284 	int ret;
12285 	struct fw_vi_mac_cmd c;
12286 	struct fw_vi_mac_exact *p = c.u.exact;
12287 	unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
12288 
12289 	memset(&c, 0, sizeof(c));
12290 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
12291 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
12292 				   V_FW_VI_MAC_CMD_VIID(viid));
12293 	c.freemacs_to_len16 = cpu_to_be32(
12294 					V_FW_CMD_LEN16(1) |
12295 					(smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0));
12296 
12297 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
12298 	p->valid_to_idx = cpu_to_be16(
12299 				F_FW_VI_MAC_CMD_VALID |
12300 				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));
12301 
12302 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
12303 	if (ret == 0) {
12304 		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
12305 		if (ret < max_mac_addr)
12306 			return -ENOMEM;
12307 	}
12308 
12309 	return ret;
12310 }
12311 
12312 /**
12313  *	t4_add_mac - Adds an exact-match filter for a MAC address
12314  *	@adap: the adapter
12315  *	@mbox: mailbox to use for the FW command
12316  *	@viid: the VI id
12317  *	@idx: index of existing filter for old value of MAC address, or -1
12318  *	@addr: the new MAC address value
12319  *	@persist: whether a new MAC allocation should be persistent
12320  *	@add_smt: if true also add the address to the HW SMT
12321  *	@smac: if true, update only the smac region of MPS
12322  *
12323  *	Modifies an exact-match filter and sets it to the new MAC address if
12324  *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
12325  *	latter case the address is added persistently if @persist is %true.
12326  *
12327  *	Returns a negative error number or the index of the filter with the new
12328  *	MAC value.  Note that this index may differ from @idx.
12329  */
12330 int t4_add_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
12331 	       int idx, const u8 *addr, bool persist, u8 *smt_idx, bool smac)
12332 {
12333 	int ret, mode;
12334 	struct fw_vi_mac_cmd c;
12335 	struct fw_vi_mac_exact *p = c.u.exact;
12336 	unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
12337 
12338 	if (idx < 0)		/* new allocation */
12339 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
12340 	mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
12341 
12342 	memset(&c, 0, sizeof(c));
12343 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
12344 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
12345 				   V_FW_VI_MAC_CMD_VIID(viid));
12346 	c.freemacs_to_len16 = cpu_to_be32(
12347 				V_FW_CMD_LEN16(1) |
12348 				(smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0));
12349 	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
12350 				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
12351 				      V_FW_VI_MAC_CMD_IDX(idx));
12352 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
12353 
12354 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
12355 	if (ret == 0) {
12356 		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
12357 		if (ret >= max_mac_addr)
12358 			return -ENOMEM;
12359 		if (smt_idx) {
12360 			/* Does fw supports returning smt_idx? */
12361 			if (adap->params.viid_smt_extn_support)
12362 				*smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid));
12363 			else {
12364 				/* In T4/T5, SMT contains 256 SMAC entries
12365 				 * organized in 128 rows of 2 entries each.
12366 				 * In T6, SMT contains 256 SMAC entries in
12367 				 * 256 rows.
12368 				 */
12369 				if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
12370 					*smt_idx = ((viid & M_FW_VIID_VIN) << 1);
12371 				else
12372 					*smt_idx = (viid & M_FW_VIID_VIN);
12373 			}
12374 		}
12375 	}
12376 
12377 	return ret;
12378 }
12379 
12380