xref: /illumos-gate/usr/src/uts/common/io/cxgbe/common/t4_hw.c (revision 3dde7c95de085cfe31f989eff6cefb775563eeb8)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source. A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * This file is part of the Chelsio T4/T5/T6 Ethernet driver.
14  *
15  * Copyright (C) 2003-2017 Chelsio Communications.  All rights reserved.
16  *
17  * This program is distributed in the hope that it will be useful, but WITHOUT
18  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19  * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
20  * release for licensing terms and conditions.
21  */
22 
23 #include "common.h"
24 #include "t4_regs.h"
25 #include "t4_regs_values.h"
26 #include "t4fw_interface.h"
27 
28 /**
29  *	t4_wait_op_done_val - wait until an operation is completed
30  *	@adapter: the adapter performing the operation
31  *	@reg: the register to check for completion
32  *	@mask: a single-bit field within @reg that indicates completion
33  *	@polarity: the value of the field when the operation is completed
34  *	@attempts: number of check iterations
35  *	@delay: delay in usecs between iterations
36  *	@valp: where to store the value of the register at completion time
37  *
38  *	Wait until an operation is completed by checking a bit in a register
39  *	up to @attempts times.  If @valp is not NULL the value of the register
40  *	at the time it indicated completion is stored there.  Returns 0 if the
41  *	operation completes and	-EAGAIN	otherwise.
42  */
43 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
44 			       int polarity, int attempts, int delay, u32 *valp)
45 {
46 	while (1) {
47 		u32 val = t4_read_reg(adapter, reg);
48 
49 		if (!!(val & mask) == polarity) {
50 			if (valp)
51 				*valp = val;
52 			return 0;
53 		}
54 		if (--attempts == 0)
55 			return -EAGAIN;
56 		if (delay)
57 			udelay(delay);
58 	}
59 }
60 
61 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
62 				  int polarity, int attempts, int delay)
63 {
64 	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
65 				   delay, NULL);
66 }
67 
68 /**
69  *	t4_set_reg_field - set a register field to a value
70  *	@adapter: the adapter to program
71  *	@addr: the register address
72  *	@mask: specifies the portion of the register to modify
73  *	@val: the new value for the register field
74  *
75  *	Sets a register field specified by the supplied mask to the
76  *	given value.
77  */
78 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
79 		      u32 val)
80 {
81 	u32 v = t4_read_reg(adapter, addr) & ~mask;
82 
83 	t4_write_reg(adapter, addr, v | val);
84 	(void) t4_read_reg(adapter, addr);      /* flush */
85 }
86 
87 /**
88  *	t4_read_indirect - read indirectly addressed registers
89  *	@adap: the adapter
90  *	@addr_reg: register holding the indirect address
91  *	@data_reg: register holding the value of the indirect register
92  *	@vals: where the read register values are stored
93  *	@nregs: how many indirect registers to read
94  *	@start_idx: index of first indirect register to read
95  *
96  *	Reads registers that are accessed indirectly through an address/data
97  *	register pair.
98  */
99 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
100 			     unsigned int data_reg, u32 *vals,
101 			     unsigned int nregs, unsigned int start_idx)
102 {
103 	while (nregs--) {
104 		t4_write_reg(adap, addr_reg, start_idx);
105 		*vals++ = t4_read_reg(adap, data_reg);
106 		start_idx++;
107 	}
108 }
109 
110 /**
111  *	t4_write_indirect - write indirectly addressed registers
112  *	@adap: the adapter
113  *	@addr_reg: register holding the indirect addresses
114  *	@data_reg: register holding the value for the indirect registers
115  *	@vals: values to write
116  *	@nregs: how many indirect registers to write
117  *	@start_idx: address of first indirect register to write
118  *
119  *	Writes a sequential block of registers that are accessed indirectly
120  *	through an address/data register pair.
121  */
122 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
123 		       unsigned int data_reg, const u32 *vals,
124 		       unsigned int nregs, unsigned int start_idx)
125 {
126 	while (nregs--) {
127 		t4_write_reg(adap, addr_reg, start_idx++);
128 		t4_write_reg(adap, data_reg, *vals++);
129 	}
130 }
131 
132 /*
133  * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
134  * mechanism.  This guarantees that we get the real value even if we're
135  * operating within a Virtual Machine and the Hypervisor is trapping our
136  * Configuration Space accesses.
137  *
138  * N.B. This routine should only be used as a last resort: the firmware uses
139  *      the backdoor registers on a regular basis and we can end up
 *      conflicting with its uses!
141  */
142 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
143 {
144 	u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
145 
146 	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
147 		req |= F_ENABLE;
148 	else
149 		req |= F_T6_ENABLE;
150 
151 	if (is_t4(adap->params.chip))
152 		req |= F_LOCALCFG;
153 
154 	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
155 	*val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
156 
157 	/* Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
158 	 * Configuration Space read.  (None of the other fields matter when
159 	 * F_ENABLE is 0 so a simple register write is easier than a
160 	 * read-modify-write via t4_set_reg_field().)
161 	 */
162 	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
163 }
164 
165 /*
166  * t4_report_fw_error - report firmware error
167  * @adap: the adapter
168  *
169  * The adapter firmware can indicate error conditions to the host.
170  * If the firmware has indicated an error, print out the reason for
171  * the firmware error.
172  */
173 static void t4_report_fw_error(struct adapter *adap)
174 {
175 	static const char *const reason[] = {
176 		"Crash",			/* PCIE_FW_EVAL_CRASH */
177 		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
178 		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
179 		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
180 		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
181 		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
182 		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
183 		"Reserved",			/* reserved */
184 	};
185 	u32 pcie_fw;
186 
187 	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
188 	if (pcie_fw & F_PCIE_FW_ERR)
189 		CH_ERR(adap, "Firmware reports adapter error: %s\n",
190 			reason[G_PCIE_FW_EVAL(pcie_fw)]);
191 }
192 
193 /*
194  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
195  */
196 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
197 			 u32 mbox_addr)
198 {
199 	for ( ; nflit; nflit--, mbox_addr += 8)
200 		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
201 }
202 
203 /*
204  * Handle a FW assertion reported in a mailbox.
205  */
206 static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
207 {
208 	CH_ALERT(adap,
209 		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
210 		  asrt->u.assert.filename_0_7,
211 		  be32_to_cpu(asrt->u.assert.line),
212 		  be32_to_cpu(asrt->u.assert.x),
213 		  be32_to_cpu(asrt->u.assert.y));
214 }
215 
216 #define X_CIM_PF_NOACCESS 0xeeeeeeee
217 
218 /*
 * If the Host OS Driver needs locking around accesses to the mailbox, this
220  * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
221  */
222 /* makes single-statement usage a bit cleaner ... */
223 #ifdef T4_OS_NEEDS_MBOX_LOCKING
224 #define T4_OS_MBOX_LOCKING(x) x
225 #else
226 #define T4_OS_MBOX_LOCKING(x) do {} while (0)
227 #endif
228 
229 /*
230  * If the OS Driver wants busy waits to keep a watchdog happy, tap it during
231  * busy loops which don't sleep.
232  */
233 #ifdef T4_OS_NEEDS_TOUCH_NMI_WATCHDOG
234 #define T4_OS_TOUCH_NMI_WATCHDOG()	t4_os_touch_nmi_watchdog()
235 #else
236 #define T4_OS_TOUCH_NMI_WATCHDOG()
237 #endif
238 
239 #ifdef T4_OS_LOG_MBOX_CMDS
240 /**
241  *	t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
242  *	@adapter: the adapter
243  *	@cmd: the Firmware Mailbox Command or Reply
244  *	@size: command length in bytes
245  *	@access: the time (ms) needed to access the Firmware Mailbox
246  *	@execute: the time (ms) the command spent being executed
247  */
248 static void t4_record_mbox(struct adapter *adapter,
249 			   const __be64 *cmd, unsigned int size,
250 			   int access, int execute)
251 {
252 	struct mbox_cmd_log *log = adapter->mbox_log;
253 	struct mbox_cmd *entry;
254 	int i;
255 
256 	entry = mbox_cmd_log_entry(log, log->cursor++);
257 	if (log->cursor == log->size)
258 		log->cursor = 0;
259 
260 	for (i = 0; i < size/8; i++)
261 		entry->cmd[i] = be64_to_cpu(cmd[i]);
262 	while (i < MBOX_LEN/8)
263 		entry->cmd[i++] = 0;
264 	entry->timestamp = t4_os_timestamp();
265 	entry->seqno = log->seqno++;
266 	entry->access = access;
267 	entry->execute = execute;
268 }
269 
270 #define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \
271 	t4_record_mbox(__adapter, __cmd, __size, __access, __execute)
272 
273 #else /* !T4_OS_LOG_MBOX_CMDS */
274 
275 #define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \
276 	/* nothing */
277 
278 #endif /* !T4_OS_LOG_MBOX_CMDS */
279 
280 /**
281  *	t4_record_mbox_marker - record a marker in the mailbox log
282  *	@adapter: the adapter
283  *	@marker: byte array marker
284  *	@size: marker size in bytes
285  *
286  *	We inject a "fake mailbox command" into the Firmware Mailbox Log
287  *	using a known command token and then the bytes of the specified
288  *	marker.  This lets debugging code inject markers into the log to
289  *	help identify which commands are in response to higher level code.
290  */
void t4_record_mbox_marker(struct adapter *adapter,
			   const void *marker, unsigned int size)
{
#ifdef T4_OS_LOG_MBOX_CMDS
	__be64 marker_cmd[MBOX_LEN/8];
	/* First flit carries the marker token, so payload is one flit less. */
	const unsigned int max_marker = sizeof marker_cmd - sizeof (__be64);
	unsigned int marker_cmd_size;

	/* Silently truncate oversized markers to the available payload. */
	if (size > max_marker)
		size = max_marker;

	/* The all-ones first flit is the known "fake command" token. */
	marker_cmd[0] = cpu_to_be64(~0LLU);
	memcpy(&marker_cmd[1], marker, size);
	/* Zero the remainder so the recorded entry is fully initialized. */
	memset((unsigned char *)&marker_cmd[1] + size, 0, max_marker - size);
	/* Token flit plus the marker rounded up to whole flits. */
	marker_cmd_size = sizeof (__be64) + roundup(size, sizeof (__be64));

	t4_record_mbox(adapter, marker_cmd, marker_cmd_size, 0, 0);
#endif /* T4_OS_LOG_MBOX_CMDS */
}
310 
311 /*
312  * Delay time in microseconds to wait for mailbox access/fw reply
313  * to mailbox command
314  */
315 #define MIN_MBOX_CMD_DELAY 900
316 #define MBOX_CMD_DELAY 1000
317 
318 /**
319  *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
320  *	@adap: the adapter
321  *	@mbox: index of the mailbox to use
322  *	@cmd: the command to write
323  *	@size: command length in bytes
324  *	@rpl: where to optionally store the reply
325  *	@sleep_ok: if true we may sleep while awaiting command completion
326  *	@timeout: time to wait for command to finish before timing out
327  *		(negative implies @sleep_ok=false)
328  *
329  *	Sends the given command to FW through the selected mailbox and waits
330  *	for the FW to execute the command.  If @rpl is not %NULL it is used to
331  *	store the FW's reply to the command.  The command and its optional
332  *	reply are of the same length.  Some FW commands like RESET and
333  *	INITIALIZE can take a considerable amount of time to execute.
334  *	@sleep_ok determines whether we may sleep while awaiting the response.
335  *	If sleeping is allowed we use progressive backoff otherwise we spin.
336  *	Note that passing in a negative @timeout is an alternate mechanism
337  *	for specifying @sleep_ok=false.  This is useful when a higher level
338  *	interface allows for specification of @timeout but not @sleep_ok ...
339  *
340  *	The return value is 0 on success or a negative errno on failure.  A
341  *	failure can happen either because we are not able to execute the
342  *	command or FW executes it but signals an error.  In the latter case
343  *	the return value is the error code indicated by FW (negated).
344  */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
#ifdef T4_OS_NEEDS_MBOX_LOCKING
	u16 access = 0;		/* poll iterations spent waiting for access */
#endif
	u16 execute = 0;	/* poll iterations spent awaiting the reply */
	u32 v;
	u64 res;
	int i, ret;
	const __be64 *p = cmd;
	/* Per-PF mailbox data and control registers for @mbox */
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];	/* local copy of the FW reply */
	T4_OS_MBOX_LOCKING(t4_os_list_t entry);
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

#ifdef T4_OS_NEEDS_MBOX_LOCKING
	/*
	 * Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);

	for (i = 0; ; i++) {
		/*
		 * If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rarely
		 * contend on access to the mailbox ...  Also check for a
		 * firmware error which we'll report as a device error.
		 */
		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		if (i > 4*timeout || (pcie_fw & F_PCIE_FW_ERR)) {
			t4_os_atomic_list_del(&entry, &adap->mbox_lock);
			t4_report_fw_error(adap);
			ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
			T4_RECORD_MBOX(adap, cmd, size, ret, 0);
			return ret;
		}

		/*
		 * If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (t4_os_list_first_entry(&adap->mbox_list) == &entry)
			break;

		/*
		 * Delay for a bit before checking again ...
		 */
		if (sleep_ok) {
			usleep_range(MIN_MBOX_CMD_DELAY, MBOX_CMD_DELAY);
		} else {
			T4_OS_TOUCH_NMI_WATCHDOG();
			udelay(MBOX_CMD_DELAY);
		}
	}
	access = i;
#endif /* T4_OS_NEEDS_MBOX_LOCKING */

	/*
	 * Attempt to gain access to the mailbox.  This is a bounded number
	 * of immediate re-reads with no delay in between.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, dequeue ourselves from the
	 * mailbox atomic access list and report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
							 &adap->mbox_lock));
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		T4_RECORD_MBOX(adap, cmd, size, access, ret);
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		       (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	T4_RECORD_MBOX(adap, cmd, size, access, 0);
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	/*
	 * XXX It's not clear that we need this anymore now
	 * XXX that we have mailbox logging ...
	 */
	CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

	/* Hand the mailbox to the firmware to kick off execution. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	(void) t4_read_reg(adap, ctl_reg);	/* flush write */

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	for (i = 0;
	     !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
	     i < timeout;
	     i++) {
		if (sleep_ok) {
			usleep_range(MIN_MBOX_CMD_DELAY, MBOX_CMD_DELAY);
		} else {
			T4_OS_TOUCH_NMI_WATCHDOG();
			udelay(MBOX_CMD_DELAY);
		}

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			/* Ownership without a valid message: release, retry. */
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
								 &adap->mbox_lock));

			/* Number of delay intervals (roughly ms) waited. */
			execute = i + 1;
			T4_RECORD_MBOX(adap, cmd_rpl, size, access, execute);

			/*
			 * XXX It's not clear that we need this anymore now
			 * XXX that we have mailbox logging ...
			 */
			CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);
			CH_MSG(adap, INFO, HW,
			       "command completed in %d ms (%ssleeping)\n",
			       execute, sleep_ok ? "" : "non-");

			/*
			 * A FW_DEBUG_CMD reply is an assertion report rather
			 * than a reply to our command; log it and fail with
			 * EIO.  Otherwise hand the reply back (if requested)
			 * and return the FW's (negated) retval.
			 */
			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry, &adap->mbox_lock));

	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	T4_RECORD_MBOX(adap, cmd, size, access, ret);
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}
550 
551 #ifdef CONFIG_CUDBG
552 /*
553  * The maximum number of times to iterate for FW reply before
554  * issuing a mailbox timeout
555  */
556 #define FW_REPLY_WAIT_LOOP 6000000
557 
558 /**
559  *	t4_wr_mbox_meat_timeout_panic - send a command to FW through the given
560  *	mailbox. This function is a minimal version of t4_wr_mbox_meat_timeout()
561  *	and is only invoked during a kernel crash. Since this function is
 *	called through an atomic notifier chain, we cannot sleep awaiting a
563  *	response from FW, hence repeatedly loop until we get a reply.
564  *
565  *	@adap: the adapter
566  *	@mbox: index of the mailbox to use
567  *	@cmd: the command to write
568  *	@size: command length in bytes
569  *	@rpl: where to optionally store the reply
570  */
571 
static int t4_wr_mbox_meat_timeout_panic(struct adapter *adap, int mbox,
			    const void *cmd, int size, void *rpl)
{
	u32 v;
	u64 res;
	int i, ret;
	u64 cnt;
	const __be64 *p = cmd;
	/* Per-PF mailbox data and control registers for @mbox */
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];	/* local copy of the FW reply */
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * Check for a firmware error which we'll report as a
	 * device error.
	 */
	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR) {
		t4_report_fw_error(adap);
		/* (F_PCIE_FW_ERR is known set here, so this is -ENXIO.) */
		ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
		return ret;
	}

	/*
	 * Attempt to gain access to the mailbox.  This is a bounded number
	 * of immediate re-reads with no delay in between.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		       (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

	/* Hand the mailbox to the firmware to kick off execution. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);	/* flush write */

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.  This is a pure busy-wait (no delay between
	 * polls) bounded by FW_REPLY_WAIT_LOOP iterations, since we cannot
	 * sleep on the crash path.
	 */
	for (cnt = 0;
	    !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
	    cnt < FW_REPLY_WAIT_LOOP;
	    cnt++) {
		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			/* Ownership without a valid message: release, retry. */
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));

			CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

			/*
			 * A FW_DEBUG_CMD reply is an assertion report rather
			 * than a reply to our command; log it and fail with
			 * EIO.  Otherwise hand the reply back (if requested)
			 * and return the FW's (negated) retval.
			 */
			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}
699 #endif
700 
701 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
702 		    void *rpl, bool sleep_ok)
703 {
704 #ifdef CONFIG_CUDBG
705 	if (adap->flags & K_CRASH)
706 		return t4_wr_mbox_meat_timeout_panic(adap, mbox, cmd, size,
707 						     rpl);
708 	else
709 #endif
710 		return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
711 					       sleep_ok, FW_CMD_MAX_TIMEOUT);
712 
713 }
714 
715 static int t4_edc_err_read(struct adapter *adap, int idx)
716 {
717 	u32 edc_ecc_err_addr_reg;
718 	u32 edc_bist_status_rdata_reg;
719 
720 	if (is_t4(adap->params.chip)) {
721 		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
722 		return 0;
723 	}
724 	if (idx != MEM_EDC0 && idx != MEM_EDC1) {
725 		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
726 		return 0;
727 	}
728 
729 	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
730 	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
731 
732 	CH_WARN(adap,
733 		"edc%d err addr 0x%x: 0x%x.\n",
734 		idx, edc_ecc_err_addr_reg,
735 		t4_read_reg(adap, edc_ecc_err_addr_reg));
736 	CH_WARN(adap,
737 	 	"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
738 		edc_bist_status_rdata_reg,
739 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
740 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
741 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
742 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
743 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
744 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
745 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
746 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
747 		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
748 
749 	return 0;
750 }
751 
752 /**
753  *	t4_memory_rw_addr - read/write adapter memory via PCIE memory window
754  *	@adap: the adapter
755  *	@win: PCI-E Memory Window to use
756  *	@addr: address within adapter memory
757  *	@len: amount of memory to transfer
758  *	@hbuf: host memory buffer
759  *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
760  *
761  *	Reads/writes an [almost] arbitrary memory region in the firmware: the
762  *	firmware memory address and host buffer must be aligned on 32-bit
 *	boundaries; the length may be arbitrary.
764  *
765  *	NOTES:
766  *	 1. The memory is transferred as a raw byte sequence from/to the
767  *	    firmware's memory.  If this memory contains data structures which
768  *	    contain multi-byte integers, it's the caller's responsibility to
769  *	    perform appropriate byte order conversions.
770  *
771  *	 2. It is the Caller's responsibility to ensure that no other code
772  *	    uses the specified PCI-E Memory Window while this routine is
773  *	    using it.  This is typically done via the use of OS-specific
774  *	    locks, etc.
775  */
int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr,
		      u32 len, void *hbuf, int dir)
{
	u32 pos, offset, resid;
	u32 win_pf, mem_reg, mem_aperture, mem_base;
	u32 *buf;

	/* Argument sanity checks: both the adapter address and the host
	 * buffer must be 32-bit aligned.
	 */
	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
		return -EINVAL;
	buf = (u32 *)hbuf;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware.  So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory.  We need to grab that aperture in order to know
	 * how to use the specified window.  The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
	 * the address is relative to BAR0.
	 */
	mem_reg = t4_read_reg(adap,
			      PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
						  win));

	/* a dead adapter will return 0xffffffff for PIO reads */
	if (mem_reg == 0xffffffff) {
		CH_WARN(adap, "Unable to read PCI-E Memory Window Base[%d]\n",
			win);
		return -ENXIO;
	}

	mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT);
	mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT;
	/* On T4 the window base is a bus address; make it BAR0-relative. */
	if (is_t4(adap->params.chip))
		mem_base -= adap->t4_bar0;
	win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.  The position is aperture-aligned.
	 */
	pos = addr & ~(mem_aperture-1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.  (Read it back to ensure that changes propagate before we
	 * attempt to use the new value.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win),
		     pos | win_pf);
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win));

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 *
	 * A note on Endianness issues:
	 *
	 * The "register" reads and writes below from/to the PCI-E Memory
	 * Window invoke the standard adapter Big-Endian to PCI-E Link
	 * Little-Endian "swizzel."  As a result, if we have the following
	 * data in adapter memory:
	 *
	 *     Memory:  ... | b0 | b1 | b2 | b3 | ...
	 *     Address:      i+0  i+1  i+2  i+3
	 *
	 * Then a read of the adapter memory via the PCI-E Memory Window
	 * will yield:
	 *
	 *     x = readl(i)
	 *	   31                  0
	 *         [ b3 | b2 | b1 | b0 ]
	 *
	 * If this value is stored into local memory on a Little-Endian system
	 * it will show up correctly in local memory as:
	 *
	 *     ( ..., b0, b1, b2, b3, ... )
	 *
	 * But on a Big-Endian system, the store will show up in memory
	 * incorrectly swizzled as:
	 *
	 *     ( ..., b3, b2, b1, b0, ... )
	 *
	 * So we need to account for this in the reads and writes to the
	 * PCI-E Memory Window below by undoing the register read/write
	 * swizzels.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
						mem_base + offset));
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32)cpu_to_le32(*buf++));
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.  Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_write_reg(adap,
				PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
						    win), pos | win_pf);
			/* read back so the move propagates before use */
			t4_read_reg(adap,
				PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
						    win));
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount.  The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid) {
		union {
			u32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			last.word = le32_to_cpu(
					(__force __le32)t4_read_reg(adap,
						mem_base + offset));
			/*
			 * NOTE(review): this loop copies bytes [resid, 4)
			 * of the last word instead of the residual bytes
			 * [0, resid), and writes past the requested length
			 * in the host buffer.  It matches the shared
			 * Chelsio common code, but looks wrong -- confirm
			 * against the other cxgbe ports before relying on
			 * residual reads.
			 */
			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
				bp[i] = last.byte[i];
		} else {
			/* Zero-pad the residual word before writing it. */
			last.word = *buf;
			for (i = resid; i < 4; i++)
				last.byte[i] = 0;
			t4_write_reg(adap, mem_base + offset,
				     (__force u32)cpu_to_le32(last.word));
		}
	}

	return 0;
}
930 
931 /**
932  *	t4_memory_rw_mtype - read/write EDC 0, EDC 1 or MC via PCIE memory window
933  *	@adap: the adapter
934  *	@win: PCI-E Memory Window to use
935  *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
936  *	@maddr: address within indicated memory type
937  *	@len: amount of memory to transfer
938  *	@hbuf: host memory buffer
939  *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
940  *
941  *	Reads/writes adapter memory using t4_memory_rw_addr().  This routine
942  *	provides an (memory type, address withing memory type) interface.
943  */
944 int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
945 		       u32 len, void *hbuf, int dir)
946 {
947 	u32 mtype_offset;
948 	u32 edc_size, mc_size;
949 
950 	/* Offset into the region of memory which is being accessed
951 	 * MEM_EDC0 = 0
952 	 * MEM_EDC1 = 1
953 	 * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
954 	 * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
955 	 */
956 	edc_size  = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR));
957 	if (mtype != MEM_MC1)
958 		mtype_offset = (mtype * (edc_size * 1024 * 1024));
959 	else {
960 		mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap,
961 						      A_MA_EXT_MEMORY0_BAR));
962 		mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
963 	}
964 
965 	return t4_memory_rw_addr(adap, win,
966 				 mtype_offset + maddr, len,
967 				 hbuf, dir);
968 }
969 
/*
 * Return the specified PCI-E Configuration Space register from our Physical
 * Function.  We try first via a Firmware LDST Command (if drv_fw_attach != 0)
 * since we prefer to let the firmware own all of these registers, but if that
 * fails we go for it directly ourselves.
 */
976 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
977 {
978 	u32 val;
979 
980 	/*
981 	 * If fw_attach != 0, construct and send the Firmware LDST Command to
982 	 * retrieve the specified PCI-E Configuration Space register.
983 	 */
984 	if (drv_fw_attach != 0) {
985 		struct fw_ldst_cmd ldst_cmd;
986 		int ret;
987 
988 		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
989 		ldst_cmd.op_to_addrspace =
990 			cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
991 				    F_FW_CMD_REQUEST |
992 				    F_FW_CMD_READ |
993 				    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
994 		ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
995 		ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
996 		ldst_cmd.u.pcie.ctrl_to_fn =
997 			(F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
998 		ldst_cmd.u.pcie.r = reg;
999 
1000 		/*
1001 		 * If the LDST Command succeeds, return the result, otherwise
1002 		 * fall through to reading it directly ourselves ...
1003 		 */
1004 		ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
1005 				 &ldst_cmd);
1006 		if (ret == 0)
1007 			return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
1008 
1009 		CH_WARN(adap, "Firmware failed to return "
1010 			"Configuration Space register %d, err = %d\n",
1011 			reg, -ret);
1012 	}
1013 
1014 	/*
1015 	 * Read the desired Configuration Space register via the PCI-E
1016 	 * Backdoor mechanism.
1017 	 */
1018 	t4_hw_pci_read_cfg4(adap, reg, &val);
1019 	return val;
1020 }
1021 
1022 /*
1023  * Get the window based on base passed to it.
1024  * Window aperture is currently unhandled, but there is no use case for it
1025  * right now
1026  */
1027 static int t4_get_window(struct adapter *adap, u64 pci_base, u64 pci_mask, u64 memwin_base, int drv_fw_attach)
1028 {
1029 	if (is_t4(adap->params.chip)) {
1030 		u32 bar0;
1031 
1032 		/*
1033 		 * Truncation intentional: we only read the bottom 32-bits of
1034 		 * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
1035 		 * mechanism to read BAR0 instead of using
1036 		 * pci_resource_start() because we could be operating from
1037 		 * within a Virtual Machine which is trapping our accesses to
1038 		 * our Configuration Space and we need to set up the PCI-E
1039 		 * Memory Window decoders with the actual addresses which will
1040 		 * be coming across the PCI-E link.
1041 		 */
1042 		bar0 = t4_read_pcie_cfg4(adap, pci_base, drv_fw_attach);
1043 		bar0 &= pci_mask;
1044 		adap->t4_bar0 = bar0;
1045 
1046 		return bar0 + memwin_base;
1047 	} else {
1048 		/* For T5, only relative offset inside the PCIe BAR is passed */
1049 		return memwin_base;
1050 	}
1051 }
1052 
1053 /* Get the default utility window (win0) used by everyone */
1054 int t4_get_util_window(struct adapter *adap, int drv_fw_attach)
1055 {
1056 	return t4_get_window(adap, PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE, drv_fw_attach);
1057 }
1058 
1059 /*
1060  * Set up memory window for accessing adapter memory ranges.  (Read
1061  * back MA register to ensure that changes propagate before we attempt
1062  * to use the new values.)
1063  */
1064 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
1065 {
1066 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window),
1067 		     memwin_base | V_BIR(0) |
1068 		     V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
1069 	t4_read_reg(adap, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window));
1070 }
1071 
1072 /**
1073  *	t4_get_regs_len - return the size of the chips register set
1074  *	@adapter: the adapter
1075  *
1076  *	Returns the size of the chip's BAR0 register space.
1077  */
1078 unsigned int t4_get_regs_len(struct adapter *adapter)
1079 {
1080 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
1081 
1082 	switch (chip_version) {
1083 	case CHELSIO_T4:
1084 		return T4_REGMAP_SIZE;
1085 
1086 	case CHELSIO_T5:
1087 	case CHELSIO_T6:
1088 		return T5_REGMAP_SIZE;
1089 	}
1090 
1091 	CH_ERR(adapter,
1092 		"Unsupported chip version %d\n", chip_version);
1093 	return 0;
1094 }
1095 
1096 /**
1097  *	t4_get_regs - read chip registers into provided buffer
1098  *	@adap: the adapter
1099  *	@buf: register buffer
1100  *	@buf_size: size (in bytes) of register buffer
1101  *
1102  *	If the provided register buffer isn't large enough for the chip's
1103  *	full register range, the register dump will be truncated to the
1104  *	register buffer's size.
1105  */
1106 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
1107 {
1108 	static const unsigned int t4_reg_ranges[] = {
1109 		0x1008, 0x1108,
1110 		0x1180, 0x1184,
1111 		0x1190, 0x1194,
1112 		0x11a0, 0x11a4,
1113 		0x11b0, 0x11b4,
1114 		0x11fc, 0x123c,
1115 		0x1300, 0x173c,
1116 		0x1800, 0x18fc,
1117 		0x3000, 0x30d8,
1118 		0x30e0, 0x30e4,
1119 		0x30ec, 0x5910,
1120 		0x5920, 0x5924,
1121 		0x5960, 0x5960,
1122 		0x5968, 0x5968,
1123 		0x5970, 0x5970,
1124 		0x5978, 0x5978,
1125 		0x5980, 0x5980,
1126 		0x5988, 0x5988,
1127 		0x5990, 0x5990,
1128 		0x5998, 0x5998,
1129 		0x59a0, 0x59d4,
1130 		0x5a00, 0x5ae0,
1131 		0x5ae8, 0x5ae8,
1132 		0x5af0, 0x5af0,
1133 		0x5af8, 0x5af8,
1134 		0x6000, 0x6098,
1135 		0x6100, 0x6150,
1136 		0x6200, 0x6208,
1137 		0x6240, 0x6248,
1138 		0x6280, 0x62b0,
1139 		0x62c0, 0x6338,
1140 		0x6370, 0x638c,
1141 		0x6400, 0x643c,
1142 		0x6500, 0x6524,
1143 		0x6a00, 0x6a04,
1144 		0x6a14, 0x6a38,
1145 		0x6a60, 0x6a70,
1146 		0x6a78, 0x6a78,
1147 		0x6b00, 0x6b0c,
1148 		0x6b1c, 0x6b84,
1149 		0x6bf0, 0x6bf8,
1150 		0x6c00, 0x6c0c,
1151 		0x6c1c, 0x6c84,
1152 		0x6cf0, 0x6cf8,
1153 		0x6d00, 0x6d0c,
1154 		0x6d1c, 0x6d84,
1155 		0x6df0, 0x6df8,
1156 		0x6e00, 0x6e0c,
1157 		0x6e1c, 0x6e84,
1158 		0x6ef0, 0x6ef8,
1159 		0x6f00, 0x6f0c,
1160 		0x6f1c, 0x6f84,
1161 		0x6ff0, 0x6ff8,
1162 		0x7000, 0x700c,
1163 		0x701c, 0x7084,
1164 		0x70f0, 0x70f8,
1165 		0x7100, 0x710c,
1166 		0x711c, 0x7184,
1167 		0x71f0, 0x71f8,
1168 		0x7200, 0x720c,
1169 		0x721c, 0x7284,
1170 		0x72f0, 0x72f8,
1171 		0x7300, 0x730c,
1172 		0x731c, 0x7384,
1173 		0x73f0, 0x73f8,
1174 		0x7400, 0x7450,
1175 		0x7500, 0x7530,
1176 		0x7600, 0x760c,
1177 		0x7614, 0x761c,
1178 		0x7680, 0x76cc,
1179 		0x7700, 0x7798,
1180 		0x77c0, 0x77fc,
1181 		0x7900, 0x79fc,
1182 		0x7b00, 0x7b58,
1183 		0x7b60, 0x7b84,
1184 		0x7b8c, 0x7c38,
1185 		0x7d00, 0x7d38,
1186 		0x7d40, 0x7d80,
1187 		0x7d8c, 0x7ddc,
1188 		0x7de4, 0x7e04,
1189 		0x7e10, 0x7e1c,
1190 		0x7e24, 0x7e38,
1191 		0x7e40, 0x7e44,
1192 		0x7e4c, 0x7e78,
1193 		0x7e80, 0x7ea4,
1194 		0x7eac, 0x7edc,
1195 		0x7ee8, 0x7efc,
1196 		0x8dc0, 0x8e04,
1197 		0x8e10, 0x8e1c,
1198 		0x8e30, 0x8e78,
1199 		0x8ea0, 0x8eb8,
1200 		0x8ec0, 0x8f6c,
1201 		0x8fc0, 0x9008,
1202 		0x9010, 0x9058,
1203 		0x9060, 0x9060,
1204 		0x9068, 0x9074,
1205 		0x90fc, 0x90fc,
1206 		0x9400, 0x9408,
1207 		0x9410, 0x9458,
1208 		0x9600, 0x9600,
1209 		0x9608, 0x9638,
1210 		0x9640, 0x96bc,
1211 		0x9800, 0x9808,
1212 		0x9820, 0x983c,
1213 		0x9850, 0x9864,
1214 		0x9c00, 0x9c6c,
1215 		0x9c80, 0x9cec,
1216 		0x9d00, 0x9d6c,
1217 		0x9d80, 0x9dec,
1218 		0x9e00, 0x9e6c,
1219 		0x9e80, 0x9eec,
1220 		0x9f00, 0x9f6c,
1221 		0x9f80, 0x9fec,
1222 		0xd004, 0xd004,
1223 		0xd010, 0xd03c,
1224 		0xdfc0, 0xdfe0,
1225 		0xe000, 0xea7c,
1226 		0xf000, 0x11110,
1227 		0x11118, 0x11190,
1228 		0x19040, 0x1906c,
1229 		0x19078, 0x19080,
1230 		0x1908c, 0x190e4,
1231 		0x190f0, 0x190f8,
1232 		0x19100, 0x19110,
1233 		0x19120, 0x19124,
1234 		0x19150, 0x19194,
1235 		0x1919c, 0x191b0,
1236 		0x191d0, 0x191e8,
1237 		0x19238, 0x1924c,
1238 		0x193f8, 0x1943c,
1239 		0x1944c, 0x19474,
1240 		0x19490, 0x194e0,
1241 		0x194f0, 0x194f8,
1242 		0x19800, 0x19c08,
1243 		0x19c10, 0x19c90,
1244 		0x19ca0, 0x19ce4,
1245 		0x19cf0, 0x19d40,
1246 		0x19d50, 0x19d94,
1247 		0x19da0, 0x19de8,
1248 		0x19df0, 0x19e40,
1249 		0x19e50, 0x19e90,
1250 		0x19ea0, 0x19f4c,
1251 		0x1a000, 0x1a004,
1252 		0x1a010, 0x1a06c,
1253 		0x1a0b0, 0x1a0e4,
1254 		0x1a0ec, 0x1a0f4,
1255 		0x1a100, 0x1a108,
1256 		0x1a114, 0x1a120,
1257 		0x1a128, 0x1a130,
1258 		0x1a138, 0x1a138,
1259 		0x1a190, 0x1a1c4,
1260 		0x1a1fc, 0x1a1fc,
1261 		0x1e040, 0x1e04c,
1262 		0x1e284, 0x1e28c,
1263 		0x1e2c0, 0x1e2c0,
1264 		0x1e2e0, 0x1e2e0,
1265 		0x1e300, 0x1e384,
1266 		0x1e3c0, 0x1e3c8,
1267 		0x1e440, 0x1e44c,
1268 		0x1e684, 0x1e68c,
1269 		0x1e6c0, 0x1e6c0,
1270 		0x1e6e0, 0x1e6e0,
1271 		0x1e700, 0x1e784,
1272 		0x1e7c0, 0x1e7c8,
1273 		0x1e840, 0x1e84c,
1274 		0x1ea84, 0x1ea8c,
1275 		0x1eac0, 0x1eac0,
1276 		0x1eae0, 0x1eae0,
1277 		0x1eb00, 0x1eb84,
1278 		0x1ebc0, 0x1ebc8,
1279 		0x1ec40, 0x1ec4c,
1280 		0x1ee84, 0x1ee8c,
1281 		0x1eec0, 0x1eec0,
1282 		0x1eee0, 0x1eee0,
1283 		0x1ef00, 0x1ef84,
1284 		0x1efc0, 0x1efc8,
1285 		0x1f040, 0x1f04c,
1286 		0x1f284, 0x1f28c,
1287 		0x1f2c0, 0x1f2c0,
1288 		0x1f2e0, 0x1f2e0,
1289 		0x1f300, 0x1f384,
1290 		0x1f3c0, 0x1f3c8,
1291 		0x1f440, 0x1f44c,
1292 		0x1f684, 0x1f68c,
1293 		0x1f6c0, 0x1f6c0,
1294 		0x1f6e0, 0x1f6e0,
1295 		0x1f700, 0x1f784,
1296 		0x1f7c0, 0x1f7c8,
1297 		0x1f840, 0x1f84c,
1298 		0x1fa84, 0x1fa8c,
1299 		0x1fac0, 0x1fac0,
1300 		0x1fae0, 0x1fae0,
1301 		0x1fb00, 0x1fb84,
1302 		0x1fbc0, 0x1fbc8,
1303 		0x1fc40, 0x1fc4c,
1304 		0x1fe84, 0x1fe8c,
1305 		0x1fec0, 0x1fec0,
1306 		0x1fee0, 0x1fee0,
1307 		0x1ff00, 0x1ff84,
1308 		0x1ffc0, 0x1ffc8,
1309 		0x20000, 0x2002c,
1310 		0x20100, 0x2013c,
1311 		0x20190, 0x201a0,
1312 		0x201a8, 0x201b8,
1313 		0x201c4, 0x201c8,
1314 		0x20200, 0x20318,
1315 		0x20400, 0x204b4,
1316 		0x204c0, 0x20528,
1317 		0x20540, 0x20614,
1318 		0x21000, 0x21040,
1319 		0x2104c, 0x21060,
1320 		0x210c0, 0x210ec,
1321 		0x21200, 0x21268,
1322 		0x21270, 0x21284,
1323 		0x212fc, 0x21388,
1324 		0x21400, 0x21404,
1325 		0x21500, 0x21500,
1326 		0x21510, 0x21518,
1327 		0x2152c, 0x21530,
1328 		0x2153c, 0x2153c,
1329 		0x21550, 0x21554,
1330 		0x21600, 0x21600,
1331 		0x21608, 0x2161c,
1332 		0x21624, 0x21628,
1333 		0x21630, 0x21634,
1334 		0x2163c, 0x2163c,
1335 		0x21700, 0x2171c,
1336 		0x21780, 0x2178c,
1337 		0x21800, 0x21818,
1338 		0x21820, 0x21828,
1339 		0x21830, 0x21848,
1340 		0x21850, 0x21854,
1341 		0x21860, 0x21868,
1342 		0x21870, 0x21870,
1343 		0x21878, 0x21898,
1344 		0x218a0, 0x218a8,
1345 		0x218b0, 0x218c8,
1346 		0x218d0, 0x218d4,
1347 		0x218e0, 0x218e8,
1348 		0x218f0, 0x218f0,
1349 		0x218f8, 0x21a18,
1350 		0x21a20, 0x21a28,
1351 		0x21a30, 0x21a48,
1352 		0x21a50, 0x21a54,
1353 		0x21a60, 0x21a68,
1354 		0x21a70, 0x21a70,
1355 		0x21a78, 0x21a98,
1356 		0x21aa0, 0x21aa8,
1357 		0x21ab0, 0x21ac8,
1358 		0x21ad0, 0x21ad4,
1359 		0x21ae0, 0x21ae8,
1360 		0x21af0, 0x21af0,
1361 		0x21af8, 0x21c18,
1362 		0x21c20, 0x21c20,
1363 		0x21c28, 0x21c30,
1364 		0x21c38, 0x21c38,
1365 		0x21c80, 0x21c98,
1366 		0x21ca0, 0x21ca8,
1367 		0x21cb0, 0x21cc8,
1368 		0x21cd0, 0x21cd4,
1369 		0x21ce0, 0x21ce8,
1370 		0x21cf0, 0x21cf0,
1371 		0x21cf8, 0x21d7c,
1372 		0x21e00, 0x21e04,
1373 		0x22000, 0x2202c,
1374 		0x22100, 0x2213c,
1375 		0x22190, 0x221a0,
1376 		0x221a8, 0x221b8,
1377 		0x221c4, 0x221c8,
1378 		0x22200, 0x22318,
1379 		0x22400, 0x224b4,
1380 		0x224c0, 0x22528,
1381 		0x22540, 0x22614,
1382 		0x23000, 0x23040,
1383 		0x2304c, 0x23060,
1384 		0x230c0, 0x230ec,
1385 		0x23200, 0x23268,
1386 		0x23270, 0x23284,
1387 		0x232fc, 0x23388,
1388 		0x23400, 0x23404,
1389 		0x23500, 0x23500,
1390 		0x23510, 0x23518,
1391 		0x2352c, 0x23530,
1392 		0x2353c, 0x2353c,
1393 		0x23550, 0x23554,
1394 		0x23600, 0x23600,
1395 		0x23608, 0x2361c,
1396 		0x23624, 0x23628,
1397 		0x23630, 0x23634,
1398 		0x2363c, 0x2363c,
1399 		0x23700, 0x2371c,
1400 		0x23780, 0x2378c,
1401 		0x23800, 0x23818,
1402 		0x23820, 0x23828,
1403 		0x23830, 0x23848,
1404 		0x23850, 0x23854,
1405 		0x23860, 0x23868,
1406 		0x23870, 0x23870,
1407 		0x23878, 0x23898,
1408 		0x238a0, 0x238a8,
1409 		0x238b0, 0x238c8,
1410 		0x238d0, 0x238d4,
1411 		0x238e0, 0x238e8,
1412 		0x238f0, 0x238f0,
1413 		0x238f8, 0x23a18,
1414 		0x23a20, 0x23a28,
1415 		0x23a30, 0x23a48,
1416 		0x23a50, 0x23a54,
1417 		0x23a60, 0x23a68,
1418 		0x23a70, 0x23a70,
1419 		0x23a78, 0x23a98,
1420 		0x23aa0, 0x23aa8,
1421 		0x23ab0, 0x23ac8,
1422 		0x23ad0, 0x23ad4,
1423 		0x23ae0, 0x23ae8,
1424 		0x23af0, 0x23af0,
1425 		0x23af8, 0x23c18,
1426 		0x23c20, 0x23c20,
1427 		0x23c28, 0x23c30,
1428 		0x23c38, 0x23c38,
1429 		0x23c80, 0x23c98,
1430 		0x23ca0, 0x23ca8,
1431 		0x23cb0, 0x23cc8,
1432 		0x23cd0, 0x23cd4,
1433 		0x23ce0, 0x23ce8,
1434 		0x23cf0, 0x23cf0,
1435 		0x23cf8, 0x23d7c,
1436 		0x23e00, 0x23e04,
1437 		0x24000, 0x2402c,
1438 		0x24100, 0x2413c,
1439 		0x24190, 0x241a0,
1440 		0x241a8, 0x241b8,
1441 		0x241c4, 0x241c8,
1442 		0x24200, 0x24318,
1443 		0x24400, 0x244b4,
1444 		0x244c0, 0x24528,
1445 		0x24540, 0x24614,
1446 		0x25000, 0x25040,
1447 		0x2504c, 0x25060,
1448 		0x250c0, 0x250ec,
1449 		0x25200, 0x25268,
1450 		0x25270, 0x25284,
1451 		0x252fc, 0x25388,
1452 		0x25400, 0x25404,
1453 		0x25500, 0x25500,
1454 		0x25510, 0x25518,
1455 		0x2552c, 0x25530,
1456 		0x2553c, 0x2553c,
1457 		0x25550, 0x25554,
1458 		0x25600, 0x25600,
1459 		0x25608, 0x2561c,
1460 		0x25624, 0x25628,
1461 		0x25630, 0x25634,
1462 		0x2563c, 0x2563c,
1463 		0x25700, 0x2571c,
1464 		0x25780, 0x2578c,
1465 		0x25800, 0x25818,
1466 		0x25820, 0x25828,
1467 		0x25830, 0x25848,
1468 		0x25850, 0x25854,
1469 		0x25860, 0x25868,
1470 		0x25870, 0x25870,
1471 		0x25878, 0x25898,
1472 		0x258a0, 0x258a8,
1473 		0x258b0, 0x258c8,
1474 		0x258d0, 0x258d4,
1475 		0x258e0, 0x258e8,
1476 		0x258f0, 0x258f0,
1477 		0x258f8, 0x25a18,
1478 		0x25a20, 0x25a28,
1479 		0x25a30, 0x25a48,
1480 		0x25a50, 0x25a54,
1481 		0x25a60, 0x25a68,
1482 		0x25a70, 0x25a70,
1483 		0x25a78, 0x25a98,
1484 		0x25aa0, 0x25aa8,
1485 		0x25ab0, 0x25ac8,
1486 		0x25ad0, 0x25ad4,
1487 		0x25ae0, 0x25ae8,
1488 		0x25af0, 0x25af0,
1489 		0x25af8, 0x25c18,
1490 		0x25c20, 0x25c20,
1491 		0x25c28, 0x25c30,
1492 		0x25c38, 0x25c38,
1493 		0x25c80, 0x25c98,
1494 		0x25ca0, 0x25ca8,
1495 		0x25cb0, 0x25cc8,
1496 		0x25cd0, 0x25cd4,
1497 		0x25ce0, 0x25ce8,
1498 		0x25cf0, 0x25cf0,
1499 		0x25cf8, 0x25d7c,
1500 		0x25e00, 0x25e04,
1501 		0x26000, 0x2602c,
1502 		0x26100, 0x2613c,
1503 		0x26190, 0x261a0,
1504 		0x261a8, 0x261b8,
1505 		0x261c4, 0x261c8,
1506 		0x26200, 0x26318,
1507 		0x26400, 0x264b4,
1508 		0x264c0, 0x26528,
1509 		0x26540, 0x26614,
1510 		0x27000, 0x27040,
1511 		0x2704c, 0x27060,
1512 		0x270c0, 0x270ec,
1513 		0x27200, 0x27268,
1514 		0x27270, 0x27284,
1515 		0x272fc, 0x27388,
1516 		0x27400, 0x27404,
1517 		0x27500, 0x27500,
1518 		0x27510, 0x27518,
1519 		0x2752c, 0x27530,
1520 		0x2753c, 0x2753c,
1521 		0x27550, 0x27554,
1522 		0x27600, 0x27600,
1523 		0x27608, 0x2761c,
1524 		0x27624, 0x27628,
1525 		0x27630, 0x27634,
1526 		0x2763c, 0x2763c,
1527 		0x27700, 0x2771c,
1528 		0x27780, 0x2778c,
1529 		0x27800, 0x27818,
1530 		0x27820, 0x27828,
1531 		0x27830, 0x27848,
1532 		0x27850, 0x27854,
1533 		0x27860, 0x27868,
1534 		0x27870, 0x27870,
1535 		0x27878, 0x27898,
1536 		0x278a0, 0x278a8,
1537 		0x278b0, 0x278c8,
1538 		0x278d0, 0x278d4,
1539 		0x278e0, 0x278e8,
1540 		0x278f0, 0x278f0,
1541 		0x278f8, 0x27a18,
1542 		0x27a20, 0x27a28,
1543 		0x27a30, 0x27a48,
1544 		0x27a50, 0x27a54,
1545 		0x27a60, 0x27a68,
1546 		0x27a70, 0x27a70,
1547 		0x27a78, 0x27a98,
1548 		0x27aa0, 0x27aa8,
1549 		0x27ab0, 0x27ac8,
1550 		0x27ad0, 0x27ad4,
1551 		0x27ae0, 0x27ae8,
1552 		0x27af0, 0x27af0,
1553 		0x27af8, 0x27c18,
1554 		0x27c20, 0x27c20,
1555 		0x27c28, 0x27c30,
1556 		0x27c38, 0x27c38,
1557 		0x27c80, 0x27c98,
1558 		0x27ca0, 0x27ca8,
1559 		0x27cb0, 0x27cc8,
1560 		0x27cd0, 0x27cd4,
1561 		0x27ce0, 0x27ce8,
1562 		0x27cf0, 0x27cf0,
1563 		0x27cf8, 0x27d7c,
1564 		0x27e00, 0x27e04,
1565 	};
1566 
1567 	static const unsigned int t5_reg_ranges[] = {
1568 		0x1008, 0x10c0,
1569 		0x10cc, 0x10f8,
1570 		0x1100, 0x1100,
1571 		0x110c, 0x1148,
1572 		0x1180, 0x1184,
1573 		0x1190, 0x1194,
1574 		0x11a0, 0x11a4,
1575 		0x11b0, 0x11b4,
1576 		0x11fc, 0x123c,
1577 		0x1280, 0x173c,
1578 		0x1800, 0x18fc,
1579 		0x3000, 0x3028,
1580 		0x3060, 0x30b0,
1581 		0x30b8, 0x30d8,
1582 		0x30e0, 0x30fc,
1583 		0x3140, 0x357c,
1584 		0x35a8, 0x35cc,
1585 		0x35ec, 0x35ec,
1586 		0x3600, 0x5624,
1587 		0x56cc, 0x56ec,
1588 		0x56f4, 0x5720,
1589 		0x5728, 0x575c,
1590 		0x580c, 0x5814,
1591 		0x5890, 0x589c,
1592 		0x58a4, 0x58ac,
1593 		0x58b8, 0x58bc,
1594 		0x5940, 0x59c8,
1595 		0x59d0, 0x59dc,
1596 		0x59fc, 0x5a18,
1597 		0x5a60, 0x5a70,
1598 		0x5a80, 0x5a9c,
1599 		0x5b94, 0x5bfc,
1600 		0x6000, 0x6020,
1601 		0x6028, 0x6040,
1602 		0x6058, 0x609c,
1603 		0x60a8, 0x614c,
1604 		0x7700, 0x7798,
1605 		0x77c0, 0x78fc,
1606 		0x7b00, 0x7b58,
1607 		0x7b60, 0x7b84,
1608 		0x7b8c, 0x7c54,
1609 		0x7d00, 0x7d38,
1610 		0x7d40, 0x7d80,
1611 		0x7d8c, 0x7ddc,
1612 		0x7de4, 0x7e04,
1613 		0x7e10, 0x7e1c,
1614 		0x7e24, 0x7e38,
1615 		0x7e40, 0x7e44,
1616 		0x7e4c, 0x7e78,
1617 		0x7e80, 0x7edc,
1618 		0x7ee8, 0x7efc,
1619 		0x8dc0, 0x8de0,
1620 		0x8df8, 0x8e04,
1621 		0x8e10, 0x8e84,
1622 		0x8ea0, 0x8f84,
1623 		0x8fc0, 0x9058,
1624 		0x9060, 0x9060,
1625 		0x9068, 0x90f8,
1626 		0x9400, 0x9408,
1627 		0x9410, 0x9470,
1628 		0x9600, 0x9600,
1629 		0x9608, 0x9638,
1630 		0x9640, 0x96f4,
1631 		0x9800, 0x9808,
1632 		0x9820, 0x983c,
1633 		0x9850, 0x9864,
1634 		0x9c00, 0x9c6c,
1635 		0x9c80, 0x9cec,
1636 		0x9d00, 0x9d6c,
1637 		0x9d80, 0x9dec,
1638 		0x9e00, 0x9e6c,
1639 		0x9e80, 0x9eec,
1640 		0x9f00, 0x9f6c,
1641 		0x9f80, 0xa020,
1642 		0xd004, 0xd004,
1643 		0xd010, 0xd03c,
1644 		0xdfc0, 0xdfe0,
1645 		0xe000, 0x1106c,
1646 		0x11074, 0x11088,
1647 		0x1109c, 0x1117c,
1648 		0x11190, 0x11204,
1649 		0x19040, 0x1906c,
1650 		0x19078, 0x19080,
1651 		0x1908c, 0x190e8,
1652 		0x190f0, 0x190f8,
1653 		0x19100, 0x19110,
1654 		0x19120, 0x19124,
1655 		0x19150, 0x19194,
1656 		0x1919c, 0x191b0,
1657 		0x191d0, 0x191e8,
1658 		0x19238, 0x19290,
1659 		0x193f8, 0x19428,
1660 		0x19430, 0x19444,
1661 		0x1944c, 0x1946c,
1662 		0x19474, 0x19474,
1663 		0x19490, 0x194cc,
1664 		0x194f0, 0x194f8,
1665 		0x19c00, 0x19c08,
1666 		0x19c10, 0x19c60,
1667 		0x19c94, 0x19ce4,
1668 		0x19cf0, 0x19d40,
1669 		0x19d50, 0x19d94,
1670 		0x19da0, 0x19de8,
1671 		0x19df0, 0x19e10,
1672 		0x19e50, 0x19e90,
1673 		0x19ea0, 0x19f24,
1674 		0x19f34, 0x19f34,
1675 		0x19f40, 0x19f50,
1676 		0x19f90, 0x19fb4,
1677 		0x19fc4, 0x19fe4,
1678 		0x1a000, 0x1a004,
1679 		0x1a010, 0x1a06c,
1680 		0x1a0b0, 0x1a0e4,
1681 		0x1a0ec, 0x1a0f8,
1682 		0x1a100, 0x1a108,
1683 		0x1a114, 0x1a120,
1684 		0x1a128, 0x1a130,
1685 		0x1a138, 0x1a138,
1686 		0x1a190, 0x1a1c4,
1687 		0x1a1fc, 0x1a1fc,
1688 		0x1e008, 0x1e00c,
1689 		0x1e040, 0x1e044,
1690 		0x1e04c, 0x1e04c,
1691 		0x1e284, 0x1e290,
1692 		0x1e2c0, 0x1e2c0,
1693 		0x1e2e0, 0x1e2e0,
1694 		0x1e300, 0x1e384,
1695 		0x1e3c0, 0x1e3c8,
1696 		0x1e408, 0x1e40c,
1697 		0x1e440, 0x1e444,
1698 		0x1e44c, 0x1e44c,
1699 		0x1e684, 0x1e690,
1700 		0x1e6c0, 0x1e6c0,
1701 		0x1e6e0, 0x1e6e0,
1702 		0x1e700, 0x1e784,
1703 		0x1e7c0, 0x1e7c8,
1704 		0x1e808, 0x1e80c,
1705 		0x1e840, 0x1e844,
1706 		0x1e84c, 0x1e84c,
1707 		0x1ea84, 0x1ea90,
1708 		0x1eac0, 0x1eac0,
1709 		0x1eae0, 0x1eae0,
1710 		0x1eb00, 0x1eb84,
1711 		0x1ebc0, 0x1ebc8,
1712 		0x1ec08, 0x1ec0c,
1713 		0x1ec40, 0x1ec44,
1714 		0x1ec4c, 0x1ec4c,
1715 		0x1ee84, 0x1ee90,
1716 		0x1eec0, 0x1eec0,
1717 		0x1eee0, 0x1eee0,
1718 		0x1ef00, 0x1ef84,
1719 		0x1efc0, 0x1efc8,
1720 		0x1f008, 0x1f00c,
1721 		0x1f040, 0x1f044,
1722 		0x1f04c, 0x1f04c,
1723 		0x1f284, 0x1f290,
1724 		0x1f2c0, 0x1f2c0,
1725 		0x1f2e0, 0x1f2e0,
1726 		0x1f300, 0x1f384,
1727 		0x1f3c0, 0x1f3c8,
1728 		0x1f408, 0x1f40c,
1729 		0x1f440, 0x1f444,
1730 		0x1f44c, 0x1f44c,
1731 		0x1f684, 0x1f690,
1732 		0x1f6c0, 0x1f6c0,
1733 		0x1f6e0, 0x1f6e0,
1734 		0x1f700, 0x1f784,
1735 		0x1f7c0, 0x1f7c8,
1736 		0x1f808, 0x1f80c,
1737 		0x1f840, 0x1f844,
1738 		0x1f84c, 0x1f84c,
1739 		0x1fa84, 0x1fa90,
1740 		0x1fac0, 0x1fac0,
1741 		0x1fae0, 0x1fae0,
1742 		0x1fb00, 0x1fb84,
1743 		0x1fbc0, 0x1fbc8,
1744 		0x1fc08, 0x1fc0c,
1745 		0x1fc40, 0x1fc44,
1746 		0x1fc4c, 0x1fc4c,
1747 		0x1fe84, 0x1fe90,
1748 		0x1fec0, 0x1fec0,
1749 		0x1fee0, 0x1fee0,
1750 		0x1ff00, 0x1ff84,
1751 		0x1ffc0, 0x1ffc8,
1752 		0x30000, 0x30030,
1753 		0x30100, 0x30144,
1754 		0x30190, 0x301a0,
1755 		0x301a8, 0x301b8,
1756 		0x301c4, 0x301c8,
1757 		0x301d0, 0x301d0,
1758 		0x30200, 0x30318,
1759 		0x30400, 0x304b4,
1760 		0x304c0, 0x3052c,
1761 		0x30540, 0x3061c,
1762 		0x30800, 0x30828,
1763 		0x30834, 0x30834,
1764 		0x308c0, 0x30908,
1765 		0x30910, 0x309ac,
1766 		0x30a00, 0x30a14,
1767 		0x30a1c, 0x30a2c,
1768 		0x30a44, 0x30a50,
1769 		0x30a74, 0x30a74,
1770 		0x30a7c, 0x30afc,
1771 		0x30b08, 0x30c24,
1772 		0x30d00, 0x30d00,
1773 		0x30d08, 0x30d14,
1774 		0x30d1c, 0x30d20,
1775 		0x30d3c, 0x30d3c,
1776 		0x30d48, 0x30d50,
1777 		0x31200, 0x3120c,
1778 		0x31220, 0x31220,
1779 		0x31240, 0x31240,
1780 		0x31600, 0x3160c,
1781 		0x31a00, 0x31a1c,
1782 		0x31e00, 0x31e20,
1783 		0x31e38, 0x31e3c,
1784 		0x31e80, 0x31e80,
1785 		0x31e88, 0x31ea8,
1786 		0x31eb0, 0x31eb4,
1787 		0x31ec8, 0x31ed4,
1788 		0x31fb8, 0x32004,
1789 		0x32200, 0x32200,
1790 		0x32208, 0x32240,
1791 		0x32248, 0x32280,
1792 		0x32288, 0x322c0,
1793 		0x322c8, 0x322fc,
1794 		0x32600, 0x32630,
1795 		0x32a00, 0x32abc,
1796 		0x32b00, 0x32b10,
1797 		0x32b20, 0x32b30,
1798 		0x32b40, 0x32b50,
1799 		0x32b60, 0x32b70,
1800 		0x33000, 0x33028,
1801 		0x33030, 0x33048,
1802 		0x33060, 0x33068,
1803 		0x33070, 0x3309c,
1804 		0x330f0, 0x33128,
1805 		0x33130, 0x33148,
1806 		0x33160, 0x33168,
1807 		0x33170, 0x3319c,
1808 		0x331f0, 0x33238,
1809 		0x33240, 0x33240,
1810 		0x33248, 0x33250,
1811 		0x3325c, 0x33264,
1812 		0x33270, 0x332b8,
1813 		0x332c0, 0x332e4,
1814 		0x332f8, 0x33338,
1815 		0x33340, 0x33340,
1816 		0x33348, 0x33350,
1817 		0x3335c, 0x33364,
1818 		0x33370, 0x333b8,
1819 		0x333c0, 0x333e4,
1820 		0x333f8, 0x33428,
1821 		0x33430, 0x33448,
1822 		0x33460, 0x33468,
1823 		0x33470, 0x3349c,
1824 		0x334f0, 0x33528,
1825 		0x33530, 0x33548,
1826 		0x33560, 0x33568,
1827 		0x33570, 0x3359c,
1828 		0x335f0, 0x33638,
1829 		0x33640, 0x33640,
1830 		0x33648, 0x33650,
1831 		0x3365c, 0x33664,
1832 		0x33670, 0x336b8,
1833 		0x336c0, 0x336e4,
1834 		0x336f8, 0x33738,
1835 		0x33740, 0x33740,
1836 		0x33748, 0x33750,
1837 		0x3375c, 0x33764,
1838 		0x33770, 0x337b8,
1839 		0x337c0, 0x337e4,
1840 		0x337f8, 0x337fc,
1841 		0x33814, 0x33814,
1842 		0x3382c, 0x3382c,
1843 		0x33880, 0x3388c,
1844 		0x338e8, 0x338ec,
1845 		0x33900, 0x33928,
1846 		0x33930, 0x33948,
1847 		0x33960, 0x33968,
1848 		0x33970, 0x3399c,
1849 		0x339f0, 0x33a38,
1850 		0x33a40, 0x33a40,
1851 		0x33a48, 0x33a50,
1852 		0x33a5c, 0x33a64,
1853 		0x33a70, 0x33ab8,
1854 		0x33ac0, 0x33ae4,
1855 		0x33af8, 0x33b10,
1856 		0x33b28, 0x33b28,
1857 		0x33b3c, 0x33b50,
1858 		0x33bf0, 0x33c10,
1859 		0x33c28, 0x33c28,
1860 		0x33c3c, 0x33c50,
1861 		0x33cf0, 0x33cfc,
1862 		0x34000, 0x34030,
1863 		0x34100, 0x34144,
1864 		0x34190, 0x341a0,
1865 		0x341a8, 0x341b8,
1866 		0x341c4, 0x341c8,
1867 		0x341d0, 0x341d0,
1868 		0x34200, 0x34318,
1869 		0x34400, 0x344b4,
1870 		0x344c0, 0x3452c,
1871 		0x34540, 0x3461c,
1872 		0x34800, 0x34828,
1873 		0x34834, 0x34834,
1874 		0x348c0, 0x34908,
1875 		0x34910, 0x349ac,
1876 		0x34a00, 0x34a14,
1877 		0x34a1c, 0x34a2c,
1878 		0x34a44, 0x34a50,
1879 		0x34a74, 0x34a74,
1880 		0x34a7c, 0x34afc,
1881 		0x34b08, 0x34c24,
1882 		0x34d00, 0x34d00,
1883 		0x34d08, 0x34d14,
1884 		0x34d1c, 0x34d20,
1885 		0x34d3c, 0x34d3c,
1886 		0x34d48, 0x34d50,
1887 		0x35200, 0x3520c,
1888 		0x35220, 0x35220,
1889 		0x35240, 0x35240,
1890 		0x35600, 0x3560c,
1891 		0x35a00, 0x35a1c,
1892 		0x35e00, 0x35e20,
1893 		0x35e38, 0x35e3c,
1894 		0x35e80, 0x35e80,
1895 		0x35e88, 0x35ea8,
1896 		0x35eb0, 0x35eb4,
1897 		0x35ec8, 0x35ed4,
1898 		0x35fb8, 0x36004,
1899 		0x36200, 0x36200,
1900 		0x36208, 0x36240,
1901 		0x36248, 0x36280,
1902 		0x36288, 0x362c0,
1903 		0x362c8, 0x362fc,
1904 		0x36600, 0x36630,
1905 		0x36a00, 0x36abc,
1906 		0x36b00, 0x36b10,
1907 		0x36b20, 0x36b30,
1908 		0x36b40, 0x36b50,
1909 		0x36b60, 0x36b70,
1910 		0x37000, 0x37028,
1911 		0x37030, 0x37048,
1912 		0x37060, 0x37068,
1913 		0x37070, 0x3709c,
1914 		0x370f0, 0x37128,
1915 		0x37130, 0x37148,
1916 		0x37160, 0x37168,
1917 		0x37170, 0x3719c,
1918 		0x371f0, 0x37238,
1919 		0x37240, 0x37240,
1920 		0x37248, 0x37250,
1921 		0x3725c, 0x37264,
1922 		0x37270, 0x372b8,
1923 		0x372c0, 0x372e4,
1924 		0x372f8, 0x37338,
1925 		0x37340, 0x37340,
1926 		0x37348, 0x37350,
1927 		0x3735c, 0x37364,
1928 		0x37370, 0x373b8,
1929 		0x373c0, 0x373e4,
1930 		0x373f8, 0x37428,
1931 		0x37430, 0x37448,
1932 		0x37460, 0x37468,
1933 		0x37470, 0x3749c,
1934 		0x374f0, 0x37528,
1935 		0x37530, 0x37548,
1936 		0x37560, 0x37568,
1937 		0x37570, 0x3759c,
1938 		0x375f0, 0x37638,
1939 		0x37640, 0x37640,
1940 		0x37648, 0x37650,
1941 		0x3765c, 0x37664,
1942 		0x37670, 0x376b8,
1943 		0x376c0, 0x376e4,
1944 		0x376f8, 0x37738,
1945 		0x37740, 0x37740,
1946 		0x37748, 0x37750,
1947 		0x3775c, 0x37764,
1948 		0x37770, 0x377b8,
1949 		0x377c0, 0x377e4,
1950 		0x377f8, 0x377fc,
1951 		0x37814, 0x37814,
1952 		0x3782c, 0x3782c,
1953 		0x37880, 0x3788c,
1954 		0x378e8, 0x378ec,
1955 		0x37900, 0x37928,
1956 		0x37930, 0x37948,
1957 		0x37960, 0x37968,
1958 		0x37970, 0x3799c,
1959 		0x379f0, 0x37a38,
1960 		0x37a40, 0x37a40,
1961 		0x37a48, 0x37a50,
1962 		0x37a5c, 0x37a64,
1963 		0x37a70, 0x37ab8,
1964 		0x37ac0, 0x37ae4,
1965 		0x37af8, 0x37b10,
1966 		0x37b28, 0x37b28,
1967 		0x37b3c, 0x37b50,
1968 		0x37bf0, 0x37c10,
1969 		0x37c28, 0x37c28,
1970 		0x37c3c, 0x37c50,
1971 		0x37cf0, 0x37cfc,
1972 		0x38000, 0x38030,
1973 		0x38100, 0x38144,
1974 		0x38190, 0x381a0,
1975 		0x381a8, 0x381b8,
1976 		0x381c4, 0x381c8,
1977 		0x381d0, 0x381d0,
1978 		0x38200, 0x38318,
1979 		0x38400, 0x384b4,
1980 		0x384c0, 0x3852c,
1981 		0x38540, 0x3861c,
1982 		0x38800, 0x38828,
1983 		0x38834, 0x38834,
1984 		0x388c0, 0x38908,
1985 		0x38910, 0x389ac,
1986 		0x38a00, 0x38a14,
1987 		0x38a1c, 0x38a2c,
1988 		0x38a44, 0x38a50,
1989 		0x38a74, 0x38a74,
1990 		0x38a7c, 0x38afc,
1991 		0x38b08, 0x38c24,
1992 		0x38d00, 0x38d00,
1993 		0x38d08, 0x38d14,
1994 		0x38d1c, 0x38d20,
1995 		0x38d3c, 0x38d3c,
1996 		0x38d48, 0x38d50,
1997 		0x39200, 0x3920c,
1998 		0x39220, 0x39220,
1999 		0x39240, 0x39240,
2000 		0x39600, 0x3960c,
2001 		0x39a00, 0x39a1c,
2002 		0x39e00, 0x39e20,
2003 		0x39e38, 0x39e3c,
2004 		0x39e80, 0x39e80,
2005 		0x39e88, 0x39ea8,
2006 		0x39eb0, 0x39eb4,
2007 		0x39ec8, 0x39ed4,
2008 		0x39fb8, 0x3a004,
2009 		0x3a200, 0x3a200,
2010 		0x3a208, 0x3a240,
2011 		0x3a248, 0x3a280,
2012 		0x3a288, 0x3a2c0,
2013 		0x3a2c8, 0x3a2fc,
2014 		0x3a600, 0x3a630,
2015 		0x3aa00, 0x3aabc,
2016 		0x3ab00, 0x3ab10,
2017 		0x3ab20, 0x3ab30,
2018 		0x3ab40, 0x3ab50,
2019 		0x3ab60, 0x3ab70,
2020 		0x3b000, 0x3b028,
2021 		0x3b030, 0x3b048,
2022 		0x3b060, 0x3b068,
2023 		0x3b070, 0x3b09c,
2024 		0x3b0f0, 0x3b128,
2025 		0x3b130, 0x3b148,
2026 		0x3b160, 0x3b168,
2027 		0x3b170, 0x3b19c,
2028 		0x3b1f0, 0x3b238,
2029 		0x3b240, 0x3b240,
2030 		0x3b248, 0x3b250,
2031 		0x3b25c, 0x3b264,
2032 		0x3b270, 0x3b2b8,
2033 		0x3b2c0, 0x3b2e4,
2034 		0x3b2f8, 0x3b338,
2035 		0x3b340, 0x3b340,
2036 		0x3b348, 0x3b350,
2037 		0x3b35c, 0x3b364,
2038 		0x3b370, 0x3b3b8,
2039 		0x3b3c0, 0x3b3e4,
2040 		0x3b3f8, 0x3b428,
2041 		0x3b430, 0x3b448,
2042 		0x3b460, 0x3b468,
2043 		0x3b470, 0x3b49c,
2044 		0x3b4f0, 0x3b528,
2045 		0x3b530, 0x3b548,
2046 		0x3b560, 0x3b568,
2047 		0x3b570, 0x3b59c,
2048 		0x3b5f0, 0x3b638,
2049 		0x3b640, 0x3b640,
2050 		0x3b648, 0x3b650,
2051 		0x3b65c, 0x3b664,
2052 		0x3b670, 0x3b6b8,
2053 		0x3b6c0, 0x3b6e4,
2054 		0x3b6f8, 0x3b738,
2055 		0x3b740, 0x3b740,
2056 		0x3b748, 0x3b750,
2057 		0x3b75c, 0x3b764,
2058 		0x3b770, 0x3b7b8,
2059 		0x3b7c0, 0x3b7e4,
2060 		0x3b7f8, 0x3b7fc,
2061 		0x3b814, 0x3b814,
2062 		0x3b82c, 0x3b82c,
2063 		0x3b880, 0x3b88c,
2064 		0x3b8e8, 0x3b8ec,
2065 		0x3b900, 0x3b928,
2066 		0x3b930, 0x3b948,
2067 		0x3b960, 0x3b968,
2068 		0x3b970, 0x3b99c,
2069 		0x3b9f0, 0x3ba38,
2070 		0x3ba40, 0x3ba40,
2071 		0x3ba48, 0x3ba50,
2072 		0x3ba5c, 0x3ba64,
2073 		0x3ba70, 0x3bab8,
2074 		0x3bac0, 0x3bae4,
2075 		0x3baf8, 0x3bb10,
2076 		0x3bb28, 0x3bb28,
2077 		0x3bb3c, 0x3bb50,
2078 		0x3bbf0, 0x3bc10,
2079 		0x3bc28, 0x3bc28,
2080 		0x3bc3c, 0x3bc50,
2081 		0x3bcf0, 0x3bcfc,
2082 		0x3c000, 0x3c030,
2083 		0x3c100, 0x3c144,
2084 		0x3c190, 0x3c1a0,
2085 		0x3c1a8, 0x3c1b8,
2086 		0x3c1c4, 0x3c1c8,
2087 		0x3c1d0, 0x3c1d0,
2088 		0x3c200, 0x3c318,
2089 		0x3c400, 0x3c4b4,
2090 		0x3c4c0, 0x3c52c,
2091 		0x3c540, 0x3c61c,
2092 		0x3c800, 0x3c828,
2093 		0x3c834, 0x3c834,
2094 		0x3c8c0, 0x3c908,
2095 		0x3c910, 0x3c9ac,
2096 		0x3ca00, 0x3ca14,
2097 		0x3ca1c, 0x3ca2c,
2098 		0x3ca44, 0x3ca50,
2099 		0x3ca74, 0x3ca74,
2100 		0x3ca7c, 0x3cafc,
2101 		0x3cb08, 0x3cc24,
2102 		0x3cd00, 0x3cd00,
2103 		0x3cd08, 0x3cd14,
2104 		0x3cd1c, 0x3cd20,
2105 		0x3cd3c, 0x3cd3c,
2106 		0x3cd48, 0x3cd50,
2107 		0x3d200, 0x3d20c,
2108 		0x3d220, 0x3d220,
2109 		0x3d240, 0x3d240,
2110 		0x3d600, 0x3d60c,
2111 		0x3da00, 0x3da1c,
2112 		0x3de00, 0x3de20,
2113 		0x3de38, 0x3de3c,
2114 		0x3de80, 0x3de80,
2115 		0x3de88, 0x3dea8,
2116 		0x3deb0, 0x3deb4,
2117 		0x3dec8, 0x3ded4,
2118 		0x3dfb8, 0x3e004,
2119 		0x3e200, 0x3e200,
2120 		0x3e208, 0x3e240,
2121 		0x3e248, 0x3e280,
2122 		0x3e288, 0x3e2c0,
2123 		0x3e2c8, 0x3e2fc,
2124 		0x3e600, 0x3e630,
2125 		0x3ea00, 0x3eabc,
2126 		0x3eb00, 0x3eb10,
2127 		0x3eb20, 0x3eb30,
2128 		0x3eb40, 0x3eb50,
2129 		0x3eb60, 0x3eb70,
2130 		0x3f000, 0x3f028,
2131 		0x3f030, 0x3f048,
2132 		0x3f060, 0x3f068,
2133 		0x3f070, 0x3f09c,
2134 		0x3f0f0, 0x3f128,
2135 		0x3f130, 0x3f148,
2136 		0x3f160, 0x3f168,
2137 		0x3f170, 0x3f19c,
2138 		0x3f1f0, 0x3f238,
2139 		0x3f240, 0x3f240,
2140 		0x3f248, 0x3f250,
2141 		0x3f25c, 0x3f264,
2142 		0x3f270, 0x3f2b8,
2143 		0x3f2c0, 0x3f2e4,
2144 		0x3f2f8, 0x3f338,
2145 		0x3f340, 0x3f340,
2146 		0x3f348, 0x3f350,
2147 		0x3f35c, 0x3f364,
2148 		0x3f370, 0x3f3b8,
2149 		0x3f3c0, 0x3f3e4,
2150 		0x3f3f8, 0x3f428,
2151 		0x3f430, 0x3f448,
2152 		0x3f460, 0x3f468,
2153 		0x3f470, 0x3f49c,
2154 		0x3f4f0, 0x3f528,
2155 		0x3f530, 0x3f548,
2156 		0x3f560, 0x3f568,
2157 		0x3f570, 0x3f59c,
2158 		0x3f5f0, 0x3f638,
2159 		0x3f640, 0x3f640,
2160 		0x3f648, 0x3f650,
2161 		0x3f65c, 0x3f664,
2162 		0x3f670, 0x3f6b8,
2163 		0x3f6c0, 0x3f6e4,
2164 		0x3f6f8, 0x3f738,
2165 		0x3f740, 0x3f740,
2166 		0x3f748, 0x3f750,
2167 		0x3f75c, 0x3f764,
2168 		0x3f770, 0x3f7b8,
2169 		0x3f7c0, 0x3f7e4,
2170 		0x3f7f8, 0x3f7fc,
2171 		0x3f814, 0x3f814,
2172 		0x3f82c, 0x3f82c,
2173 		0x3f880, 0x3f88c,
2174 		0x3f8e8, 0x3f8ec,
2175 		0x3f900, 0x3f928,
2176 		0x3f930, 0x3f948,
2177 		0x3f960, 0x3f968,
2178 		0x3f970, 0x3f99c,
2179 		0x3f9f0, 0x3fa38,
2180 		0x3fa40, 0x3fa40,
2181 		0x3fa48, 0x3fa50,
2182 		0x3fa5c, 0x3fa64,
2183 		0x3fa70, 0x3fab8,
2184 		0x3fac0, 0x3fae4,
2185 		0x3faf8, 0x3fb10,
2186 		0x3fb28, 0x3fb28,
2187 		0x3fb3c, 0x3fb50,
2188 		0x3fbf0, 0x3fc10,
2189 		0x3fc28, 0x3fc28,
2190 		0x3fc3c, 0x3fc50,
2191 		0x3fcf0, 0x3fcfc,
2192 		0x40000, 0x4000c,
2193 		0x40040, 0x40050,
2194 		0x40060, 0x40068,
2195 		0x4007c, 0x4008c,
2196 		0x40094, 0x400b0,
2197 		0x400c0, 0x40144,
2198 		0x40180, 0x4018c,
2199 		0x40200, 0x40254,
2200 		0x40260, 0x40264,
2201 		0x40270, 0x40288,
2202 		0x40290, 0x40298,
2203 		0x402ac, 0x402c8,
2204 		0x402d0, 0x402e0,
2205 		0x402f0, 0x402f0,
2206 		0x40300, 0x4033c,
2207 		0x403f8, 0x403fc,
2208 		0x41304, 0x413c4,
2209 		0x41400, 0x4140c,
2210 		0x41414, 0x4141c,
2211 		0x41480, 0x414d0,
2212 		0x44000, 0x44054,
2213 		0x4405c, 0x44078,
2214 		0x440c0, 0x44174,
2215 		0x44180, 0x441ac,
2216 		0x441b4, 0x441b8,
2217 		0x441c0, 0x44254,
2218 		0x4425c, 0x44278,
2219 		0x442c0, 0x44374,
2220 		0x44380, 0x443ac,
2221 		0x443b4, 0x443b8,
2222 		0x443c0, 0x44454,
2223 		0x4445c, 0x44478,
2224 		0x444c0, 0x44574,
2225 		0x44580, 0x445ac,
2226 		0x445b4, 0x445b8,
2227 		0x445c0, 0x44654,
2228 		0x4465c, 0x44678,
2229 		0x446c0, 0x44774,
2230 		0x44780, 0x447ac,
2231 		0x447b4, 0x447b8,
2232 		0x447c0, 0x44854,
2233 		0x4485c, 0x44878,
2234 		0x448c0, 0x44974,
2235 		0x44980, 0x449ac,
2236 		0x449b4, 0x449b8,
2237 		0x449c0, 0x449fc,
2238 		0x45000, 0x45004,
2239 		0x45010, 0x45030,
2240 		0x45040, 0x45060,
2241 		0x45068, 0x45068,
2242 		0x45080, 0x45084,
2243 		0x450a0, 0x450b0,
2244 		0x45200, 0x45204,
2245 		0x45210, 0x45230,
2246 		0x45240, 0x45260,
2247 		0x45268, 0x45268,
2248 		0x45280, 0x45284,
2249 		0x452a0, 0x452b0,
2250 		0x460c0, 0x460e4,
2251 		0x47000, 0x4703c,
2252 		0x47044, 0x4708c,
2253 		0x47200, 0x47250,
2254 		0x47400, 0x47408,
2255 		0x47414, 0x47420,
2256 		0x47600, 0x47618,
2257 		0x47800, 0x47814,
2258 		0x48000, 0x4800c,
2259 		0x48040, 0x48050,
2260 		0x48060, 0x48068,
2261 		0x4807c, 0x4808c,
2262 		0x48094, 0x480b0,
2263 		0x480c0, 0x48144,
2264 		0x48180, 0x4818c,
2265 		0x48200, 0x48254,
2266 		0x48260, 0x48264,
2267 		0x48270, 0x48288,
2268 		0x48290, 0x48298,
2269 		0x482ac, 0x482c8,
2270 		0x482d0, 0x482e0,
2271 		0x482f0, 0x482f0,
2272 		0x48300, 0x4833c,
2273 		0x483f8, 0x483fc,
2274 		0x49304, 0x493c4,
2275 		0x49400, 0x4940c,
2276 		0x49414, 0x4941c,
2277 		0x49480, 0x494d0,
2278 		0x4c000, 0x4c054,
2279 		0x4c05c, 0x4c078,
2280 		0x4c0c0, 0x4c174,
2281 		0x4c180, 0x4c1ac,
2282 		0x4c1b4, 0x4c1b8,
2283 		0x4c1c0, 0x4c254,
2284 		0x4c25c, 0x4c278,
2285 		0x4c2c0, 0x4c374,
2286 		0x4c380, 0x4c3ac,
2287 		0x4c3b4, 0x4c3b8,
2288 		0x4c3c0, 0x4c454,
2289 		0x4c45c, 0x4c478,
2290 		0x4c4c0, 0x4c574,
2291 		0x4c580, 0x4c5ac,
2292 		0x4c5b4, 0x4c5b8,
2293 		0x4c5c0, 0x4c654,
2294 		0x4c65c, 0x4c678,
2295 		0x4c6c0, 0x4c774,
2296 		0x4c780, 0x4c7ac,
2297 		0x4c7b4, 0x4c7b8,
2298 		0x4c7c0, 0x4c854,
2299 		0x4c85c, 0x4c878,
2300 		0x4c8c0, 0x4c974,
2301 		0x4c980, 0x4c9ac,
2302 		0x4c9b4, 0x4c9b8,
2303 		0x4c9c0, 0x4c9fc,
2304 		0x4d000, 0x4d004,
2305 		0x4d010, 0x4d030,
2306 		0x4d040, 0x4d060,
2307 		0x4d068, 0x4d068,
2308 		0x4d080, 0x4d084,
2309 		0x4d0a0, 0x4d0b0,
2310 		0x4d200, 0x4d204,
2311 		0x4d210, 0x4d230,
2312 		0x4d240, 0x4d260,
2313 		0x4d268, 0x4d268,
2314 		0x4d280, 0x4d284,
2315 		0x4d2a0, 0x4d2b0,
2316 		0x4e0c0, 0x4e0e4,
2317 		0x4f000, 0x4f03c,
2318 		0x4f044, 0x4f08c,
2319 		0x4f200, 0x4f250,
2320 		0x4f400, 0x4f408,
2321 		0x4f414, 0x4f420,
2322 		0x4f600, 0x4f618,
2323 		0x4f800, 0x4f814,
2324 		0x50000, 0x50084,
2325 		0x50090, 0x500cc,
2326 		0x50400, 0x50400,
2327 		0x50800, 0x50884,
2328 		0x50890, 0x508cc,
2329 		0x50c00, 0x50c00,
2330 		0x51000, 0x5101c,
2331 		0x51300, 0x51308,
2332 	};
2333 
2334 	static const unsigned int t6_reg_ranges[] = {
2335 		0x1008, 0x101c,
2336 		0x1024, 0x10a8,
2337 		0x10b4, 0x10f8,
2338 		0x1100, 0x1114,
2339 		0x111c, 0x112c,
2340 		0x1138, 0x113c,
2341 		0x1144, 0x114c,
2342 		0x1180, 0x1184,
2343 		0x1190, 0x1194,
2344 		0x11a0, 0x11a4,
2345 		0x11b0, 0x11b4,
2346 		0x11fc, 0x1274,
2347 		0x1280, 0x133c,
2348 		0x1800, 0x18fc,
2349 		0x3000, 0x302c,
2350 		0x3060, 0x30b0,
2351 		0x30b8, 0x30d8,
2352 		0x30e0, 0x30fc,
2353 		0x3140, 0x357c,
2354 		0x35a8, 0x35cc,
2355 		0x35ec, 0x35ec,
2356 		0x3600, 0x5624,
2357 		0x56cc, 0x56ec,
2358 		0x56f4, 0x5720,
2359 		0x5728, 0x575c,
2360 		0x580c, 0x5814,
2361 		0x5890, 0x589c,
2362 		0x58a4, 0x58ac,
2363 		0x58b8, 0x58bc,
2364 		0x5940, 0x595c,
2365 		0x5980, 0x598c,
2366 		0x59b0, 0x59c8,
2367 		0x59d0, 0x59dc,
2368 		0x59fc, 0x5a18,
2369 		0x5a60, 0x5a6c,
2370 		0x5a80, 0x5a8c,
2371 		0x5a94, 0x5a9c,
2372 		0x5b94, 0x5bfc,
2373 		0x5c10, 0x5e48,
2374 		0x5e50, 0x5e94,
2375 		0x5ea0, 0x5eb0,
2376 		0x5ec0, 0x5ec0,
2377 		0x5ec8, 0x5ed0,
2378 		0x5ee0, 0x5ee0,
2379 		0x5ef0, 0x5ef0,
2380 		0x5f00, 0x5f00,
2381 		0x6000, 0x6020,
2382 		0x6028, 0x6040,
2383 		0x6058, 0x609c,
2384 		0x60a8, 0x619c,
2385 		0x7700, 0x7798,
2386 		0x77c0, 0x7880,
2387 		0x78cc, 0x78fc,
2388 		0x7b00, 0x7b58,
2389 		0x7b60, 0x7b84,
2390 		0x7b8c, 0x7c54,
2391 		0x7d00, 0x7d38,
2392 		0x7d40, 0x7d84,
2393 		0x7d8c, 0x7ddc,
2394 		0x7de4, 0x7e04,
2395 		0x7e10, 0x7e1c,
2396 		0x7e24, 0x7e38,
2397 		0x7e40, 0x7e44,
2398 		0x7e4c, 0x7e78,
2399 		0x7e80, 0x7edc,
2400 		0x7ee8, 0x7efc,
2401 		0x8dc0, 0x8de4,
2402 		0x8df8, 0x8e04,
2403 		0x8e10, 0x8e84,
2404 		0x8ea0, 0x8f88,
2405 		0x8fb8, 0x9058,
2406 		0x9060, 0x9060,
2407 		0x9068, 0x90f8,
2408 		0x9100, 0x9124,
2409 		0x9400, 0x9470,
2410 		0x9600, 0x9600,
2411 		0x9608, 0x9638,
2412 		0x9640, 0x9704,
2413 		0x9710, 0x971c,
2414 		0x9800, 0x9808,
2415 		0x9820, 0x983c,
2416 		0x9850, 0x9864,
2417 		0x9c00, 0x9c6c,
2418 		0x9c80, 0x9cec,
2419 		0x9d00, 0x9d6c,
2420 		0x9d80, 0x9dec,
2421 		0x9e00, 0x9e6c,
2422 		0x9e80, 0x9eec,
2423 		0x9f00, 0x9f6c,
2424 		0x9f80, 0xa020,
2425 		0xd004, 0xd03c,
2426 		0xd100, 0xd118,
2427 		0xd200, 0xd214,
2428 		0xd220, 0xd234,
2429 		0xd240, 0xd254,
2430 		0xd260, 0xd274,
2431 		0xd280, 0xd294,
2432 		0xd2a0, 0xd2b4,
2433 		0xd2c0, 0xd2d4,
2434 		0xd2e0, 0xd2f4,
2435 		0xd300, 0xd31c,
2436 		0xdfc0, 0xdfe0,
2437 		0xe000, 0xf008,
2438 		0xf010, 0xf018,
2439 		0xf020, 0xf028,
2440 		0x11000, 0x11014,
2441 		0x11048, 0x1106c,
2442 		0x11074, 0x11088,
2443 		0x11098, 0x11120,
2444 		0x1112c, 0x1117c,
2445 		0x11190, 0x112e0,
2446 		0x11300, 0x1130c,
2447 		0x12000, 0x1206c,
2448 		0x19040, 0x1906c,
2449 		0x19078, 0x19080,
2450 		0x1908c, 0x190e8,
2451 		0x190f0, 0x190f8,
2452 		0x19100, 0x19110,
2453 		0x19120, 0x19124,
2454 		0x19150, 0x19194,
2455 		0x1919c, 0x191b0,
2456 		0x191d0, 0x191e8,
2457 		0x19238, 0x19290,
2458 		0x192a4, 0x192b0,
2459 		0x192bc, 0x192bc,
2460 		0x19348, 0x1934c,
2461 		0x193f8, 0x19418,
2462 		0x19420, 0x19428,
2463 		0x19430, 0x19444,
2464 		0x1944c, 0x1946c,
2465 		0x19474, 0x19474,
2466 		0x19490, 0x194cc,
2467 		0x194f0, 0x194f8,
2468 		0x19c00, 0x19c48,
2469 		0x19c50, 0x19c80,
2470 		0x19c94, 0x19c98,
2471 		0x19ca0, 0x19cbc,
2472 		0x19ce4, 0x19ce4,
2473 		0x19cf0, 0x19cf8,
2474 		0x19d00, 0x19d28,
2475 		0x19d50, 0x19d78,
2476 		0x19d94, 0x19d98,
2477 		0x19da0, 0x19dc8,
2478 		0x19df0, 0x19e10,
2479 		0x19e50, 0x19e6c,
2480 		0x19ea0, 0x19ebc,
2481 		0x19ec4, 0x19ef4,
2482 		0x19f04, 0x19f2c,
2483 		0x19f34, 0x19f34,
2484 		0x19f40, 0x19f50,
2485 		0x19f90, 0x19fac,
2486 		0x19fc4, 0x19fc8,
2487 		0x19fd0, 0x19fe4,
2488 		0x1a000, 0x1a004,
2489 		0x1a010, 0x1a06c,
2490 		0x1a0b0, 0x1a0e4,
2491 		0x1a0ec, 0x1a0f8,
2492 		0x1a100, 0x1a108,
2493 		0x1a114, 0x1a120,
2494 		0x1a128, 0x1a130,
2495 		0x1a138, 0x1a138,
2496 		0x1a190, 0x1a1c4,
2497 		0x1a1fc, 0x1a1fc,
2498 		0x1e008, 0x1e00c,
2499 		0x1e040, 0x1e044,
2500 		0x1e04c, 0x1e04c,
2501 		0x1e284, 0x1e290,
2502 		0x1e2c0, 0x1e2c0,
2503 		0x1e2e0, 0x1e2e0,
2504 		0x1e300, 0x1e384,
2505 		0x1e3c0, 0x1e3c8,
2506 		0x1e408, 0x1e40c,
2507 		0x1e440, 0x1e444,
2508 		0x1e44c, 0x1e44c,
2509 		0x1e684, 0x1e690,
2510 		0x1e6c0, 0x1e6c0,
2511 		0x1e6e0, 0x1e6e0,
2512 		0x1e700, 0x1e784,
2513 		0x1e7c0, 0x1e7c8,
2514 		0x1e808, 0x1e80c,
2515 		0x1e840, 0x1e844,
2516 		0x1e84c, 0x1e84c,
2517 		0x1ea84, 0x1ea90,
2518 		0x1eac0, 0x1eac0,
2519 		0x1eae0, 0x1eae0,
2520 		0x1eb00, 0x1eb84,
2521 		0x1ebc0, 0x1ebc8,
2522 		0x1ec08, 0x1ec0c,
2523 		0x1ec40, 0x1ec44,
2524 		0x1ec4c, 0x1ec4c,
2525 		0x1ee84, 0x1ee90,
2526 		0x1eec0, 0x1eec0,
2527 		0x1eee0, 0x1eee0,
2528 		0x1ef00, 0x1ef84,
2529 		0x1efc0, 0x1efc8,
2530 		0x1f008, 0x1f00c,
2531 		0x1f040, 0x1f044,
2532 		0x1f04c, 0x1f04c,
2533 		0x1f284, 0x1f290,
2534 		0x1f2c0, 0x1f2c0,
2535 		0x1f2e0, 0x1f2e0,
2536 		0x1f300, 0x1f384,
2537 		0x1f3c0, 0x1f3c8,
2538 		0x1f408, 0x1f40c,
2539 		0x1f440, 0x1f444,
2540 		0x1f44c, 0x1f44c,
2541 		0x1f684, 0x1f690,
2542 		0x1f6c0, 0x1f6c0,
2543 		0x1f6e0, 0x1f6e0,
2544 		0x1f700, 0x1f784,
2545 		0x1f7c0, 0x1f7c8,
2546 		0x1f808, 0x1f80c,
2547 		0x1f840, 0x1f844,
2548 		0x1f84c, 0x1f84c,
2549 		0x1fa84, 0x1fa90,
2550 		0x1fac0, 0x1fac0,
2551 		0x1fae0, 0x1fae0,
2552 		0x1fb00, 0x1fb84,
2553 		0x1fbc0, 0x1fbc8,
2554 		0x1fc08, 0x1fc0c,
2555 		0x1fc40, 0x1fc44,
2556 		0x1fc4c, 0x1fc4c,
2557 		0x1fe84, 0x1fe90,
2558 		0x1fec0, 0x1fec0,
2559 		0x1fee0, 0x1fee0,
2560 		0x1ff00, 0x1ff84,
2561 		0x1ffc0, 0x1ffc8,
2562 		0x30000, 0x30030,
2563 		0x30100, 0x30168,
2564 		0x30190, 0x301a0,
2565 		0x301a8, 0x301b8,
2566 		0x301c4, 0x301c8,
2567 		0x301d0, 0x301d0,
2568 		0x30200, 0x30320,
2569 		0x30400, 0x304b4,
2570 		0x304c0, 0x3052c,
2571 		0x30540, 0x3061c,
2572 		0x30800, 0x308a0,
2573 		0x308c0, 0x30908,
2574 		0x30910, 0x309b8,
2575 		0x30a00, 0x30a04,
2576 		0x30a0c, 0x30a14,
2577 		0x30a1c, 0x30a2c,
2578 		0x30a44, 0x30a50,
2579 		0x30a74, 0x30a74,
2580 		0x30a7c, 0x30afc,
2581 		0x30b08, 0x30c24,
2582 		0x30d00, 0x30d14,
2583 		0x30d1c, 0x30d3c,
2584 		0x30d44, 0x30d4c,
2585 		0x30d54, 0x30d74,
2586 		0x30d7c, 0x30d7c,
2587 		0x30de0, 0x30de0,
2588 		0x30e00, 0x30ed4,
2589 		0x30f00, 0x30fa4,
2590 		0x30fc0, 0x30fc4,
2591 		0x31000, 0x31004,
2592 		0x31080, 0x310fc,
2593 		0x31208, 0x31220,
2594 		0x3123c, 0x31254,
2595 		0x31300, 0x31300,
2596 		0x31308, 0x3131c,
2597 		0x31338, 0x3133c,
2598 		0x31380, 0x31380,
2599 		0x31388, 0x313a8,
2600 		0x313b4, 0x313b4,
2601 		0x31400, 0x31420,
2602 		0x31438, 0x3143c,
2603 		0x31480, 0x31480,
2604 		0x314a8, 0x314a8,
2605 		0x314b0, 0x314b4,
2606 		0x314c8, 0x314d4,
2607 		0x31a40, 0x31a4c,
2608 		0x31af0, 0x31b20,
2609 		0x31b38, 0x31b3c,
2610 		0x31b80, 0x31b80,
2611 		0x31ba8, 0x31ba8,
2612 		0x31bb0, 0x31bb4,
2613 		0x31bc8, 0x31bd4,
2614 		0x32140, 0x3218c,
2615 		0x321f0, 0x321f4,
2616 		0x32200, 0x32200,
2617 		0x32218, 0x32218,
2618 		0x32400, 0x32400,
2619 		0x32408, 0x3241c,
2620 		0x32618, 0x32620,
2621 		0x32664, 0x32664,
2622 		0x326a8, 0x326a8,
2623 		0x326ec, 0x326ec,
2624 		0x32a00, 0x32abc,
2625 		0x32b00, 0x32b18,
2626 		0x32b20, 0x32b38,
2627 		0x32b40, 0x32b58,
2628 		0x32b60, 0x32b78,
2629 		0x32c00, 0x32c00,
2630 		0x32c08, 0x32c3c,
2631 		0x33000, 0x3302c,
2632 		0x33034, 0x33050,
2633 		0x33058, 0x33058,
2634 		0x33060, 0x3308c,
2635 		0x3309c, 0x330ac,
2636 		0x330c0, 0x330c0,
2637 		0x330c8, 0x330d0,
2638 		0x330d8, 0x330e0,
2639 		0x330ec, 0x3312c,
2640 		0x33134, 0x33150,
2641 		0x33158, 0x33158,
2642 		0x33160, 0x3318c,
2643 		0x3319c, 0x331ac,
2644 		0x331c0, 0x331c0,
2645 		0x331c8, 0x331d0,
2646 		0x331d8, 0x331e0,
2647 		0x331ec, 0x33290,
2648 		0x33298, 0x332c4,
2649 		0x332e4, 0x33390,
2650 		0x33398, 0x333c4,
2651 		0x333e4, 0x3342c,
2652 		0x33434, 0x33450,
2653 		0x33458, 0x33458,
2654 		0x33460, 0x3348c,
2655 		0x3349c, 0x334ac,
2656 		0x334c0, 0x334c0,
2657 		0x334c8, 0x334d0,
2658 		0x334d8, 0x334e0,
2659 		0x334ec, 0x3352c,
2660 		0x33534, 0x33550,
2661 		0x33558, 0x33558,
2662 		0x33560, 0x3358c,
2663 		0x3359c, 0x335ac,
2664 		0x335c0, 0x335c0,
2665 		0x335c8, 0x335d0,
2666 		0x335d8, 0x335e0,
2667 		0x335ec, 0x33690,
2668 		0x33698, 0x336c4,
2669 		0x336e4, 0x33790,
2670 		0x33798, 0x337c4,
2671 		0x337e4, 0x337fc,
2672 		0x33814, 0x33814,
2673 		0x33854, 0x33868,
2674 		0x33880, 0x3388c,
2675 		0x338c0, 0x338d0,
2676 		0x338e8, 0x338ec,
2677 		0x33900, 0x3392c,
2678 		0x33934, 0x33950,
2679 		0x33958, 0x33958,
2680 		0x33960, 0x3398c,
2681 		0x3399c, 0x339ac,
2682 		0x339c0, 0x339c0,
2683 		0x339c8, 0x339d0,
2684 		0x339d8, 0x339e0,
2685 		0x339ec, 0x33a90,
2686 		0x33a98, 0x33ac4,
2687 		0x33ae4, 0x33b10,
2688 		0x33b24, 0x33b28,
2689 		0x33b38, 0x33b50,
2690 		0x33bf0, 0x33c10,
2691 		0x33c24, 0x33c28,
2692 		0x33c38, 0x33c50,
2693 		0x33cf0, 0x33cfc,
2694 		0x34000, 0x34030,
2695 		0x34100, 0x34168,
2696 		0x34190, 0x341a0,
2697 		0x341a8, 0x341b8,
2698 		0x341c4, 0x341c8,
2699 		0x341d0, 0x341d0,
2700 		0x34200, 0x34320,
2701 		0x34400, 0x344b4,
2702 		0x344c0, 0x3452c,
2703 		0x34540, 0x3461c,
2704 		0x34800, 0x348a0,
2705 		0x348c0, 0x34908,
2706 		0x34910, 0x349b8,
2707 		0x34a00, 0x34a04,
2708 		0x34a0c, 0x34a14,
2709 		0x34a1c, 0x34a2c,
2710 		0x34a44, 0x34a50,
2711 		0x34a74, 0x34a74,
2712 		0x34a7c, 0x34afc,
2713 		0x34b08, 0x34c24,
2714 		0x34d00, 0x34d14,
2715 		0x34d1c, 0x34d3c,
2716 		0x34d44, 0x34d4c,
2717 		0x34d54, 0x34d74,
2718 		0x34d7c, 0x34d7c,
2719 		0x34de0, 0x34de0,
2720 		0x34e00, 0x34ed4,
2721 		0x34f00, 0x34fa4,
2722 		0x34fc0, 0x34fc4,
2723 		0x35000, 0x35004,
2724 		0x35080, 0x350fc,
2725 		0x35208, 0x35220,
2726 		0x3523c, 0x35254,
2727 		0x35300, 0x35300,
2728 		0x35308, 0x3531c,
2729 		0x35338, 0x3533c,
2730 		0x35380, 0x35380,
2731 		0x35388, 0x353a8,
2732 		0x353b4, 0x353b4,
2733 		0x35400, 0x35420,
2734 		0x35438, 0x3543c,
2735 		0x35480, 0x35480,
2736 		0x354a8, 0x354a8,
2737 		0x354b0, 0x354b4,
2738 		0x354c8, 0x354d4,
2739 		0x35a40, 0x35a4c,
2740 		0x35af0, 0x35b20,
2741 		0x35b38, 0x35b3c,
2742 		0x35b80, 0x35b80,
2743 		0x35ba8, 0x35ba8,
2744 		0x35bb0, 0x35bb4,
2745 		0x35bc8, 0x35bd4,
2746 		0x36140, 0x3618c,
2747 		0x361f0, 0x361f4,
2748 		0x36200, 0x36200,
2749 		0x36218, 0x36218,
2750 		0x36400, 0x36400,
2751 		0x36408, 0x3641c,
2752 		0x36618, 0x36620,
2753 		0x36664, 0x36664,
2754 		0x366a8, 0x366a8,
2755 		0x366ec, 0x366ec,
2756 		0x36a00, 0x36abc,
2757 		0x36b00, 0x36b18,
2758 		0x36b20, 0x36b38,
2759 		0x36b40, 0x36b58,
2760 		0x36b60, 0x36b78,
2761 		0x36c00, 0x36c00,
2762 		0x36c08, 0x36c3c,
2763 		0x37000, 0x3702c,
2764 		0x37034, 0x37050,
2765 		0x37058, 0x37058,
2766 		0x37060, 0x3708c,
2767 		0x3709c, 0x370ac,
2768 		0x370c0, 0x370c0,
2769 		0x370c8, 0x370d0,
2770 		0x370d8, 0x370e0,
2771 		0x370ec, 0x3712c,
2772 		0x37134, 0x37150,
2773 		0x37158, 0x37158,
2774 		0x37160, 0x3718c,
2775 		0x3719c, 0x371ac,
2776 		0x371c0, 0x371c0,
2777 		0x371c8, 0x371d0,
2778 		0x371d8, 0x371e0,
2779 		0x371ec, 0x37290,
2780 		0x37298, 0x372c4,
2781 		0x372e4, 0x37390,
2782 		0x37398, 0x373c4,
2783 		0x373e4, 0x3742c,
2784 		0x37434, 0x37450,
2785 		0x37458, 0x37458,
2786 		0x37460, 0x3748c,
2787 		0x3749c, 0x374ac,
2788 		0x374c0, 0x374c0,
2789 		0x374c8, 0x374d0,
2790 		0x374d8, 0x374e0,
2791 		0x374ec, 0x3752c,
2792 		0x37534, 0x37550,
2793 		0x37558, 0x37558,
2794 		0x37560, 0x3758c,
2795 		0x3759c, 0x375ac,
2796 		0x375c0, 0x375c0,
2797 		0x375c8, 0x375d0,
2798 		0x375d8, 0x375e0,
2799 		0x375ec, 0x37690,
2800 		0x37698, 0x376c4,
2801 		0x376e4, 0x37790,
2802 		0x37798, 0x377c4,
2803 		0x377e4, 0x377fc,
2804 		0x37814, 0x37814,
2805 		0x37854, 0x37868,
2806 		0x37880, 0x3788c,
2807 		0x378c0, 0x378d0,
2808 		0x378e8, 0x378ec,
2809 		0x37900, 0x3792c,
2810 		0x37934, 0x37950,
2811 		0x37958, 0x37958,
2812 		0x37960, 0x3798c,
2813 		0x3799c, 0x379ac,
2814 		0x379c0, 0x379c0,
2815 		0x379c8, 0x379d0,
2816 		0x379d8, 0x379e0,
2817 		0x379ec, 0x37a90,
2818 		0x37a98, 0x37ac4,
2819 		0x37ae4, 0x37b10,
2820 		0x37b24, 0x37b28,
2821 		0x37b38, 0x37b50,
2822 		0x37bf0, 0x37c10,
2823 		0x37c24, 0x37c28,
2824 		0x37c38, 0x37c50,
2825 		0x37cf0, 0x37cfc,
2826 		0x40040, 0x40040,
2827 		0x40080, 0x40084,
2828 		0x40100, 0x40100,
2829 		0x40140, 0x401bc,
2830 		0x40200, 0x40214,
2831 		0x40228, 0x40228,
2832 		0x40240, 0x40258,
2833 		0x40280, 0x40280,
2834 		0x40304, 0x40304,
2835 		0x40330, 0x4033c,
2836 		0x41304, 0x413c8,
2837 		0x413d0, 0x413dc,
2838 		0x413f0, 0x413f0,
2839 		0x41400, 0x4140c,
2840 		0x41414, 0x4141c,
2841 		0x41480, 0x414d0,
2842 		0x44000, 0x4407c,
2843 		0x440c0, 0x441ac,
2844 		0x441b4, 0x4427c,
2845 		0x442c0, 0x443ac,
2846 		0x443b4, 0x4447c,
2847 		0x444c0, 0x445ac,
2848 		0x445b4, 0x4467c,
2849 		0x446c0, 0x447ac,
2850 		0x447b4, 0x4487c,
2851 		0x448c0, 0x449ac,
2852 		0x449b4, 0x44a7c,
2853 		0x44ac0, 0x44bac,
2854 		0x44bb4, 0x44c7c,
2855 		0x44cc0, 0x44dac,
2856 		0x44db4, 0x44e7c,
2857 		0x44ec0, 0x44fac,
2858 		0x44fb4, 0x4507c,
2859 		0x450c0, 0x451ac,
2860 		0x451b4, 0x451fc,
2861 		0x45800, 0x45804,
2862 		0x45810, 0x45830,
2863 		0x45840, 0x45860,
2864 		0x45868, 0x45868,
2865 		0x45880, 0x45884,
2866 		0x458a0, 0x458b0,
2867 		0x45a00, 0x45a04,
2868 		0x45a10, 0x45a30,
2869 		0x45a40, 0x45a60,
2870 		0x45a68, 0x45a68,
2871 		0x45a80, 0x45a84,
2872 		0x45aa0, 0x45ab0,
2873 		0x460c0, 0x460e4,
2874 		0x47000, 0x4703c,
2875 		0x47044, 0x4708c,
2876 		0x47200, 0x47250,
2877 		0x47400, 0x47408,
2878 		0x47414, 0x47420,
2879 		0x47600, 0x47618,
2880 		0x47800, 0x47814,
2881 		0x47820, 0x4782c,
2882 		0x50000, 0x50084,
2883 		0x50090, 0x500cc,
2884 		0x50300, 0x50384,
2885 		0x50400, 0x50400,
2886 		0x50800, 0x50884,
2887 		0x50890, 0x508cc,
2888 		0x50b00, 0x50b84,
2889 		0x50c00, 0x50c00,
2890 		0x51000, 0x51020,
2891 		0x51028, 0x510b0,
2892 		0x51300, 0x51324,
2893 	};
2894 
2895 	u32 *buf_end = (u32 *)((char *)buf + buf_size);
2896 	const unsigned int *reg_ranges;
2897 	int reg_ranges_size, range;
2898 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2899 
2900 	/* Select the right set of register ranges to dump depending on the
2901 	 * adapter chip type.
2902 	 */
2903 	switch (chip_version) {
2904 	case CHELSIO_T4:
2905 		reg_ranges = t4_reg_ranges;
2906 		reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2907 		break;
2908 
2909 	case CHELSIO_T5:
2910 		reg_ranges = t5_reg_ranges;
2911 		reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2912 		break;
2913 
2914 	case CHELSIO_T6:
2915 		reg_ranges = t6_reg_ranges;
2916 		reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2917 		break;
2918 
2919 	default:
2920 		CH_ERR(adap,
2921 			"Unsupported chip version %d\n", chip_version);
2922 		return;
2923 	}
2924 
2925 	/* Clear the register buffer and insert the appropriate register
2926 	 * values selected by the above register ranges.
2927 	 */
2928 	memset(buf, 0, buf_size);
2929 	for (range = 0; range < reg_ranges_size; range += 2) {
2930 		unsigned int reg = reg_ranges[range];
2931 		unsigned int last_reg = reg_ranges[range + 1];
2932 		u32 *bufp = (u32 *)((char *)buf + reg);
2933 
2934 		/* Iterate across the register range filling in the register
2935 		 * buffer but don't write past the end of the register buffer.
2936 		 */
2937 		while (reg <= last_reg && bufp < buf_end) {
2938 			*bufp++ = t4_read_reg(adap, reg);
2939 			reg += sizeof(u32);
2940 		}
2941 	}
2942 }
2943 
2944 /*
2945  * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2946  */
2947 #define EEPROM_DELAY		10		// 10us per poll spin
2948 #define EEPROM_MAX_POLL		5000		// x 5000 == 50ms
2949 
2950 #define EEPROM_STAT_ADDR	0x7bfc
2951 #define VPD_SIZE		0x800
2952 #define VPD_BASE		0x400
2953 #define VPD_BASE_OLD		0
2954 #define VPD_LEN			1024
2955 #define VPD_INFO_FLD_HDR_SIZE	3
2956 #define CHELSIO_VPD_UNIQUE_ID	0x82
2957 
2958 /*
2959  * Small utility function to wait till any outstanding VPD Access is complete.
2960  * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2961  * VPD Access in flight.  This allows us to handle the problem of having a
2962  * previous VPD Access time out and prevent an attempt to inject a new VPD
2963  * Request before any in-flight VPD reguest has completed.
2964  */
2965 static int t4_seeprom_wait(struct adapter *adapter)
2966 {
2967 	unsigned int base = adapter->params.pci.vpd_cap_addr;
2968 	int max_poll;
2969 
2970 	/*
2971 	 * If no VPD Access is in flight, we can just return success right
2972 	 * away.
2973 	 */
2974 	if (!adapter->vpd_busy)
2975 		return 0;
2976 
2977 	/*
2978 	 * Poll the VPD Capability Address/Flag register waiting for it
2979 	 * to indicate that the operation is complete.
2980 	 */
2981 	max_poll = EEPROM_MAX_POLL;
2982 	do {
2983 		u16 val;
2984 
2985 		udelay(EEPROM_DELAY);
2986 		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2987 
2988 		/*
2989 		 * If the operation is complete, mark the VPD as no longer
2990 		 * busy and return success.
2991 		 */
2992 		if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2993 			adapter->vpd_busy = 0;
2994 			return 0;
2995 		}
2996 	} while (--max_poll);
2997 
2998 	/*
2999 	 * Failure!  Note that we leave the VPD Busy status set in order to
3000 	 * avoid pushing a new VPD Access request into the VPD Capability till
3001 	 * the current operation eventually succeeds.  It's a bug to issue a
3002 	 * new request when an existing request is in flight and will result
3003 	 * in corrupt hardware state.
3004 	 */
3005 	return -ETIMEDOUT;
3006 }
3007 
3008 /**
3009  *	t4_seeprom_read - read a serial EEPROM location
3010  *	@adapter: adapter to read
3011  *	@addr: EEPROM virtual address
3012  *	@data: where to store the read data
3013  *
3014  *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
3015  *	VPD capability.  Note that this function must be called with a virtual
3016  *	address.
3017  */
3018 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
3019 {
3020 	unsigned int base = adapter->params.pci.vpd_cap_addr;
3021 	int ret;
3022 
3023 	/*
3024 	 * VPD Accesses must alway be 4-byte aligned!
3025 	 */
3026 	if (addr >= EEPROMVSIZE || (addr & 3))
3027 		return -EINVAL;
3028 
3029 	/*
3030 	 * Wait for any previous operation which may still be in flight to
3031 	 * complete.
3032 	 */
3033 	ret = t4_seeprom_wait(adapter);
3034 	if (ret) {
3035 		CH_ERR(adapter, "VPD still busy from previous operation\n");
3036 		return ret;
3037 	}
3038 
3039 	/*
3040 	 * Issue our new VPD Read request, mark the VPD as being busy and wait
3041 	 * for our request to complete.  If it doesn't complete, note the
3042 	 * error and return it to our caller.  Note that we do not reset the
3043 	 * VPD Busy status!
3044 	 */
3045 	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
3046 	adapter->vpd_busy = 1;
3047 	adapter->vpd_flag = PCI_VPD_ADDR_F;
3048 	ret = t4_seeprom_wait(adapter);
3049 	if (ret) {
3050 		CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
3051 		return ret;
3052 	}
3053 
3054 	/*
3055 	 * Grab the returned data, swizzle it into our endianess and
3056 	 * return success.
3057 	 */
3058 	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
3059 	*data = le32_to_cpu(*data);
3060 	return 0;
3061 }
3062 
3063 /**
3064  *	t4_seeprom_write - write a serial EEPROM location
3065  *	@adapter: adapter to write
3066  *	@addr: virtual EEPROM address
3067  *	@data: value to write
3068  *
3069  *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
3070  *	VPD capability.  Note that this function must be called with a virtual
3071  *	address.
3072  */
3073 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
3074 {
3075 	unsigned int base = adapter->params.pci.vpd_cap_addr;
3076 	int ret;
3077 	u32 stats_reg;
3078 	int max_poll;
3079 
3080 	/*
3081 	 * VPD Accesses must alway be 4-byte aligned!
3082 	 */
3083 	if (addr >= EEPROMVSIZE || (addr & 3))
3084 		return -EINVAL;
3085 
3086 	/*
3087 	 * Wait for any previous operation which may still be in flight to
3088 	 * complete.
3089 	 */
3090 	ret = t4_seeprom_wait(adapter);
3091 	if (ret) {
3092 		CH_ERR(adapter, "VPD still busy from previous operation\n");
3093 		return ret;
3094 	}
3095 
3096 	/*
3097 	 * Issue our new VPD Read request, mark the VPD as being busy and wait
3098 	 * for our request to complete.  If it doesn't complete, note the
3099 	 * error and return it to our caller.  Note that we do not reset the
3100 	 * VPD Busy status!
3101 	 */
3102 	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
3103 				 cpu_to_le32(data));
3104 	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
3105 				 (u16)addr | PCI_VPD_ADDR_F);
3106 	adapter->vpd_busy = 1;
3107 	adapter->vpd_flag = 0;
3108 	ret = t4_seeprom_wait(adapter);
3109 	if (ret) {
3110 		CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
3111 		return ret;
3112 	}
3113 
3114 	/*
3115 	 * Reset PCI_VPD_DATA register after a transaction and wait for our
3116 	 * request to complete. If it doesn't complete, return error.
3117 	 */
3118 	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
3119 	max_poll = EEPROM_MAX_POLL;
3120 	do {
3121 		udelay(EEPROM_DELAY);
3122 		ret = t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
3123 		if (!ret && (stats_reg & 0x1))
3124 			break;
3125 	} while (--max_poll);
3126 	if (!max_poll)
3127 		return -ETIMEDOUT;
3128 
3129 	/* Return success! */
3130 	return 0;
3131 }
3132 
3133 /**
3134  *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
3135  *	@phys_addr: the physical EEPROM address
3136  *	@fn: the PCI function number
3137  *	@sz: size of function-specific area
3138  *
3139  *	Translate a physical EEPROM address to virtual.  The first 1K is
3140  *	accessed through virtual addresses starting at 31K, the rest is
3141  *	accessed through virtual addresses starting at 0.
3142  *
3143  *	The mapping is as follows:
3144  *	[0..1K) -> [31K..32K)
3145  *	[1K..1K+A) -> [ES-A..ES)
3146  *	[1K+A..ES) -> [0..ES-A-1K)
3147  *
3148  *	where A = @fn * @sz, and ES = EEPROM size.
3149  */
3150 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
3151 {
3152 	fn *= sz;
3153 	if (phys_addr < 1024)
3154 		return phys_addr + (31 << 10);
3155 	if (phys_addr < 1024 + fn)
3156 		return EEPROMSIZE - fn + phys_addr - 1024;
3157 	if (phys_addr < EEPROMSIZE)
3158 		return phys_addr - 1024 - fn;
3159 	return -EINVAL;
3160 }
3161 
3162 /**
3163  *	t4_seeprom_wp - enable/disable EEPROM write protection
3164  *	@adapter: the adapter
3165  *	@enable: whether to enable or disable write protection
3166  *
3167  *	Enables or disables write protection on the serial EEPROM.
3168  */
3169 int t4_seeprom_wp(struct adapter *adapter, int enable)
3170 {
3171 	return t4_os_pci_write_seeprom(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
3172 }
3173 
3174 /**
3175  *	get_vpd_keyword_val - Locates an information field keyword in the VPD
3176  *	@v: Pointer to buffered vpd data structure
3177  *	@kw: The keyword to search for
3178  *
3179  *	Returns the value of the information field keyword or
3180  *	-ENOENT otherwise.
3181  */
3182 int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
3183 {
3184 	int i;
3185 	unsigned int offset , len;
3186 	const u8 *buf = (const u8 *)v;
3187 	const u8 *vpdr_len = &v->vpdr_len[0];
3188 	offset = sizeof(struct t4_vpd_hdr);
3189 	len =  (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);
3190 
3191 	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
3192 		return -ENOENT;
3193 	}
3194 
3195 	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
3196 		if(memcmp(buf + i , kw , 2) == 0){
3197 			i += VPD_INFO_FLD_HDR_SIZE;
3198 			return i;
3199 		}
3200 
3201 		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
3202 	}
3203 
3204 	return -ENOENT;
3205 }
3206 
3207 /*
3208  * str_strip
3209  * Removes trailing whitespaces from string "s"
3210  * Based on strstrip() implementation in string.c
3211  */
3212 static void str_strip(char *s)
3213 {
3214 	size_t size;
3215 	char *end;
3216 
3217 	size = strlen(s);
3218 	if (!size)
3219 		return;
3220 
3221 	end = s + size - 1;
3222 	while (end >= s && isspace(*end))
3223 		end--;
3224 	*(end + 1) = '\0';
3225 }
3226 
3227 /**
3228  *	t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
3229  *	@adapter: adapter to read
3230  *	@p: where to store the parameters
3231  *
3232  *	Reads card parameters stored in VPD EEPROM.
3233  */
3234 int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
3235 {
3236 	int i, ret = 0, addr;
3237 	int ec, sn, pn, na;
3238 	u8 *vpd, csum;
3239 	const struct t4_vpd_hdr *v;
3240 
3241 	vpd = (u8 *)t4_os_alloc(sizeof(u8) * VPD_LEN);
3242 	if (!vpd)
3243 		return -ENOMEM;
3244 
3245 	/* We have two VPD data structures stored in the adapter VPD area.
3246 	 * By default, Linux calculates the size of the VPD area by traversing
3247 	 * the first VPD area at offset 0x0, so we need to tell the OS what
3248 	 * our real VPD size is.
3249 	 */
3250 	ret = t4_os_pci_set_vpd_size(adapter, VPD_SIZE);
3251 	if (ret < 0)
3252 		goto out;
3253 
3254 	/* Card information normally starts at VPD_BASE but early cards had
3255 	 * it at 0.
3256 	 */
3257 	ret = t4_os_pci_read_seeprom(adapter, VPD_BASE, (u32 *)(vpd));
3258 	if (ret)
3259 		goto out;
3260 
3261 	/* The VPD shall have a unique identifier specified by the PCI SIG.
3262 	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
3263 	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
3264 	 * is expected to automatically put this entry at the
3265 	 * beginning of the VPD.
3266 	 */
3267 	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
3268 
3269 	for (i = 0; i < VPD_LEN; i += 4) {
3270 		ret = t4_os_pci_read_seeprom(adapter, addr+i, (u32 *)(vpd+i));
3271 		if (ret)
3272 			goto out;
3273 	}
3274  	v = (const struct t4_vpd_hdr *)vpd;
3275 
3276 #define FIND_VPD_KW(var,name) do { \
3277 	var = get_vpd_keyword_val(v , name); \
3278 	if (var < 0) { \
3279 		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
3280 		ret = -EINVAL; \
3281 		goto out;      \
3282 	} \
3283 } while (0)
3284 
3285 	FIND_VPD_KW(i, "RV");
3286 	for (csum = 0; i >= 0; i--)
3287 		csum += vpd[i];
3288 
3289 	if (csum) {
3290 		CH_ERR(adapter,
3291 			"corrupted VPD EEPROM, actual csum %u\n", csum);
3292 		ret = -EINVAL;
3293 		goto out;
3294 	}
3295 
3296 	FIND_VPD_KW(ec, "EC");
3297 	FIND_VPD_KW(sn, "SN");
3298 	FIND_VPD_KW(pn, "PN");
3299 	FIND_VPD_KW(na, "NA");
3300 #undef FIND_VPD_KW
3301 
3302 	memcpy(p->id, v->id_data, ID_LEN);
3303 	str_strip((char *)p->id);
3304 	memcpy(p->ec, vpd + ec, EC_LEN);
3305 	str_strip((char *)p->ec);
3306 	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
3307 	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
3308 	str_strip((char *)p->sn);
3309 	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
3310 	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
3311 	str_strip((char *)p->pn);
3312 	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
3313 	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
3314 	str_strip((char *)p->na);
3315 
3316 out:
3317 	kmem_free(vpd, sizeof(u8) * VPD_LEN);
3318 	return ret < 0 ? ret : 0;
3319 }
3320 
3321 /**
3322  *	t4_get_vpd_params - read VPD parameters & retrieve Core Clock
3323  *	@adapter: adapter to read
3324  *	@p: where to store the parameters
3325  *
3326  *	Reads card parameters stored in VPD EEPROM and retrieves the Core
3327  *	Clock.  This can only be called after a connection to the firmware
3328  *	is established.
3329  */
3330 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
3331 {
3332 	u32 cclk_param, cclk_val;
3333 	int ret;
3334 
3335 	/*
3336 	 * Grab the raw VPD parameters.
3337 	 */
3338 	ret = t4_get_raw_vpd_params(adapter, p);
3339 	if (ret)
3340 		return ret;
3341 
3342 	/*
3343 	 * Ask firmware for the Core Clock since it knows how to translate the
3344 	 * Reference Clock ('V2') VPD field into a Core Clock value ...
3345 	 */
3346 	cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3347 		      V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
3348 	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3349 			      1, &cclk_param, &cclk_val);
3350 
3351 	if (ret)
3352 		return ret;
3353 	p->cclk = cclk_val;
3354 
3355 	return 0;
3356 }
3357 
3358 /* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,	/* max retries for SF operations */

	/* flash command opcodes (standard SPI NOR flash command set) */
	SF_PROG_PAGE    = 2,	/* program page */
	SF_WR_DISABLE   = 4,	/* disable writes */
	SF_RD_STATUS    = 5,	/* read status register */
	SF_WR_ENABLE    = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID	= 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
};
3371 
3372 /**
3373  *	sf1_read - read data from the serial flash
3374  *	@adapter: the adapter
3375  *	@byte_cnt: number of bytes to read
3376  *	@cont: whether another operation will be chained
3377  *	@lock: whether to lock SF for PL access only
3378  *	@valp: where to store the read data
3379  *
3380  *	Reads up to 4 bytes of data from the serial flash.  The location of
3381  *	the read needs to be specified prior to calling this by issuing the
3382  *	appropriate commands to the serial flash.
3383  */
3384 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
3385 		    int lock, u32 *valp)
3386 {
3387 	int ret;
3388 
3389 	if (!byte_cnt || byte_cnt > 4)
3390 		return -EINVAL;
3391 	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3392 		return -EBUSY;
3393 	t4_write_reg(adapter, A_SF_OP,
3394 		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
3395 	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3396 	if (!ret)
3397 		*valp = t4_read_reg(adapter, A_SF_DATA);
3398 	return ret;
3399 }
3400 
3401 /**
3402  *	sf1_write - write data to the serial flash
3403  *	@adapter: the adapter
3404  *	@byte_cnt: number of bytes to write
3405  *	@cont: whether another operation will be chained
3406  *	@lock: whether to lock SF for PL access only
3407  *	@val: value to write
3408  *
3409  *	Writes up to 4 bytes of data to the serial flash.  The location of
3410  *	the write needs to be specified prior to calling this by issuing the
3411  *	appropriate commands to the serial flash.
3412  */
3413 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
3414 		     int lock, u32 val)
3415 {
3416 	if (!byte_cnt || byte_cnt > 4)
3417 		return -EINVAL;
3418 	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3419 		return -EBUSY;
3420 	t4_write_reg(adapter, A_SF_DATA, val);
3421 	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3422 		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
3423 	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3424 }
3425 
3426 /**
3427  *	flash_wait_op - wait for a flash operation to complete
3428  *	@adapter: the adapter
3429  *	@attempts: max number of polls of the status register
3430  *	@delay: delay between polls in ms
3431  *
3432  *	Wait for a flash operation to complete by polling the status register.
3433  */
static int flash_wait_op(struct adapter *adapter, int attempts, int ch_delay)
{
	int ret;
	u32 status;

	/* Poll the flash status register until the busy bit (bit 0)
	 * clears, giving up after @attempts polls.
	 */
	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;	/* flash is idle -- operation done */
		if (--attempts == 0)
			return -EAGAIN;
		if (ch_delay) {
#ifdef CONFIG_CUDBG
			/* can't sleep in crash-dump context; busy-wait */
			if (adapter->flags & K_CRASH)
				mdelay(ch_delay);
			else
#endif
				msleep(ch_delay);
		}
	}
}
3457 
3458 /**
3459  *	t4_read_flash - read words from serial flash
3460  *	@adapter: the adapter
3461  *	@addr: the start address for the read
3462  *	@nwords: how many 32-bit words to read
3463  *	@data: where to store the read data
3464  *	@byte_oriented: whether to store data as bytes or as words
3465  *
3466  *	Read the specified number of 32-bit words from the serial flash.
3467  *	If @byte_oriented is set the read data is stored as a byte array
3468  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
3469  *	natural endianness.
3470  */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* The read must lie within the flash and start word-aligned. */
	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/* Build the fast-read command: opcode in the low byte, the
	 * byte-swapped address in the remaining three bytes.
	 */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* Chain the reads; the final word ends the burst and the
		 * SF interface is unlocked immediately afterwards.
		 */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32)(cpu_to_be32(*data));
	}
	return 0;
}
3496 
3497 /**
3498  *	t4_write_flash - write up to a page of data to the serial flash
3499  *	@adapter: the adapter
3500  *	@addr: the start address to write
3501  *	@n: length of data to write in bytes
3502  *	@data: the data to write
3503  *	@byte_oriented: whether to store data as bytes or as words
3504  *
3505  *	Writes up to a page of data (256 bytes) to the serial flash starting
3506  *	at the given address.  All the data must be written to the same page.
3507  *	If @byte_oriented is set the write data is stored as byte stream
3508  *	(i.e. matches what on disk), otherwise in big-endian.
3509  */
int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[64];	/* holds one full flash page for read-back verify */
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* All @n bytes must fall within one SF_PAGE_SIZE flash page. */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* Program-page command: opcode in low byte, byte-swapped address. */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the payload out up to 4 bytes per transaction. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = cpu_to_be32(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	/* @data was advanced by @n in the loop above, so back up to the
	 * start of the source when comparing against the read-back page.
	 */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
	return ret;
}
3562 
3563 /**
3564  *	t4_get_fw_version - read the firmware version
3565  *	@adapter: the adapter
3566  *	@vers: where to place the version
3567  *
3568  *	Reads the FW version from flash.
3569  */
3570 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3571 {
3572 	return t4_read_flash(adapter, FLASH_FW_START +
3573 			     offsetof(struct fw_hdr, fw_ver), 1,
3574 			     vers, 0);
3575 }
3576 
3577 /**
3578  *	t4_get_bs_version - read the firmware bootstrap version
3579  *	@adapter: the adapter
3580  *	@vers: where to place the version
3581  *
3582  *	Reads the FW Bootstrap version from flash.
3583  */
3584 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3585 {
3586 	return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3587 			     offsetof(struct fw_hdr, fw_ver), 1,
3588 			     vers, 0);
3589 }
3590 
3591 /**
3592  *	t4_get_tp_version - read the TP microcode version
3593  *	@adapter: the adapter
3594  *	@vers: where to place the version
3595  *
3596  *	Reads the TP microcode version from flash.
3597  */
3598 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3599 {
3600 	return t4_read_flash(adapter, FLASH_FW_START +
3601 			     offsetof(struct fw_hdr, tp_microcode_ver),
3602 			     1, vers, 0);
3603 }
3604 
3605 /**
3606  *	t4_get_exprom_version - return the Expansion ROM version (if any)
3607  *	@adapter: the adapter
3608  *	@vers: where to place the version
3609  *
3610  *	Reads the Expansion ROM header from FLASH and returns the version
3611  *	number (if present) through the @vers return value pointer.  We return
3612  *	this in the Firmware Version Format since it's convenient.  Return
3613  *	0 on success, -ENOENT if no Expansion ROM is present.
3614  */
3615 int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
3616 {
3617 	struct exprom_header {
3618 		unsigned char hdr_arr[16];	/* must start with 0x55aa */
3619 		unsigned char hdr_ver[4];	/* Expansion ROM version */
3620 	} *hdr;
3621 	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3622 					   sizeof(u32))];
3623 	int ret;
3624 
3625 	ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
3626 			    ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3627 			    0);
3628 	if (ret)
3629 		return ret;
3630 
3631 	hdr = (struct exprom_header *)exprom_header_buf;
3632 	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3633 		return -ENOENT;
3634 
3635 	*vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
3636 		 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
3637 		 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
3638 		 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
3639 	return 0;
3640 }
3641 
3642 /**
3643  *	t4_get_scfg_version - return the Serial Configuration version
3644  *	@adapter: the adapter
3645  *	@vers: where to place the version
3646  *
3647  *	Reads the Serial Configuration Version via the Firmware interface
3648  *	(thus this can only be called once we're ready to issue Firmware
3649  *	commands).  The format of the Serial Configuration version is
3650  *	adapter specific.  Returns 0 on success, an error on failure.
3651  *
3652  *	Note that early versions of the Firmware didn't include the ability
3653  *	to retrieve the Serial Configuration version, so we zero-out the
3654  *	return-value parameter in that case to avoid leaving it with
3655  *	garbage in it.
3656  *
3657  *	Also note that the Firmware will return its cached copy of the Serial
3658  *	Initialization Revision ID, not the actual Revision ID as written in
3659  *	the Serial EEPROM.  This is only an issue if a new VPD has been written
3660  *	and the Firmware/Chip haven't yet gone through a RESET sequence.  So
3661  *	it's best to defer calling this routine till after a FW_RESET_CMD has
3662  *	been issued if the Host Driver will be performing a full adapter
3663  *	initialization.
3664  */
3665 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3666 {
3667 	u32 scfgrev_param;
3668 	int ret;
3669 
3670 	scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3671 			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
3672 	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3673 			      1, &scfgrev_param, vers);
3674 	if (ret)
3675 		*vers = 0;
3676 	return ret;
3677 }
3678 
3679 /**
3680  *	t4_get_vpd_version - return the VPD version
3681  *	@adapter: the adapter
3682  *	@vers: where to place the version
3683  *
3684  *	Reads the VPD via the Firmware interface (thus this can only be called
3685  *	once we're ready to issue Firmware commands).  The format of the
3686  *	VPD version is adapter specific.  Returns 0 on success, an error on
3687  *	failure.
3688  *
3689  *	Note that early versions of the Firmware didn't include the ability
3690  *	to retrieve the VPD version, so we zero-out the return-value parameter
3691  *	in that case to avoid leaving it with garbage in it.
3692  *
3693  *	Also note that the Firmware will return its cached copy of the VPD
3694  *	Revision ID, not the actual Revision ID as written in the Serial
3695  *	EEPROM.  This is only an issue if a new VPD has been written and the
3696  *	Firmware/Chip haven't yet gone through a RESET sequence.  So it's best
3697  *	to defer calling this routine till after a FW_RESET_CMD has been issued
3698  *	if the Host Driver will be performing a full adapter initialization.
3699  */
3700 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3701 {
3702 	u32 vpdrev_param;
3703 	int ret;
3704 
3705 	vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3706 			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
3707 	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3708 			      1, &vpdrev_param, vers);
3709 	if (ret)
3710 		*vers = 0;
3711 	return ret;
3712 }
3713 
3714 /**
3715  *	t4_get_version_info - extract various chip/firmware version information
3716  *	@adapter: the adapter
3717  *
3718  *	Reads various chip/firmware version numbers and stores them into the
3719  *	adapter Adapter Parameters structure.  If any of the efforts fails
3720  *	the first failure will be returned, but all of the version numbers
3721  *	will be read.
3722  */
3723 int t4_get_version_info(struct adapter *adapter)
3724 {
3725 	int ret = 0;
3726 
3727 	#define FIRST_RET(__getvinfo) \
3728 	do { \
3729 		int __ret = __getvinfo; \
3730 		if (__ret && !ret) \
3731 			ret = __ret; \
3732 	} while (0)
3733 
3734 	FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3735 	FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3736 	FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3737 	FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3738 	FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3739 	FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3740 
3741 	#undef FIRST_RET
3742 
3743 	return ret;
3744 }
3745 
3746 /**
3747  *	t4_dump_version_info - dump all of the adapter configuration IDs
3748  *	@adapter: the adapter
3749  *
3750  *	Dumps all of the various bits of adapter configuration version/revision
3751  *	IDs information.  This is typically called at some point after
3752  *	t4_get_version_info() has been called.
3753  */
void t4_dump_version_info(struct adapter *adapter)
{
	/*
	 * Device information.
	 */
	CH_INFO(adapter, "Chelsio %s rev %d\n",
		adapter->params.vpd.id,
		CHELSIO_CHIP_RELEASE(adapter->params.chip));
	CH_INFO(adapter, "S/N: %s, P/N: %s\n",
		adapter->params.vpd.sn,
		adapter->params.vpd.pn);

	/*
	 * Firmware Version.  A zero version word means the read failed or
	 * no firmware is present.
	 */
	if (!adapter->params.fw_vers)
		CH_WARN(adapter, "No firmware loaded\n");
	else
		CH_INFO(adapter, "Firmware version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));

	/*
	 * Bootstrap Firmware Version.  (Some adapters don't have Bootstrap
	 * Firmware, so dev_info() is more appropriate here.)
	 */
	if (!adapter->params.bs_vers)
		CH_INFO(adapter, "No bootstrap loaded\n");
	else
		CH_INFO(adapter, "Bootstrap version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers));

	/*
	 * TP Microcode Version.
	 */
	if (!adapter->params.tp_vers)
		CH_WARN(adapter, "No TP Microcode loaded\n");
	else
		CH_INFO(adapter, "TP Microcode version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));

	/*
	 * Expansion ROM version.
	 */
	if (!adapter->params.er_vers)
		CH_INFO(adapter, "No Expansion ROM loaded\n");
	else
		CH_INFO(adapter, "Expansion ROM version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));


	/*
	 * Serial Configuration version.  Printed unconditionally; zero
	 * indicates the firmware could not supply it.
	 */
	CH_INFO(adapter, "Serial Configuration version: %x\n",
		adapter->params.scfg_vers);

	/*
	 * VPD  version.
	 */
	CH_INFO(adapter, "VPD version: %x\n",
		adapter->params.vpd_vers);
}
3828 
3829 /**
3830  *	t4_check_fw_version - check if the FW is supported with this driver
3831  *	@adap: the adapter
3832  *
3833  *	Checks if an adapter's FW is compatible with the driver.  Returns 0
3834  *	if there's exact match, a negative error if the version could not be
3835  *	read or there's a major version mismatch
3836  */
3837 int t4_check_fw_version(struct adapter *adap)
3838 {
3839 	int ret, major, minor, micro;
3840 	int exp_major, exp_minor, exp_micro;
3841 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
3842 
3843 	ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3844 	if (ret)
3845 		return ret;
3846 
3847 	major = G_FW_HDR_FW_VER_MAJOR(adap->params.fw_vers);
3848 	minor = G_FW_HDR_FW_VER_MINOR(adap->params.fw_vers);
3849 	micro = G_FW_HDR_FW_VER_MICRO(adap->params.fw_vers);
3850 
3851 	switch (chip_version) {
3852 	case CHELSIO_T4:
3853 		exp_major = T4FW_MIN_VERSION_MAJOR;
3854 		exp_minor = T4FW_MIN_VERSION_MINOR;
3855 		exp_micro = T4FW_MIN_VERSION_MICRO;
3856 		break;
3857 	case CHELSIO_T5:
3858 		exp_major = T5FW_MIN_VERSION_MAJOR;
3859 		exp_minor = T5FW_MIN_VERSION_MINOR;
3860 		exp_micro = T5FW_MIN_VERSION_MICRO;
3861 		break;
3862 	case CHELSIO_T6:
3863 		exp_major = T6FW_MIN_VERSION_MAJOR;
3864 		exp_minor = T6FW_MIN_VERSION_MINOR;
3865 		exp_micro = T6FW_MIN_VERSION_MICRO;
3866 		break;
3867 	default:
3868 		CH_ERR(adap, "Unsupported chip type, %x\n",
3869 			adap->params.chip);
3870 		return -EINVAL;
3871 	}
3872 
3873 	if (major < exp_major || (major == exp_major && minor < exp_minor) ||
3874 	    (major == exp_major && minor == exp_minor && micro < exp_micro)) {
3875 		CH_ERR(adap, "Card has firmware version %u.%u.%u, minimum "
3876 			"supported firmware is %u.%u.%u.\n", major, minor,
3877 			micro, exp_major, exp_minor, exp_micro);
3878 		return -EFAULT;
3879 	}
3880 	return 0;
3881 }
3882 
3883 /* Is the given firmware API compatible with the one the driver was compiled
3884  * with?
3885  */
3886 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
3887 {
3888 
3889 	/* short circuit if it's the exact same firmware version */
3890 	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3891 		return 1;
3892 
3893 	/*
3894 	 * XXX: Is this too conservative?  Perhaps I should limit this to the
3895 	 * features that are supported in the driver.
3896 	 */
3897 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3898 	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3899 	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
3900 	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
3901 		return 1;
3902 #undef SAME_INTF
3903 
3904 	return 0;
3905 }
3906 
3907 /* The firmware in the filesystem is usable, but should it be installed?
3908  * This routine explains itself in detail if it indicates the filesystem
3909  * firmware should be installed.
3910  */
3911 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
3912 				int k, int c, int t4_fw_install)
3913 {
3914 	const char *reason;
3915 
3916 	if (!card_fw_usable) {
3917 		reason = "incompatible or unusable";
3918 		goto install;
3919 	}
3920 
3921 	if (k > c) {
3922 		reason = "older than the version bundled with this driver";
3923 		goto install;
3924 	}
3925 
3926 	if (t4_fw_install == 2 && k != c) {
3927 		reason = "different than the version bundled with this driver";
3928 		goto install;
3929 	}
3930 
3931 	return 0;
3932 
3933 install:
3934 	if (t4_fw_install == 0) {
3935 		CH_ERR(adap, "firmware on card (%u.%u.%u.%u) is %s, "
3936 		       "but the driver is prohibited from installing a "
3937 		       "different firmware on the card.\n",
3938 		       G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3939 		       G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
3940 		       reason);
3941 
3942 		return (0);
3943 	}
3944 
3945 	CH_ERR(adap, "firmware on card (%u.%u.%u.%u) is %s, "
3946 	       "installing firmware %u.%u.%u.%u on card.\n",
3947 	       G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
3948 	       G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
3949 	       G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
3950 	       G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
3951 
3952 	return 1;
3953 }
3954 
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, const int t4_fw_install,
	       enum dev_state state, int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	/* The firmware header the driver was compiled against. */
	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			    sizeof(*card_fw) / sizeof(uint32_t),
			    (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		CH_ERR(adap,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	/* @fw_data, if supplied, is the firmware image from the filesystem. */
	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.  Note that t4_fw_install = 2
		 * is ignored here -- use cxgbtool loadfw if you want to
		 * reinstall the same firmware as the one on the card.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver),
					 t4_fw_install)) {

		/* Flash the filesystem firmware onto the card. */
		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			CH_ERR(adap,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update cached information */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		(void)t4_init_devlog_params(adap, 1);
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		/* driver, card and filesystem versions for the diagnostic */
		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		CH_ERR(adap, "Cannot find a usable firmware: "
			"fw_install %d, chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			t4_fw_install, state,
			G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
			G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
			G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
			G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
			G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
			G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
		ret = EINVAL;	/* NOTE(review): positive, unlike the negated
				 * errnos above -- confirm callers expect this */
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;

}
4045 
4046 /**
4047  *	t4_flash_erase_sectors - erase a range of flash sectors
4048  *	@adapter: the adapter
4049  *	@start: the first sector to erase
4050  *	@end: the last sector to erase
4051  *
4052  *	Erases the sectors in the given inclusive range.
4053  */
4054 int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
4055 {
4056 	int ret = 0;
4057 
4058 	if (end >= adapter->params.sf_nsec)
4059 		return -EINVAL;
4060 
4061 	while (start <= end) {
4062 		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
4063 		    (ret = sf1_write(adapter, 4, 0, 1,
4064 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
4065 		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
4066 			CH_ERR(adapter,
4067 				"erase of flash sector %d failed, error %d\n",
4068 				start, ret);
4069 			break;
4070 		}
4071 		start++;
4072 	}
4073 	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
4074 	return ret;
4075 }
4076 
4077 /**
4078  *	t4_flash_cfg_addr - return the address of the flash configuration file
4079  *	@adapter: the adapter
4080  *
4081  *	Return the address within the flash where the Firmware Configuration
4082  *	File is stored, or an error if the device FLASH is too small to contain
4083  *	a Firmware Configuration File.
4084  */
4085 int t4_flash_cfg_addr(struct adapter *adapter)
4086 {
4087 	/*
4088 	 * If the device FLASH isn't large enough to hold a Firmware
4089 	 * Configuration File, return an error.
4090 	 */
4091 	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
4092 		return -ENOSPC;
4093 
4094 	return FLASH_CFG_START;
4095 }
4096 
4097 /* Return TRUE if the specified firmware matches the adapter.  I.e. T4
4098  * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
4099  * and emit an error message for mismatched firmware to save our caller the
4100  * effort ...
4101  */
4102 static int t4_fw_matches_chip(const struct adapter *adap,
4103 			      const struct fw_hdr *hdr)
4104 {
4105 	/*
4106 	 * The expression below will return FALSE for any unsupported adapter
4107 	 * which will keep us "honest" in the future ...
4108 	 */
4109 	if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
4110 	    (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
4111 	    (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
4112 		return 1;
4113 
4114 	CH_ERR(adap,
4115 		"FW image (%d) is not suitable for this adapter (%d)\n",
4116 		hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
4117 	return 0;
4118 }
4119 
4120 /**
4121  *	t4_load_fw - download firmware
4122  *	@adap: the adapter
4123  *	@fw_data: the firmware image to write
4124  *	@size: image size
4125  *	@bootstrap: indicates if the binary is a bootstrap fw
4126  *
4127  *	Write the supplied firmware image to the card's serial flash.
4128  */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size,
	       unsigned int bootstrap)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	/* Bootstrap and main firmware live in different flash regions. */
	if (bootstrap) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
 		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}

	/* Sanity-check the image: non-empty, 512-byte granular, sized as
	 * its own header claims, within the region, and for this chip.
	 */
	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
		CH_ERR(adap,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
			fw_size);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* The 32-bit words of a valid image sum to 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	/* Write the remaining pages of the image. */
	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	/* Everything is in place; write the real version word last. */
	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n",
			ret);
	else {
		/* Refresh the cached version from what was just flashed. */
		if (bootstrap)
			ret = t4_get_bs_version(adap, &adap->params.bs_vers);
		else
			ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	}
	return ret;
}
4224 
4225 /**
4226  *	t4_phy_fw_ver - return current PHY firmware version
4227  *	@adap: the adapter
4228  *	@phy_fw_ver: return value buffer for PHY firmware version
4229  *
4230  *	Returns the current version of external PHY firmware on the
4231  *	adapter.
4232  */
4233 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
4234 {
4235 	u32 param, val;
4236 	int ret;
4237 
4238 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4239 		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
4240 		 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
4241 		 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
4242 	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
4243 			      &param, &val);
4244 	if (ret < 0)
4245 		return ret;
4246 	*phy_fw_ver = val;
4247 	return 0;
4248 }
4249 
4250 /**
4251  *	t4_load_phy_fw - download port PHY firmware
4252  *	@adap: the adapter
4253  *	@win: the PCI-E Memory Window index to use for t4_memory_rw()
4254  *	@lock: the lock to use to guard the memory copy
4255  *	@phy_fw_version: function to check PHY firmware versions
4256  *	@phy_fw_data: the PHY firmware image to write
4257  *	@phy_fw_size: image size
4258  *
4259  *	Transfer the specified PHY firmware to the adapter.  If a non-NULL
4260  *	@phy_fw_version is supplied, then it will be used to determine if
4261  *	it's necessary to perform the transfer by comparing the version
4262  *	of any existing adapter PHY firmware with that of the passed in
4263  *	PHY firmware image.  If @lock is non-NULL then it will be used
4264  *	around the call to t4_memory_rw() which transfers the PHY firmware
4265  *	to the adapter.
4266  *
4267  *	A negative error number will be returned if an error occurs.  If
4268  *	version number support is available and there's no need to upgrade
4269  *	the firmware, 0 will be returned.  If firmware is successfully
4270  *	transferred to the adapter, 1 will be retured.
4271  *
4272  *	NOTE: some adapters only have local RAM to store the PHY firmware.  As
4273  *	a result, a RESET of the adapter would cause that RAM to lose its
4274  *	contents.  Thus, loading PHY firmware on such adapters must happen after any
4275  *	FW_RESET_CMDs ...
4276  */
4277 int t4_load_phy_fw(struct adapter *adap,
4278 		   int win, t4_os_lock_t *lock,
4279 		   int (*phy_fw_version)(const u8 *, size_t),
4280 		   const u8 *phy_fw_data, size_t phy_fw_size)
4281 {
4282 	unsigned long mtype = 0, maddr = 0;
4283 	u32 param, val;
4284 	int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
4285 	int ret;
4286 
4287 	/*
4288 	 * If we have version number support, then check to see if the adapter
4289 	 * already has up-to-date PHY firmware loaded.
4290 	 */
4291 	 if (phy_fw_version) {
4292 		new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
4293 		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
4294 		if (ret < 0)
4295 			return ret;;
4296 
4297 		if (cur_phy_fw_ver >= new_phy_fw_vers) {
4298 			CH_WARN(adap, "PHY Firmware already up-to-date, "
4299 				"version %#x\n", cur_phy_fw_ver);
4300 			return 0;
4301 		}
4302 	}
4303 
4304 	/*
4305 	 * Ask the firmware where it wants us to copy the PHY firmware image.
4306 	 * The size of the file requires a special version of the READ coommand
4307 	 * which will pass the file size via the values field in PARAMS_CMD and
4308 	 * retreive the return value from firmware and place it in the same
4309 	 * buffer values
4310 	 */
4311 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4312 		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
4313 		 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
4314 		 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
4315 	val = phy_fw_size;
4316 	ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
4317 			      &param, &val, 1, true);
4318 	if (ret < 0)
4319 		return ret;
4320 	mtype = val >> 8;
4321 	maddr = (val & 0xff) << 16;
4322 
4323 	/*
4324 	 * Copy the supplied PHY Firmware image to the adapter memory location
4325 	 * allocated by the adapter firmware.
4326 	 */
4327 	if (lock)
4328 		t4_os_lock(lock);
4329 	ret = t4_memory_rw(adap, win, mtype, maddr,
4330 			   phy_fw_size, (__be32*)phy_fw_data,
4331 			   T4_MEMORY_WRITE);
4332 	if (lock)
4333 		t4_os_unlock(lock);
4334 	if (ret)
4335 		return ret;
4336 
4337 	/*
4338 	 * Tell the firmware that the PHY firmware image has been written to
4339 	 * RAM and it can now start copying it over to the PHYs.  The chip
4340 	 * firmware will RESET the affected PHYs as part of this operation
4341 	 * leaving them running the new PHY firmware image.
4342 	 */
4343 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4344 		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
4345 		 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
4346 		 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
4347 	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
4348 				    &param, &val, 30000);
4349 
4350 	/*
4351 	 * If we have version number support, then check to see that the new
4352 	 * firmware got loaded properly.
4353 	 */
4354 	if (phy_fw_version) {
4355 		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
4356 		if (ret < 0)
4357 			return ret;
4358 
4359 		if (cur_phy_fw_ver != new_phy_fw_vers) {
4360 			CH_WARN(adap, "PHY Firmware did not update: "
4361 				"version on adapter %#x, "
4362 				"version flashed %#x\n",
4363 				cur_phy_fw_ver, new_phy_fw_vers);
4364 			return -ENXIO;
4365 		}
4366 	}
4367 
4368 	return 1;
4369 }
4370 
4371 /**
4372  *	t4_fwcache - firmware cache operation
4373  *	@adap: the adapter
4374  *	@op  : the operation (flush or flush and invalidate)
4375  */
4376 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
4377 {
4378 	struct fw_params_cmd c;
4379 
4380 	memset(&c, 0, sizeof(c));
4381 	c.op_to_vfn =
4382 	    cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
4383 			    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4384 				V_FW_PARAMS_CMD_PFN(adap->pf) |
4385 				V_FW_PARAMS_CMD_VFN(0));
4386 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4387 	c.param[0].mnem =
4388 	    cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4389 			    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
4390 	c.param[0].val = (__force __be32)op;
4391 
4392 	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
4393 }
4394 
/*
 * Read the contents of the CIM PIF logic analyzer into the supplied
 * request (@pif_req) and response (@pif_rsp) buffers.  The current LA
 * write pointers are optionally returned through @pif_req_wrptr and
 * @pif_rsp_wrptr.  LA capture is disabled while reading and the debug
 * configuration register is restored before returning.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	/* Save the debug config and turn off LA capture if it is enabled. */
	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	/* Start reading from the current write pointers. */
	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			/* Select the next entry, then read both LA words. */
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		/* Skip two entries between each group of six, with wrap. */
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	/* Restore the original debug configuration. */
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}
4428 
4429 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
4430 {
4431 	u32 cfg;
4432 	int i, j, idx;
4433 
4434 	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
4435 	if (cfg & F_LADBGEN)
4436 		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);
4437 
4438 	for (i = 0; i < CIM_MALA_SIZE; i++) {
4439 		for (j = 0; j < 5; j++) {
4440 			idx = 8 * i + j;
4441 			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
4442 				     V_PILADBGRDPTR(idx));
4443 			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
4444 			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
4445 		}
4446 	}
4447 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
4448 }
4449 
4450 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
4451 {
4452 	unsigned int i, j;
4453 
4454 	for (i = 0; i < 8; i++) {
4455 		u32 *p = la_buf + i;
4456 
4457 		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
4458 		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
4459 		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
4460 		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
4461 			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
4462 	}
4463 }
4464 
/*
 * Capabilities that may be advertised during link autonegotiation:
 * all supported speed bits plus the autonegotiation capability itself.
 */
#define ADVERT_MASK (V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED) | \
		     FW_PORT_CAP_ANEG)
4467 
4468 /* Translate Firmware Port Capabilities Pause specification to Common Code */
4469 static inline unsigned int fwcap_to_cc_pause(unsigned int fw_pause)
4470 {
4471 	unsigned int cc_pause = 0;
4472 
4473 	if (fw_pause & FW_PORT_CAP_FC_RX)
4474 		cc_pause |= PAUSE_RX;
4475 	if (fw_pause & FW_PORT_CAP_FC_TX)
4476 		cc_pause |= PAUSE_TX;
4477 
4478 	return cc_pause;
4479 }
4480 
4481 /* Translate Common Code Pause specification into Firmware Port Capabilities */
4482 static inline unsigned int cc_to_fwcap_pause(unsigned int cc_pause)
4483 {
4484 	unsigned int fw_pause = 0;
4485 
4486 	if (cc_pause & PAUSE_RX)
4487 		fw_pause |= FW_PORT_CAP_FC_RX;
4488 	if (cc_pause & PAUSE_TX)
4489 		fw_pause |= FW_PORT_CAP_FC_TX;
4490 
4491 	return fw_pause;
4492 }
4493 
4494 /* Translate Firmware Forward Error Correction specification to Common Code */
4495 static inline unsigned int fwcap_to_cc_fec(unsigned int fw_fec)
4496 {
4497 	unsigned int cc_fec = 0;
4498 
4499 	if (fw_fec & FW_PORT_CAP_FEC_RS)
4500 		cc_fec |= FEC_RS;
4501 	if (fw_fec & FW_PORT_CAP_FEC_BASER_RS)
4502 		cc_fec |= FEC_BASER_RS;
4503 
4504 	return cc_fec;
4505 }
4506 
4507 /* Translate Common Code Forward Error Correction specification to Firmware */
4508 static inline unsigned int cc_to_fwcap_fec(unsigned int cc_fec)
4509 {
4510 	unsigned int fw_fec = 0;
4511 
4512 	if (cc_fec & FEC_RS)
4513 		fw_fec |= FW_PORT_CAP_FEC_RS;
4514 	if (cc_fec & FEC_BASER_RS)
4515 		fw_fec |= FW_PORT_CAP_FEC_BASER_RS;
4516 
4517 	return fw_fec;
4518 }
4519 
/**
 *	t4_link_l1cfg - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: the Firmware Mailbox to use
 *	@port: the Port ID
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fw_mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
	unsigned int fw_fc, cc_fec, fw_fec;

	lc->link_ok = 0;

	/*
	 * Convert driver coding of Pause Frame Flow Control settings into the
	 * Firmware's API.
	 */
	fw_fc = cc_to_fwcap_pause(lc->requested_fc);

	/*
	 * Convert Common Code Forward Error Control settings into the
	 * Firmware's API.  If the current Requested FEC has "Automatic"
	 * (IEEE 802.3) specified, then we use whatever the Firmware
	 * sent us as part of its IEEE 802.3-based interpretation of
	 * the Transceiver Module EPROM FEC parameters.  Otherwise we
	 * use whatever is in the current Requested FEC settings.
	 */
	if (lc->requested_fec & FEC_AUTO)
		cc_fec = lc->auto_fec;
	else
		cc_fec = lc->requested_fec;
	fw_fec = cc_to_fwcap_fec(cc_fec);

	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 =
		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			    FW_LEN16(c));

	/* No ANEG support: request everything the port supports. */
	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
					     fw_fc | fw_fec);
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = cc_fec;
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		/* ANEG off: force the requested speed/FC/FEC. */
		c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed |
					     fw_fc | fw_fec | fw_mdi);
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = cc_fec;
	} else
		/* ANEG on: advertise; fc/fec are settled after negotiation. */
		c.u.l1cfg.rcap = cpu_to_be32(lc->advertising |
					     fw_fc | fw_fec | fw_mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
4586 
4587 /**
4588  *	t4_restart_aneg - restart autonegotiation
4589  *	@adap: the adapter
4590  *	@mbox: mbox to use for the FW command
4591  *	@port: the port id
4592  *
4593  *	Restarts autonegotiation for the selected port.
4594  */
4595 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
4596 {
4597 	struct fw_port_cmd c;
4598 
4599 	memset(&c, 0, sizeof(c));
4600 	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
4601 				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
4602 				     V_FW_PORT_CMD_PORTID(port));
4603 	c.action_to_len16 =
4604 		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
4605 			    FW_LEN16(c));
4606 	c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
4607 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4608 }
4609 
/* Platform-specific handler invoked when a matching interrupt fires. */
typedef void (*int_handler_t)(struct adapter *adap);

/*
 * One entry of an interrupt-action table consumed by
 * t4_handle_intr_status().  Tables are terminated by an entry whose
 * mask is 0.
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler;	/* platform-specific int handler */
};
4619 
4620 /**
4621  *	t4_handle_intr_status - table driven interrupt handler
4622  *	@adapter: the adapter that generated the interrupt
4623  *	@reg: the interrupt status register to process
4624  *	@acts: table of interrupt actions
4625  *
4626  *	A table driven interrupt handler that applies a set of masks to an
4627  *	interrupt status word and performs the corresponding actions if the
4628  *	interrupts described by the mask have occurred.  The actions include
4629  *	optionally emitting a warning or alert message.  The table is terminated
4630  *	by an entry specifying mask 0.  Returns the number of fatal interrupt
4631  *	conditions.
4632  */
4633 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
4634 				 const struct intr_info *acts)
4635 {
4636 	int fatal = 0;
4637 	unsigned int mask = 0;
4638 	unsigned int status = t4_read_reg(adapter, reg);
4639 
4640 	for ( ; acts->mask; ++acts) {
4641 		if (!(status & acts->mask))
4642 			continue;
4643 		if (acts->fatal) {
4644 			fatal++;
4645 			CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
4646 				  status & acts->mask);
4647 		} else if (acts->msg)
4648 			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
4649 				 status & acts->mask);
4650 		if (acts->int_handler)
4651 			acts->int_handler(adapter);
4652 		mask |= acts->mask;
4653 	}
4654 	status &= mask;
4655 	if (status)	/* clear processed interrupts */
4656 		t4_write_reg(adapter, reg, status);
4657 	return fatal;
4658 }
4659 
/*
 * Interrupt handler for the PCIE module.  On T4 the PCIe interrupt
 * sources are spread across three cause registers (UTL system bus agent,
 * UTL PCI Express port, and PCIE_INT_CAUSE); on T5 and later everything
 * is reported through A_PCIE_INT_CAUSE.  Any fatal condition takes the
 * adapter down via t4_fatal_err().
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	/* T4-only: UTL system bus agent parity errors. */
	static const struct intr_info sysbus_intr_info[] = {
		{ F_RNPP, "RXNP array parity error", -1, 1 },
		{ F_RPCP, "RXPC array parity error", -1, 1 },
		{ F_RCIP, "RXCIF array parity error", -1, 1 },
		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
		{ F_RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	/* T4-only: UTL PCI Express port errors. */
	static const struct intr_info pcie_port_intr_info[] = {
		{ F_TPCP, "TXPC array parity error", -1, 1 },
		{ F_TNPP, "TXNP array parity error", -1, 1 },
		{ F_TFTP, "TXFT array parity error", -1, 1 },
		{ F_TCAP, "TXCA array parity error", -1, 1 },
		{ F_TCIP, "TXCIF array parity error", -1, 1 },
		{ F_RCAP, "RXCA array parity error", -1, 1 },
		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
		{ F_RDPE, "Rx data parity error", -1, 1 },
		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	/* T4 PCIE_INT_CAUSE conditions. */
	static const struct intr_info pcie_intr_info[] = {
		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
		  0 },
		{ 0 }
	};

	/* T5/T6 PCIE_INT_CAUSE conditions. */
	static struct intr_info t5_pcie_intr_info[] = {
		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
		  -1, 1 },
		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
		  -1, 1 },
		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
		  -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
		  -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
		  -1, 1 },
		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
		  -1, 1 },
		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
		{ F_READRSPERR, "Outbound read error", -1,
		  0 },
		{ 0 }
	};

	int fat;

	if (is_t4(adapter->params.chip))
		fat = t4_handle_intr_status(adapter,
				A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				sysbus_intr_info) +
			t4_handle_intr_status(adapter,
					A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
					pcie_port_intr_info) +
			t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					      pcie_intr_info);
	else
		fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					    t5_pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}
4778 
/*
 * TP interrupt handler.  All reported TP conditions are fatal and shut
 * the adapter down via t4_fatal_err().
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}
4793 
/*
 * SGE interrupt handler.  Parity errors reported in SGE_INT_CAUSE1/2 are
 * always fatal; SGE_INT_CAUSE3 is processed through the chip-specific
 * action tables below.  Any fatal condition ends in t4_fatal_err().
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;
	u32 err;

	static const struct intr_info sge_intr_info[] = {
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ F_ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ F_ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 |
		  F_ERR_PCIE_ERROR2 | F_ERR_PCIE_ERROR3,
		  "SGE PCIe error for a DBP thread", -1, 0 },
		{ 0 }
	};

	/* Conditions only present on T4/T5. */
	static struct intr_info t4t5_sge_intr_info[] = {
		{ F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
		{ F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ 0 }
	};

	/*
	 * For now, treat below interrupts as fatal so that we disable SGE
	 * and get better debug.
	 */
	static struct intr_info t6_sge_intr_info[] = {
		{ F_FATAL_WRE_LEN,
		  "SGE Actual WRE packet is less than advertized length",
		  -1, 1 },
		{ 0 }
	};

	/* SGE_INT_CAUSE1/2 report parity errors; clear whatever we saw. */
	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
		((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
	if (v) {
		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
				(unsigned long long)v);
		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
	}

	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t4t5_sge_intr_info);
	else
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t6_sge_intr_info);

	/* Report and clear any latched per-queue error. */
	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
	if (err & F_ERROR_QID_VALID) {
		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
		if (err & F_UNCAPTURED_ERROR)
			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
			     F_UNCAPTURED_ERROR);
	}

	if (v != 0)
		t4_fatal_err(adapter);
}
4876 
/* Parity errors covering all CIM outbound / inbound queues. */
#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)

/*
 * CIM interrupt handler.  Processes both the host interrupt cause and the
 * uP access interrupt cause registers; if the firmware has reported an
 * error via PCIE_FW that is surfaced first.  All fatal conditions end in
 * t4_fatal_err().
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info cim_upintr_info[] = {
		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};
	int fat;

	/* A firmware-reported error takes precedence; surface it first. */
	if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
		t4_report_fw_error(adapter);

	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}
4940 
/*
 * ULP RX interrupt handler.  All listed conditions are fatal and shut
 * the adapter down via t4_fatal_err().
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
		t4_fatal_err(adapter);
}
4956 
/*
 * ULP TX interrupt handler.  PBL out-of-bounds conditions are reported
 * but non-fatal; a parity error is fatal and ends in t4_fatal_err().
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
		t4_fatal_err(adapter);
}
4978 
/*
 * PM TX interrupt handler.  All listed conditions are fatal and shut
 * the adapter down via t4_fatal_err().
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ 0xffffff0, "PMTX framing error", -1, 1 },
		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
		  1 },
		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
		t4_fatal_err(adapter);
}
5001 
/*
 * PM RX interrupt handler.  All listed conditions are fatal and shut
 * the adapter down via t4_fatal_err().
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ 0x3ffff0, "PMRX framing error", -1, 1 },
		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
		  1 },
		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
		t4_fatal_err(adapter);
}
5021 
/*
 * CPL switch interrupt handler.  All listed conditions are fatal and
 * shut the adapter down via t4_fatal_err().
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
		t4_fatal_err(adapter);
}
5040 
/*
 * LE interrupt handler.  T6 uses different cause bits for the same
 * logical conditions, so the action table is selected by chip version.
 * Fatal conditions end in t4_fatal_err().
 */
static void le_intr_handler(struct adapter *adap)
{
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
	/* T4/T5 cause bits. */
	static const struct intr_info le_intr_info[] = {
		{ F_LIPMISS, "LE LIP miss", -1, 0 },
		{ F_LIP0, "LE 0 LIP error", -1, 0 },
		{ F_PARITYERR, "LE parity error", -1, 1 },
		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	/* T6 cause bits. */
	static struct intr_info t6_le_intr_info[] = {
		{ F_T6_LIPMISS, "LE LIP miss", -1, 0 },
		{ F_T6_LIP0, "LE 0 LIP error", -1, 0 },
		{ F_TCAMINTPERR, "LE parity error", -1, 1 },
		{ F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
				  (chip_ver <= CHELSIO_T5) ?
				  le_intr_info : t6_le_intr_info))
		t4_fatal_err(adap);
}
5070 
5071 /*
5072  * MPS interrupt handler.
5073  */
static void mps_intr_handler(struct adapter *adapter)
{
	/* Per-sub-block MPS interrupt cause tables, one per cause register. */
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
		  1 },
		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	/*
	 * Walk every MPS sub-block cause register.  Each handler call
	 * returns non-zero when it logged at least one asserted cause;
	 * summing them tells us whether anything fired anywhere.
	 */
	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
				    mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	/* Clear the top-level MPS cause register and flush the write. */
	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
	t4_read_reg(adapter, A_MPS_INT_CAUSE);	/* flush */
	if (fat)
		t4_fatal_err(adapter);
}
5140 
5141 #define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
5142 		      F_ECC_UE_INT_CAUSE)
5143 
5144 /*
5145  * EDC/MC interrupt handler.
5146  */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	/* Display names indexed by memory id (MEM_EDC0..MEM_MC1). */
	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };

	unsigned int addr, cnt_addr, v;

	/*
	 * Pick the interrupt-cause and ECC-status registers for the
	 * selected memory controller.  T4 has a single MC with its own
	 * register block; T5+ use the per-controller MC_P registers.
	 */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
	} else if (idx == MEM_MC) {
		if (is_t4(adapter->params.chip)) {
			addr = A_MC_INT_CAUSE;
			cnt_addr = A_MC_ECC_STATUS;
		} else {
			addr = A_MC_P_INT_CAUSE;
			cnt_addr = A_MC_P_ECC_STATUS;
		}
	} else {
		addr = MC_REG(A_MC_P_INT_CAUSE, 1);
		cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
	}

	/* Only look at the causes this handler knows how to report. */
	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & F_PERR_INT_CAUSE)
		CH_ALERT(adapter, "%s FIFO parity error\n",
			  name[idx]);
	if (v & F_ECC_CE_INT_CAUSE) {
		/* Correctable errors: report the count, then reset it. */
		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));

		if (idx <= MEM_EDC1)
			t4_edc_err_read(adapter, idx);

		/* Writing all-ones to the CECNT field clears the counter. */
		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
		CH_WARN_RATELIMIT(adapter,
				  "%u %s correctable ECC data error%s\n",
				  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & F_ECC_UE_INT_CAUSE)
		CH_ALERT(adapter,
			 "%s uncorrectable ECC data error\n", name[idx]);

	/* Ack the handled causes; parity/uncorrectable errors are fatal. */
	t4_write_reg(adapter, addr, v);
	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}
5192 
5193 /*
5194  * MA interrupt handler.
5195  */
static void ma_intr_handler(struct adapter *adapter)
{
	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);

	if (status & F_MEM_PERR_INT_CAUSE) {
		CH_ALERT(adapter,
			  "MA parity error, parity status %#x\n",
			  t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
		/* T5 has a second parity status register to dump. */
		if (is_t5(adapter->params.chip))
			CH_ALERT(adapter,
				  "MA parity error, parity status %#x\n",
				  t4_read_reg(adapter,
					      A_MA_PARITY_ERROR_STATUS2));
	}
	if (status & F_MEM_WRAP_INT_CAUSE) {
		/* Decode which client wrapped and at what address. */
		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
		CH_ALERT(adapter, "MA address wrap-around error by "
			  "client %u to address %#x\n",
			  G_MEM_WRAP_CLIENT_NUM(v),
			  G_MEM_WRAP_ADDRESS(v) << 4);
	}
	/*
	 * Ack all causes.  Note that any MA interrupt is treated as
	 * fatal unconditionally -- t4_fatal_err() runs even when neither
	 * specific cause bit above was set.
	 */
	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
	t4_fatal_err(adapter);
}
5220 
5221 /*
5222  * SMB interrupt handler.
5223  */
static void smb_intr_handler(struct adapter *adap)
{
	/* SMBus FIFO parity-error causes in A_SMB_INT_CAUSE. */
	static const struct intr_info smb_intr_info[] = {
		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0 }
	};

	/* Log any asserted causes; treat a non-zero return as fatal. */
	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
		t4_fatal_err(adap);
}
5236 
5237 /*
5238  * NC-SI interrupt handler.
5239  */
static void ncsi_intr_handler(struct adapter *adap)
{
	/* NC-SI parity-error causes in A_NCSI_INT_CAUSE. */
	static const struct intr_info ncsi_intr_info[] = {
		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0 }
	};

	/* Log any asserted causes; treat a non-zero return as fatal. */
	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
		t4_fatal_err(adap);
}
5253 
5254 /*
5255  * XGMAC interrupt handler.
5256  */
5257 static void xgmac_intr_handler(struct adapter *adap, int port)
5258 {
5259 	u32 v, int_cause_reg;
5260 
5261 	if (is_t4(adap->params.chip))
5262 		int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
5263 	else
5264 		int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
5265 
5266 	v = t4_read_reg(adap, int_cause_reg);
5267 
5268 	v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
5269 	if (!v)
5270 		return;
5271 
5272 	if (v & F_TXFIFO_PRTY_ERR)
5273 		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
5274 			  port);
5275 	if (v & F_RXFIFO_PRTY_ERR)
5276 		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
5277 			  port);
5278 	t4_write_reg(adap, int_cause_reg, v);
5279 	t4_fatal_err(adap);
5280 }
5281 
5282 /*
5283  * PL interrupt handler.
5284  */
5285 static void pl_intr_handler(struct adapter *adap)
5286 {
5287 	static const struct intr_info pl_intr_info[] = {
5288 		{ F_FATALPERR, "Fatal parity error", -1, 1 },
5289 		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
5290 		{ 0 }
5291 	};
5292 
5293 	static struct intr_info t5_pl_intr_info[] = {
5294 		{ F_FATALPERR, "Fatal parity error", -1, 1 },
5295 		{ 0 }
5296 	};
5297 
5298 	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
5299 				  is_t4(adap->params.chip) ?
5300 				  pl_intr_info : t5_pl_intr_info))
5301 		t4_fatal_err(adap);
5302 }
5303 
5304 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
5305 
5306 /**
5307  *	t4_slow_intr_handler - control path interrupt handler
5308  *	@adapter: the adapter
5309  *
5310  *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
5311  *	The designation 'slow' is because it involves register reads, while
5312  *	data interrupts typically don't involve any MMIOs.
5313  */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);

	/* Nothing we own is asserted -- tell the caller we did no work. */
	if (!(cause & GLBL_INTR_MASK))
		return 0;

	/* Dispatch to the per-module handler for every asserted cause. */
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_MPS)
		mps_intr_handler(adapter);
	if (cause & F_NCSI)
		ncsi_intr_handler(adapter);
	if (cause & F_PL)
		pl_intr_handler(adapter);
	if (cause & F_SMB)
		smb_intr_handler(adapter);
	if (cause & F_MAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & F_MAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & F_MAC2)
		xgmac_intr_handler(adapter, 2);
	if (cause & F_MAC3)
		xgmac_intr_handler(adapter, 3);
	if (cause & F_PCIE)
		pcie_intr_handler(adapter);
	if (cause & F_MC0)
		mem_intr_handler(adapter, MEM_MC);
	/* Only T5 has a second memory controller behind F_MC1. */
	if (is_t5(adapter->params.chip) && (cause & F_MC1))
		mem_intr_handler(adapter, MEM_MC1);
	if (cause & F_EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & F_EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & F_LE)
		le_intr_handler(adapter);
	if (cause & F_TP)
		tp_intr_handler(adapter);
	if (cause & F_MA)
		ma_intr_handler(adapter);
	if (cause & F_PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_SGE)
		sge_intr_handler(adapter);
	if (cause & F_ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void)t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
	return 1;
}
5372 
5373 /**
5374  *	t4_intr_enable - enable interrupts
5375  *	@adapter: the adapter whose interrupts should be enabled
5376  *
5377  *	Enable PF-specific interrupts for the calling function and the top-level
5378  *	interrupt concentrator for global interrupts.  Interrupts are already
5379  *	enabled at each module,	here we just enable the roots of the interrupt
5380  *	hierarchies.
5381  *
5382  *	Note: this function should be called only when the driver manages
5383  *	non PF-specific interrupts from the various HW modules.  Only one PCI
5384  *	function at a time should be doing this.
5385  */
void t4_intr_enable(struct adapter *adapter)
{
	u32 val = 0;
	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	/* The PF field within PL_WHOAMI moved on T6. */
	u32 pf = (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5
		  ? G_SOURCEPF(whoami)
		  : G_T6_SOURCEPF(whoami));

	/* Chip-generation-specific SGE error bits to fold into ENABLE3. */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
	else
		val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
	/* Enable the common SGE error interrupts plus the per-chip set. */
	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
		     F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
		     F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
	/* Unmask this PF's interrupts and add it to the PL interrupt map. */
	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
}
5408 
5409 /**
5410  *	t4_intr_disable - disable interrupts
5411  *	@adapter: the adapter whose interrupts should be disabled
5412  *
5413  *	Disable interrupts.  We only disable the top-level interrupt
5414  *	concentrators.  The caller must be a PCI function managing global
5415  *	interrupts.
5416  */
5417 void t4_intr_disable(struct adapter *adapter)
5418 {
5419 	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
5420 	u32 pf = (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5
5421 		  ? G_SOURCEPF(whoami)
5422 		  : G_T6_SOURCEPF(whoami));
5423 
5424 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
5425 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
5426 }
5427 
5428 /**
5429  *	t4_config_rss_range - configure a portion of the RSS mapping table
5430  *	@adapter: the adapter
5431  *	@mbox: mbox to use for the FW command
5432  *	@viid: virtual interface whose RSS subtable is to be written
5433  *	@start: start entry in the table to write
5434  *	@n: how many table entries to write
5435  *	@rspq: values for the "response queue" (Ingress Queue) lookup table
5436  *	@nrspq: number of values in @rspq
5437  *
5438  *	Programs the selected part of the VI's RSS mapping table with the
5439  *	provided values.  If @nrspq < @n the supplied values are used repeatedly
5440  *	until the full table range is populated.
5441  *
5442  *	The caller must ensure the values in @rspq are in the range allowed for
5443  *	@viid.
5444  */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	/* Cursor into @rspq; wraps back to the start when exhausted. */
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/* Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/* Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/* Advance the table position by "nq" entries, ready for
		 * the start of the next loop iteration.
		 */
		start += nq;
		n -= nq;

		/* While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/* Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/* Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
5517 
5518 /**
5519  *	t4_config_glbl_rss - configure the global RSS mode
5520  *	@adapter: the adapter
5521  *	@mbox: mbox to use for the FW command
5522  *	@mode: global RSS mode
5523  *	@flags: mode-specific flags
5524  *
5525  *	Sets the global RSS mode.
5526  */
5527 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5528 		       unsigned int flags)
5529 {
5530 	struct fw_rss_glb_config_cmd c;
5531 
5532 	memset(&c, 0, sizeof(c));
5533 	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
5534 				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
5535 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5536 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5537 		c.u.manual.mode_pkd =
5538 			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5539 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5540 		c.u.basicvirtual.mode_keymode =
5541 			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
5542 		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
5543 	} else
5544 		return -EINVAL;
5545 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5546 }
5547 
5548 /**
5549  *	t4_config_vi_rss - configure per VI RSS settings
5550  *	@adapter: the adapter
5551  *	@mbox: mbox to use for the FW command
5552  *	@viid: the VI id
5553  *	@flags: RSS flags
5554  *	@defq: id of the default RSS queue for the VI.
5555  *	@skeyidx: RSS secret key table index for non-global mode
5556  *	@skey: RSS vf_scramble key for VI.
5557  *
5558  *	Configures VI-specific RSS properties.
5559  */
5560 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5561 		     unsigned int flags, unsigned int defq, unsigned int skeyidx,
5562 		     unsigned int skey)
5563 {
5564 	struct fw_rss_vi_config_cmd c;
5565 
5566 	memset(&c, 0, sizeof(c));
5567 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
5568 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
5569 				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
5570 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5571 	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5572 					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
5573 	c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
5574 					V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
5575 	c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
5576 
5577 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5578 }
5579 
5580 /* Read an RSS table row */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	/*
	 * Write the row number (with the upper control bits 0xfff00000
	 * set -- presumably the read-request encoding; confirm against
	 * the TP_RSS_LKP_TABLE register description) to start the read,
	 * then poll up to 5 times for F_LKPTBLROWVLD to indicate the
	 * row data is valid, returning the register value in *val.
	 */
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}
5587 
5588 /**
5589  *	t4_read_rss - read the contents of the RSS mapping table
5590  *	@adapter: the adapter
5591  *	@map: holds the contents of the RSS mapping table
5592  *
5593  *	Reads the contents of the RSS hash->queue mapping table.
5594  */
5595 int t4_read_rss(struct adapter *adapter, u16 *map)
5596 {
5597 	u32 val;
5598 	int i, ret;
5599 
5600 	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
5601 		ret = rd_rss_row(adapter, i, &val);
5602 		if (ret)
5603 			return ret;
5604 		*map++ = G_LKPTBLQUEUE0(val);
5605 		*map++ = G_LKPTBLQUEUE1(val);
5606 	}
5607 	return 0;
5608 }
5609 
5610 /**
5611  * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5612  * @adap: the adapter
5613  * @cmd: TP fw ldst address space type
5614  * @vals: where the indirect register values are stored/written
5615  * @nregs: how many indirect registers to read/write
 * @start_index: index of first indirect register to read/write
5617  * @rw: Read (1) or Write (0)
5618  * @sleep_ok: if true we may sleep while awaiting command completion
5619  *
5620  * Access TP indirect registers through LDST
5621  **/
static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
			    unsigned int nregs, unsigned int start_index,
			    unsigned int rw, bool sleep_ok)
{
	int ret = 0;
	unsigned int i;
	struct fw_ldst_cmd c;

	/* One mailbox LDST command per register; stop on the first error. */
	for (i = 0; i < nregs; i++) {
		memset(&c, 0, sizeof(c));
		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
						F_FW_CMD_REQUEST |
						(rw ? F_FW_CMD_READ :
						      F_FW_CMD_WRITE) |
						V_FW_LDST_CMD_ADDRSPACE(cmd));
		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));

		/* For writes, ship the caller's value; for reads, send 0. */
		c.u.addrval.addr = cpu_to_be32(start_index + i);
		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
				      sleep_ok);
		if (ret)
			return ret;

		/* On a read, the firmware reply holds the register value. */
		if (rw)
			vals[i] = be32_to_cpu(c.u.addrval.val);
	}
	return 0;
}
5651 
5652 /**
5653  * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5654  * @adap: the adapter
5655  * @reg_addr: Address Register
5656  * @reg_data: Data register
5657  * @buff: where the indirect register values are stored/written
5658  * @nregs: how many indirect registers to read/write
5659  * @start_index: index of first indirect register to read/write
5660  * @rw: READ(1) or WRITE(0)
5661  * @sleep_ok: if true we may sleep while awaiting command completion
5662  *
5663  * Read/Write TP indirect registers through LDST if possible.
5664  * Else, use backdoor access
5665  **/
static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
			      u32 *buff, u32 nregs, u32 start_index, int rw,
			      bool sleep_ok)
{
	/*
	 * rc stays -EINVAL (forcing the backdoor path below) unless the
	 * firmware LDST attempt is made and succeeds.
	 */
	int rc = -EINVAL;
	int cmd;

	/* Map the indirect address register to its LDST address space. */
	switch (reg_addr) {
	case A_TP_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_PIO;
		break;
	case A_TP_TM_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
		break;
	case A_TP_MIB_INDEX:
		cmd = FW_LDST_ADDRSPC_TP_MIB;
		break;
	default:
		/* No LDST equivalent -- use backdoor access directly. */
		goto indirect_access;
	}

	if (t4_use_ldst(adap))
		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
				      sleep_ok);

indirect_access:

	/* LDST unavailable or failed: fall back to direct register access. */
	if (rc) {
		if (rw)
			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
					 start_index);
		else
			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
					  start_index);
	}
}
5702 
5703 /**
5704  * t4_tp_pio_read - Read TP PIO registers
5705  * @adap: the adapter
5706  * @buff: where the indirect register values are written
5707  * @nregs: how many indirect registers to read
5708  * @start_index: index of first indirect register to read
5709  * @sleep_ok: if true we may sleep while awaiting command completion
5710  *
5711  * Read TP PIO Registers
5712  **/
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		    u32 start_index, bool sleep_ok)
{
	/* rw == 1 selects a read, via LDST or the PIO backdoor. */
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}
5719 
5720 /**
5721  * t4_tp_pio_write - Write TP PIO registers
5722  * @adap: the adapter
5723  * @buff: where the indirect register values are stored
5724  * @nregs: how many indirect registers to write
5725  * @start_index: index of first indirect register to write
5726  * @sleep_ok: if true we may sleep while awaiting command completion
5727  *
5728  * Write TP PIO Registers
5729  **/
void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
		     u32 start_index, bool sleep_ok)
{
	/* rw == 0 selects a write, via LDST or the PIO backdoor. */
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
			  start_index, 0, sleep_ok);
}
5736 
5737 /**
5738  * t4_tp_tm_pio_read - Read TP TM PIO registers
5739  * @adap: the adapter
5740  * @buff: where the indirect register values are written
5741  * @nregs: how many indirect registers to read
5742  * @start_index: index of first indirect register to read
5743  * @sleep_ok: if true we may sleep while awaiting command completion
5744  *
5745  * Read TP TM PIO Registers
5746  **/
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		       u32 start_index, bool sleep_ok)
{
	/* rw == 1 selects a read, via LDST or the TM PIO backdoor. */
	t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
			  nregs, start_index, 1, sleep_ok);
}
5753 
5754 /**
5755  * t4_tp_mib_read - Read TP MIB registers
5756  * @adap: the adapter
5757  * @buff: where the indirect register values are written
5758  * @nregs: how many indirect registers to read
5759  * @start_index: index of first indirect register to read
5760  * @sleep_ok: if true we may sleep while awaiting command completion
5761  *
5762  * Read TP MIB Registers
5763  **/
void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
		    bool sleep_ok)
{
	/* rw == 1 selects a read, via LDST or the MIB index backdoor. */
	t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}
5770 
5771 /**
5772  *	t4_read_rss_key - read the global RSS key
5773  *	@adap: the adapter
5774  *	@key: 10-entry array holding the 320-bit RSS key
5775  * 	@sleep_ok: if true we may sleep while awaiting command completion
5776  *
5777  *	Reads the global 320-bit RSS key.
5778  */
void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
{
	/* The 320-bit key lives in 10 consecutive 32-bit TP registers. */
	t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
}
5783 
5784 /**
5785  *	t4_write_rss_key - program one of the RSS keys
5786  *	@adap: the adapter
5787  *	@key: 10-entry array holding the 320-bit RSS key
5788  *	@idx: which RSS key to write
5789  * 	@sleep_ok: if true we may sleep while awaiting command completion
5790  *
5791  *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
5792  *	0..15 the corresponding entry in the RSS key table is written,
5793  *	otherwise the global RSS key is written.
5794  */
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
		      bool sleep_ok)
{
	u8 rss_key_addr_cnt = 16;
	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);

	/* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
	 * allows access to key addresses 16-63 by using KeyWrAddrX
	 * as index[5:4](upper 2) into key table
	 */
	if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
		rss_key_addr_cnt = 32;

	/*
	 * Write the 320-bit key into the global secret key registers.
	 * The cast drops const because t4_tp_pio_write() takes a
	 * non-const buffer; for a write it presumably only reads from
	 * it -- NOTE(review): confirm no write-back path exists.
	 */
	t4_tp_pio_write(adap, (void *)key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);

	/* If a valid table index was given, latch the key into that
	 * entry of the RSS key table as well.
	 */
	if (idx >= 0 && idx < rss_key_addr_cnt) {
		if (rss_key_addr_cnt > 16)
			/* Extended addressing: split idx across KeyWrAddrX
			 * (upper bits) and the T6 VF write address field.
			 */
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt | V_KEYWRADDRX(idx >> 4) |
				     V_T6_VFWRADDR(idx) | F_KEYWREN);
		else
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt| V_KEYWRADDR(idx) | F_KEYWREN);
	}
}
5821 
5822 /**
5823  *	t4_read_rss_pf_config - read PF RSS Configuration Table
5824  *	@adapter: the adapter
5825  *	@index: the entry in the PF RSS table to read
5826  *	@valp: where to store the returned value
5827  * 	@sleep_ok: if true we may sleep while awaiting command completion
5828  *
5829  *	Reads the PF RSS Configuration Table at the specified index and returns
5830  *	the value found there.
5831  */
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
			   u32 *valp, bool sleep_ok)
{
	/* PF config entries are consecutive starting at PF0_CONFIG. */
	t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
}
5837 
5838 /**
5839  *	t4_write_rss_pf_config - write PF RSS Configuration Table
5840  *	@adapter: the adapter
5841  *	@index: the entry in the VF RSS table to read
5842  *	@val: the value to store
5843  * 	@sleep_ok: if true we may sleep while awaiting command completion
5844  *
5845  *	Writes the PF RSS Configuration Table at the specified index with the
5846  *	specified value.
5847  */
void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
			    u32 val, bool sleep_ok)
{
	/* PF config entries are consecutive starting at PF0_CONFIG. */
	t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
			sleep_ok);
}
5854 
5855 /**
5856  *	t4_read_rss_vf_config - read VF RSS Configuration Table
5857  *	@adapter: the adapter
5858  *	@index: the entry in the VF RSS table to read
5859  *	@vfl: where to store the returned VFL
5860  *	@vfh: where to store the returned VFH
5861  * 	@sleep_ok: if true we may sleep while awaiting command completion
5862  *
5863  *	Reads the VF RSS Configuration Table at the specified index and returns
5864  *	the (VFL, VFH) values found there.
5865  */
5866 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5867 			   u32 *vfl, u32 *vfh, bool sleep_ok)
5868 {
5869 	u32 vrt, mask, data;
5870 
5871 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
5872 		mask = V_VFWRADDR(M_VFWRADDR);
5873 		data = V_VFWRADDR(index);
5874 	} else {
5875 		 mask =  V_T6_VFWRADDR(M_T6_VFWRADDR);
5876 		 data = V_T6_VFWRADDR(index);
5877 	}
5878 	/*
5879 	 * Request that the index'th VF Table values be read into VFL/VFH.
5880 	 */
5881 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5882 	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5883 	vrt |= data | F_VFRDEN;
5884 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5885 
5886 	/*
5887 	 * Grab the VFL/VFH values ...
5888 	 */
5889 	t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5890 	t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5891 }
5892 
5893 /**
5894  *	t4_read_rss_pf_map - read PF RSS Map
5895  *	@adapter: the adapter
5896  * 	@sleep_ok: if true we may sleep while awaiting command completion
5897  *
5898  *	Reads the PF RSS Map register and returns its value.
5899  */
5900 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5901 {
5902 	u32 pfmap;
5903 
5904 	t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
5905 
5906 	return pfmap;
5907 }
5908 
5909 /**
5910  *	t4_read_rss_pf_mask - read PF RSS Mask
5911  *	@adapter: the adapter
5912  * 	@sleep_ok: if true we may sleep while awaiting command completion
5913  *
5914  *	Reads the PF RSS Mask register and returns its value.
5915  */
5916 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
5917 {
5918 	u32 pfmask;
5919 
5920 	t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
5921 
5922 	return pfmask;
5923 }
5924 
5925 /**
5926  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
5927  *	@adap: the adapter
5928  *	@v4: holds the TCP/IP counter values
5929  *	@v6: holds the TCP/IPv6 counter values
5930  * 	@sleep_ok: if true we may sleep while awaiting command completion
5931  *
5932  *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5933  *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5934  */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6, bool sleep_ok)
{
	/* Scratch buffer sized to span OUT_RST .. RXT_SEG_LO inclusive. */
	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];

/* Index into val[] relative to the first counter in the read window. */
#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
/* Combine the HI/LO 32-bit halves of a 64-bit counter. */
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		/* One bulk read covers all IPv4 TCP counters. */
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_OUT_RST, sleep_ok);
		v4->tcp_out_rsts = STAT(OUT_RST);
		v4->tcp_in_segs  = STAT64(IN_SEG);
		v4->tcp_out_segs = STAT64(OUT_SEG);
		v4->tcp_retrans_segs = STAT64(RXT_SEG);
	}
	if (v6) {
		/* The IPv6 window has the same layout at a different base,
		 * so the same STAT macros apply unchanged.
		 */
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
		v6->tcp_out_rsts = STAT(OUT_RST);
		v6->tcp_in_segs  = STAT64(IN_SEG);
		v6->tcp_out_segs = STAT64(OUT_SEG);
		v6->tcp_retrans_segs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}
5964 
5965 /**
5966  *	t4_tp_get_err_stats - read TP's error MIB counters
5967  *	@adap: the adapter
5968  *	@st: holds the counter values
5969  * 	@sleep_ok: if true we may sleep while awaiting command completion
5970  *
5971  *	Returns the values of TP's error counters.
5972  */
5973 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
5974 			 bool sleep_ok)
5975 {
5976 	int nchan = adap->params.arch.nchan;
5977 
5978 	t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
5979 		       sleep_ok);
5980 
5981 	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
5982 		       sleep_ok);
5983 
5984 	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
5985 		       sleep_ok);
5986 
5987 	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
5988 		       A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);
5989 
5990 	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
5991 		       A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);
5992 
5993 	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
5994 		       sleep_ok);
5995 
5996 	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
5997 		       A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);
5998 
5999 	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
6000 		       A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);
6001 
6002 	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
6003 		       sleep_ok);
6004 }
6005 
6006 /**
6007  *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
6008  *	@adap: the adapter
6009  *	@st: holds the counter values
6010  * 	@sleep_ok: if true we may sleep while awaiting command completion
6011  *
6012  *	Returns the values of TP's CPL counters.
6013  */
6014 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
6015 			 bool sleep_ok)
6016 {
6017 	int nchan = adap->params.arch.nchan;
6018 
6019 	t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);
6020 
6021 	t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
6022 }
6023 
6024 /**
6025  *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
6026  *	@adap: the adapter
6027  *	@st: holds the counter values
6028  *
6029  *	Returns the values of TP's RDMA counters.
6030  */
6031 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
6032 			  bool sleep_ok)
6033 {
6034 	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
6035 		       sleep_ok);
6036 }
6037 
6038 /**
6039  *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
6040  *	@adap: the adapter
6041  *	@idx: the port index
6042  *	@st: holds the counter values
6043  * 	@sleep_ok: if true we may sleep while awaiting command completion
6044  *
6045  *	Returns the values of TP's FCoE counters for the selected port.
6046  */
6047 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
6048 		       struct tp_fcoe_stats *st, bool sleep_ok)
6049 {
6050 	u32 val[2];
6051 
6052 	t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
6053 		       sleep_ok);
6054 
6055 	t4_tp_mib_read(adap, &st->frames_drop, 1,
6056 		       A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);
6057 
6058 	t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
6059 		       sleep_ok);
6060 
6061 	st->octets_ddp = ((u64)val[0] << 32) | val[1];
6062 }
6063 
6064 /**
6065  *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
6066  *	@adap: the adapter
6067  *	@st: holds the counter values
6068  * 	@sleep_ok: if true we may sleep while awaiting command completion
6069  *
6070  *	Returns the values of TP's counters for non-TCP directly-placed packets.
6071  */
6072 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
6073 		      bool sleep_ok)
6074 {
6075 	u32 val[4];
6076 
6077 	t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);
6078 
6079 	st->frames = val[0];
6080 	st->drops = val[1];
6081 	st->octets = ((u64)val[2] << 32) | val[3];
6082 }
6083 
6084 /**
6085  *	t4_read_mtu_tbl - returns the values in the HW path MTU table
6086  *	@adap: the adapter
6087  *	@mtus: where to store the MTU values
6088  *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
6089  *
6090  *	Reads the HW path MTU table.
6091  */
6092 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
6093 {
6094 	u32 v;
6095 	int i;
6096 
6097 	for (i = 0; i < NMTUS; ++i) {
6098 		t4_write_reg(adap, A_TP_MTU_TABLE,
6099 			     V_MTUINDEX(0xffU) | V_MTUVALUE(i));
6100 		v = t4_read_reg(adap, A_TP_MTU_TABLE);
6101 		mtus[i] = G_MTUVALUE(v);
6102 		if (mtu_log)
6103 			mtu_log[i] = G_MTUWIDTH(v);
6104 	}
6105 }
6106 
6107 /**
6108  *	t4_read_cong_tbl - reads the congestion control table
6109  *	@adap: the adapter
6110  *	@incr: where to store the alpha values
6111  *
6112  *	Reads the additive increments programmed into the HW congestion
6113  *	control table.
6114  */
6115 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
6116 {
6117 	unsigned int mtu, w;
6118 
6119 	for (mtu = 0; mtu < NMTUS; ++mtu)
6120 		for (w = 0; w < NCCTRL_WIN; ++w) {
6121 			t4_write_reg(adap, A_TP_CCTRL_TABLE,
6122 				     V_ROWINDEX(0xffffU) | (mtu << 5) | w);
6123 			incr[mtu][w] = (u16)t4_read_reg(adap,
6124 						A_TP_CCTRL_TABLE) & 0x1fff;
6125 		}
6126 }
6127 
6128 /**
6129  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
6130  *	@adap: the adapter
6131  *	@addr: the indirect TP register address
6132  *	@mask: specifies the field within the register to modify
6133  *	@val: new value for the field
6134  *
6135  *	Sets a field of an indirect TP register to the given value.
6136  */
6137 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
6138 			    unsigned int mask, unsigned int val)
6139 {
6140 	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
6141 	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
6142 	t4_write_reg(adap, A_TP_PIO_DATA, val);
6143 }
6144 
6145 /**
6146  *	init_cong_ctrl - initialize congestion control parameters
6147  *	@a: the alpha values for congestion control
6148  *	@b: the beta values for congestion control
6149  *
6150  *	Initialize the congestion control parameters.
6151  */
6152 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
6153 {
6154 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
6155 	a[9] = 2;
6156 	a[10] = 3;
6157 	a[11] = 4;
6158 	a[12] = 5;
6159 	a[13] = 6;
6160 	a[14] = 7;
6161 	a[15] = 8;
6162 	a[16] = 9;
6163 	a[17] = 10;
6164 	a[18] = 14;
6165 	a[19] = 17;
6166 	a[20] = 21;
6167 	a[21] = 25;
6168 	a[22] = 30;
6169 	a[23] = 35;
6170 	a[24] = 45;
6171 	a[25] = 60;
6172 	a[26] = 80;
6173 	a[27] = 100;
6174 	a[28] = 200;
6175 	a[29] = 300;
6176 	a[30] = 400;
6177 	a[31] = 500;
6178 
6179 	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
6180 	b[9] = b[10] = 1;
6181 	b[11] = b[12] = 2;
6182 	b[13] = b[14] = b[15] = b[16] = 3;
6183 	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
6184 	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
6185 	b[28] = b[29] = 6;
6186 	b[30] = b[31] = 7;
6187 }
6188 
6189 /* The minimum additive increment value for the congestion control table */
6190 #define CC_MIN_INCR 2U
6191 
6192 /**
6193  *	t4_load_mtus - write the MTU and congestion control HW tables
6194  *	@adap: the adapter
6195  *	@mtus: the values for the MTU table
6196  *	@alpha: the values for the congestion control alpha parameter
6197  *	@beta: the values for the congestion control beta parameter
6198  *
6199  *	Write the HW MTU table with the supplied MTUs and the high-speed
6200  *	congestion control table with the supplied alpha, beta, and MTUs.
6201  *	We write the two tables together because the additive increments
6202  *	depend on the MTUs.
6203  */
6204 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
6205 		  const unsigned short *alpha, const unsigned short *beta)
6206 {
6207 	static const unsigned int avg_pkts[NCCTRL_WIN] = {
6208 		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
6209 		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
6210 		28672, 40960, 57344, 81920, 114688, 163840, 229376
6211 	};
6212 
6213 	unsigned int i, w;
6214 
6215 	for (i = 0; i < NMTUS; ++i) {
6216 		unsigned int mtu = mtus[i];
6217 		unsigned int log2 = fls(mtu);
6218 
6219 		if (!(mtu & ((1 << log2) >> 2)))     /* round */
6220 			log2--;
6221 		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
6222 			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
6223 
6224 		for (w = 0; w < NCCTRL_WIN; ++w) {
6225 			unsigned int inc;
6226 
6227 			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
6228 				  CC_MIN_INCR);
6229 
6230 			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
6231 				     (w << 16) | (beta[w] << 13) | inc);
6232 		}
6233 	}
6234 }
6235 
6236 /*
6237  * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
6238  * clocks.  The formula is
6239  *
6240  * bytes/s = bytes256 * 256 * ClkFreq / 4096
6241  *
6242  * which is equivalent to
6243  *
6244  * bytes/s = 62.5 * bytes256 * ClkFreq_ms
6245  */
6246 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
6247 {
6248 	u64 v = bytes256 * adap->params.vpd.cclk;
6249 
6250 	return v * 62 + v / 2;
6251 }
6252 
6253 /**
6254  *	t4_get_chan_txrate - get the current per channel Tx rates
6255  *	@adap: the adapter
6256  *	@nic_rate: rates for NIC traffic
6257  *	@ofld_rate: rates for offloaded traffic
6258  *
6259  *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
6260  *	for each channel.
6261  */
6262 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
6263 {
6264 	u32 v;
6265 
6266 	v = t4_read_reg(adap, A_TP_TX_TRATE);
6267 	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
6268 	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
6269 	if (adap->params.arch.nchan == NCHAN) {
6270 		nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
6271 		nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
6272 	}
6273 
6274 	v = t4_read_reg(adap, A_TP_TX_ORATE);
6275 	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
6276 	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
6277 	if (adap->params.arch.nchan == NCHAN) {
6278 		ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
6279 		ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
6280 	}
6281 }
6282 
6283 /**
6284  *	t4_set_trace_filter - configure one of the tracing filters
6285  *	@adap: the adapter
6286  *	@tp: the desired trace filter parameters
6287  *	@idx: which filter to configure
6288  *	@enable: whether to enable or disable the filter
6289  *
6290  *	Configures one of the tracing filters available in HW.  If @enable is
6291  *	%0 @tp is not examined and may be %NULL. The user is responsible to
6292  *	set the single/multiple trace mode by writing to A_MPS_TRC_CFG register
6293  *	by using "cxgbtool iface reg reg_addr=val" command. See t4_sniffer/
6294  *	docs/readme.txt for a complete description of how to setup traceing on
6295  *	T4.
6296  */
6297 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
6298 			int enable)
6299 {
6300 	int i, ofst = idx * 4;
6301 	u32 data_reg, mask_reg, cfg;
6302 
6303 	if (!enable) {
6304 		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
6305 		return 0;
6306 	}
6307 
6308 	/*
6309 	 * TODO - After T4 data book is updated, specify the exact
6310 	 * section below.
6311 	 *
6312 	 * See T4 data book - MPS section for a complete description
6313 	 * of the below if..else handling of A_MPS_TRC_CFG register
6314 	 * value.
6315 	 */
6316 	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
6317 	if (cfg & F_TRCMULTIFILTER) {
6318 		/*
6319 		 * If multiple tracers are enabled, then maximum
6320 		 * capture size is 2.5KB (FIFO size of a single channel)
6321 		 * minus 2 flits for CPL_TRACE_PKT header.
6322 		 */
6323 		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
6324 			return -EINVAL;
6325 	}
6326 	else {
6327 		/*
6328 		 * If multiple tracers are disabled, to avoid deadlocks
6329 		 * maximum packet capture size of 9600 bytes is recommended.
6330 		 * Also in this mode, only trace0 can be enabled and running.
6331 		 */
6332 		if (tp->snap_len > 9600 || idx)
6333 			return -EINVAL;
6334 	}
6335 
6336 	if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
6337 	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
6338 	    tp->min_len > M_TFMINPKTSIZE)
6339 		return -EINVAL;
6340 
6341 	/* stop the tracer we'll be changing */
6342 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
6343 
6344 	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
6345 	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
6346 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
6347 
6348 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
6349 		t4_write_reg(adap, data_reg, tp->data[i]);
6350 		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
6351 	}
6352 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
6353 		     V_TFCAPTUREMAX(tp->snap_len) |
6354 		     V_TFMINPKTSIZE(tp->min_len));
6355 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
6356 		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
6357 		     (is_t4(adap->params.chip) ?
6358 		     V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert) :
6359 		     V_T5_TFPORT(tp->port) | F_T5_TFEN |
6360 		     V_T5_TFINVERTMATCH(tp->invert)));
6361 
6362 	return 0;
6363 }
6364 
6365 /**
6366  *	t4_get_trace_filter - query one of the tracing filters
6367  *	@adap: the adapter
6368  *	@tp: the current trace filter parameters
6369  *	@idx: which trace filter to query
6370  *	@enabled: non-zero if the filter is enabled
6371  *
6372  *	Returns the current settings of one of the HW tracing filters.
6373  */
6374 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
6375 			 int *enabled)
6376 {
6377 	u32 ctla, ctlb;
6378 	int i, ofst = idx * 4;
6379 	u32 data_reg, mask_reg;
6380 
6381 	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
6382 	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
6383 
6384 	if (is_t4(adap->params.chip)) {
6385 		*enabled = !!(ctla & F_TFEN);
6386 		tp->port =  G_TFPORT(ctla);
6387 		tp->invert = !!(ctla & F_TFINVERTMATCH);
6388 	} else {
6389 		*enabled = !!(ctla & F_T5_TFEN);
6390 		tp->port = G_T5_TFPORT(ctla);
6391 		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
6392 	}
6393 	tp->snap_len = G_TFCAPTUREMAX(ctlb);
6394 	tp->min_len = G_TFMINPKTSIZE(ctlb);
6395 	tp->skip_ofst = G_TFOFFSET(ctla);
6396 	tp->skip_len = G_TFLENGTH(ctla);
6397 
6398 	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
6399 	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
6400 	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
6401 
6402 	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
6403 		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
6404 		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
6405 	}
6406 }
6407 
6408 /**
6409  *	t4_read_tcb - read a hardware TCP Control Block structure
6410  *	@adap: the adapter
6411  *	@win: PCI-E Memory Window to use
6412  *	@tid: the TCB ID
6413  *	@tcb: the buffer to return the TCB in
6414  *
6415  *	Reads the indicated hardware TCP Control Block and returns it in
6416  *	the supplied buffer.  Returns 0 on success.
6417  */
6418 int t4_read_tcb(struct adapter *adap, int win, int tid, u32 tcb[TCB_SIZE/4])
6419 {
6420 	u32 tcb_base = t4_read_reg(adap, A_TP_CMM_TCB_BASE);
6421 	u32 tcb_addr = tcb_base + tid * TCB_SIZE;
6422 	__be32 raw_tcb[TCB_SIZE/4];
6423 	int ret, word;
6424 
6425 	ret = t4_memory_rw_addr(adap, win,
6426 				tcb_addr, sizeof raw_tcb, raw_tcb,
6427 				T4_MEMORY_READ);
6428 	if (ret)
6429 		return ret;
6430 
6431 	for (word = 0; word < 32; word++)
6432 		tcb[word] = be32_to_cpu(raw_tcb[word]);
6433 	return 0;
6434 }
6435 
6436 /**
6437  *	t4_pmtx_get_stats - returns the HW stats from PMTX
6438  *	@adap: the adapter
6439  *	@cnt: where to store the count statistics
6440  *	@cycles: where to store the cycle statistics
6441  *
6442  *	Returns performance statistics from PMTX.
6443  */
6444 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6445 {
6446 	int i;
6447 	u32 data[2];
6448 
6449 	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
6450 		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
6451 		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
6452 		if (is_t4(adap->params.chip)) {
6453 			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
6454 		} else {
6455 			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
6456 					 A_PM_TX_DBG_DATA, data, 2,
6457 					 A_PM_TX_DBG_STAT_MSB);
6458 			cycles[i] = (((u64)data[0] << 32) | data[1]);
6459 		}
6460 	}
6461 }
6462 
6463 /**
6464  *	t4_pmrx_get_stats - returns the HW stats from PMRX
6465  *	@adap: the adapter
6466  *	@cnt: where to store the count statistics
6467  *	@cycles: where to store the cycle statistics
6468  *
6469  *	Returns performance statistics from PMRX.
6470  */
6471 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6472 {
6473 	int i;
6474 	u32 data[2];
6475 
6476 	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
6477 		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
6478 		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
6479 		if (is_t4(adap->params.chip)) {
6480 			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
6481 		} else {
6482 			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
6483 					 A_PM_RX_DBG_DATA, data, 2,
6484 					 A_PM_RX_DBG_STAT_MSB);
6485 			cycles[i] = (((u64)data[0] << 32) | data[1]);
6486 		}
6487 	}
6488 }
6489 
6490 /**
6491  *	t4_get_mps_bg_map - return the buffer groups associated with a port
6492  *	@adapter: the adapter
6493  *	@pidx: the port index
6494  *
6495  *	Returns a bitmap indicating which MPS buffer groups are associated
6496  *	with the given Port.  Bit i is set if buffer group i is used by the
6497  *	Port.
6498  */
6499 unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
6500 {
6501 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6502 	unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
6503 	u32 param, val;
6504 	int ret;
6505 
6506 	if (pidx >= nports) {
6507 		CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n", pidx, nports);
6508 		return 0;
6509 	}
6510 
6511 	/* FW version >= 1.16.34.0 can determine buffergroup map using
6512 	 * FW_PARAMS_PARAM_DEV_MPSBGMAP API. We will initially try to
6513 	 * use this API. If it fails, revert back to old hardcoded way.
6514 	 * The value obtained from FW is encoded in below format
6515 	 * val = (( MPSBGMAP[Port 3] << 24 ) |
6516 	 *        ( MPSBGMAP[Port 2] << 16 ) |
6517 	 *        ( MPSBGMAP[Port 1] <<  8 ) |
6518 	 *        ( MPSBGMAP[Port 0] <<  0 ))
6519 	 */
6520 	if (adapter->flags & FW_OK) {
6521 		param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
6522 			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MPSBGMAP));
6523 		ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
6524 					 0, 1, &param, &val);
6525 		if (!ret)
6526 			return (val >> (8 * pidx)) & 0xff;
6527 	}
6528 
6529 	/* FW_PARAMS_PARAM_DEV_MPSBGMAP API has failed. Falling back to driver
6530 	 * to determine bgmap.
6531 	 */
6532 	switch (chip_version) {
6533 	case CHELSIO_T4:
6534 	case CHELSIO_T5:
6535 		switch (nports) {
6536 		case 1: return 0xf;
6537 		case 2: return 3 << (2 * pidx);
6538 		case 4: return 1 << pidx;
6539 		}
6540 		break;
6541 
6542 	case CHELSIO_T6:
6543 		switch (nports) {
6544 		case 2: return 1 << (2 * pidx);
6545 		}
6546 		break;
6547 	}
6548 
6549 	CH_ERR(adapter, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
6550 	       chip_version, nports);
6551 	return 0;
6552 }
6553 
6554 /**
6555  *      t4_get_tp_e2c_map - return the E2C channel map associated with a port
6556  *      @adapter: the adapter
6557  *      @pidx: the port index
6558  */
6559 unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
6560 {
6561 	unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
6562 	u32 param, val = 0;
6563 	int ret;
6564 
6565 	if (pidx >= nports) {
6566 		CH_WARN(adapter, "TP E2C Channel Port Index %d >= Nports %d\n", pidx, nports);
6567 		return 0;
6568 	}
6569 
6570 	/* FW version >= 1.16.44.0 can determine E2C channel map using
6571 	 * FW_PARAMS_PARAM_DEV_TPCHMAP API.
6572 	 */
6573 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
6574 		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPCHMAP));
6575 	ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
6576 					 0, 1, &param, &val);
6577 	if (!ret)
6578 		return (val >> (8*pidx)) & 0xff;
6579 
6580 	return 0;
6581 }
6582 
6583 /**
6584  *	t4_get_tp_ch_map - return TP ingress channels associated with a port
6585  *	@adapter: the adapter
6586  *	@pidx: the port index
6587  *
6588  *	Returns a bitmap indicating which TP Ingress Channels are associated with
6589  *	a given Port.  Bit i is set if TP Ingress Channel i is used by the Port.
6590  */
6591 unsigned int t4_get_tp_ch_map(struct adapter *adapter, int pidx)
6592 {
6593 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6594 	unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL));
6595 
6596 	if (pidx >= nports) {
6597 		CH_WARN(adapter, "TP Port Index %d >= Nports %d\n", pidx, nports);
6598 		return 0;
6599 	}
6600 
6601 	switch (chip_version) {
6602 	case CHELSIO_T4:
6603 	case CHELSIO_T5:
6604 		/*
6605 		 * Note that this happens to be the same values as the MPS
6606 		 * Buffer Group Map for these Chips.  But we replicate the code
6607 		 * here because they're really separate concepts.
6608 		 */
6609 		switch (nports) {
6610 		case 1: return 0xf;
6611 		case 2: return 3 << (2 * pidx);
6612 		case 4: return 1 << pidx;
6613 		}
6614 		break;
6615 
6616 	case CHELSIO_T6:
6617 		switch (nports) {
6618 		case 2: return 1 << pidx;
6619 		}
6620 		break;
6621 	}
6622 
6623 	CH_ERR(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n",
6624 	       chip_version, nports);
6625 	return 0;
6626 }
6627 
6628 /**
6629  *      t4_get_port_type_description - return Port Type string description
6630  *      @port_type: firmware Port Type enumeration
6631  */
6632 const char *t4_get_port_type_description(enum fw_port_type port_type)
6633 {
6634 	static const char *const port_type_description[] = {
6635 		"Fiber_XFI",
6636 		"Fiber_XAUI",
6637 		"BT_SGMII",
6638 		"BT_XFI",
6639 		"BT_XAUI",
6640 		"KX4",
6641 		"CX4",
6642 		"KX",
6643 		"KR",
6644 		"SFP",
6645 		"BP_AP",
6646 		"BP4_AP",
6647 		"QSFP_10G",
6648 		"QSA",
6649 		"QSFP",
6650 		"BP40_BA",
6651 		"KR4_100G",
6652 		"CR4_QSFP",
6653 		"CR_QSFP",
6654 		"CR2_QSFP",
6655 		"SFP28",
6656 		"KR_SFP28",
6657 	};
6658 
6659 	if (port_type < ARRAY_SIZE(port_type_description))
6660 		return port_type_description[port_type];
6661 	return "UNKNOWN";
6662 }
6663 
6664 /**
6665  *      t4_get_port_stats_offset - collect port stats relative to a previous
6666  *				   snapshot
6667  *      @adap: The adapter
6668  *      @idx: The port
6669  *      @stats: Current stats to fill
6670  *      @offset: Previous stats snapshot
6671  */
6672 void t4_get_port_stats_offset(struct adapter *adap, int idx,
6673 		struct port_stats *stats,
6674 		struct port_stats *offset)
6675 {
6676 	u64 *s, *o;
6677 	int i;
6678 
6679 	t4_get_port_stats(adap, idx, stats);
6680 	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
6681 			i < (sizeof(struct port_stats)/sizeof(u64)) ;
6682 			i++, s++, o++)
6683 		*s -= *o;
6684 }
6685 
6686 /**
6687  *	t4_get_port_stats - collect port statistics
6688  *	@adap: the adapter
6689  *	@idx: the port index
6690  *	@p: the stats structure to fill
6691  *
6692  *	Collect statistics related to the given port from HW.
6693  */
6694 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
6695 {
6696 	u32 bgmap = t4_get_mps_bg_map(adap, idx);
6697 	u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
6698 
6699 #define GET_STAT(name) \
6700 	t4_read_reg64(adap, \
6701 	(is_t4(adap->params.chip) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
6702 	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
6703 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
6704 
6705 	p->tx_octets		= GET_STAT(TX_PORT_BYTES);
6706 	p->tx_frames		= GET_STAT(TX_PORT_FRAMES);
6707 	p->tx_bcast_frames	= GET_STAT(TX_PORT_BCAST);
6708 	p->tx_mcast_frames	= GET_STAT(TX_PORT_MCAST);
6709 	p->tx_ucast_frames	= GET_STAT(TX_PORT_UCAST);
6710 	p->tx_error_frames	= GET_STAT(TX_PORT_ERROR);
6711 	p->tx_frames_64		= GET_STAT(TX_PORT_64B);
6712 	p->tx_frames_65_127	= GET_STAT(TX_PORT_65B_127B);
6713 	p->tx_frames_128_255	= GET_STAT(TX_PORT_128B_255B);
6714 	p->tx_frames_256_511	= GET_STAT(TX_PORT_256B_511B);
6715 	p->tx_frames_512_1023	= GET_STAT(TX_PORT_512B_1023B);
6716 	p->tx_frames_1024_1518	= GET_STAT(TX_PORT_1024B_1518B);
6717 	p->tx_frames_1519_max	= GET_STAT(TX_PORT_1519B_MAX);
6718 	p->tx_drop		= GET_STAT(TX_PORT_DROP);
6719 	p->tx_pause		= GET_STAT(TX_PORT_PAUSE);
6720 	p->tx_ppp0		= GET_STAT(TX_PORT_PPP0);
6721 	p->tx_ppp1		= GET_STAT(TX_PORT_PPP1);
6722 	p->tx_ppp2		= GET_STAT(TX_PORT_PPP2);
6723 	p->tx_ppp3		= GET_STAT(TX_PORT_PPP3);
6724 	p->tx_ppp4		= GET_STAT(TX_PORT_PPP4);
6725 	p->tx_ppp5		= GET_STAT(TX_PORT_PPP5);
6726 	p->tx_ppp6		= GET_STAT(TX_PORT_PPP6);
6727 	p->tx_ppp7		= GET_STAT(TX_PORT_PPP7);
6728 
6729 	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
6730 		if (stat_ctl & F_COUNTPAUSESTATTX) {
6731 			p->tx_frames -= p->tx_pause;
6732 			p->tx_octets -= p->tx_pause * 64;
6733 		}
6734 		if (stat_ctl & F_COUNTPAUSEMCTX)
6735 			p->tx_mcast_frames -= p->tx_pause;
6736 	}
6737 
6738 	p->rx_octets		= GET_STAT(RX_PORT_BYTES);
6739 	p->rx_frames		= GET_STAT(RX_PORT_FRAMES);
6740 	p->rx_bcast_frames	= GET_STAT(RX_PORT_BCAST);
6741 	p->rx_mcast_frames	= GET_STAT(RX_PORT_MCAST);
6742 	p->rx_ucast_frames	= GET_STAT(RX_PORT_UCAST);
6743 	p->rx_too_long		= GET_STAT(RX_PORT_MTU_ERROR);
6744 	p->rx_jabber		= GET_STAT(RX_PORT_MTU_CRC_ERROR);
6745 	p->rx_fcs_err		= GET_STAT(RX_PORT_CRC_ERROR);
6746 	p->rx_len_err		= GET_STAT(RX_PORT_LEN_ERROR);
6747 	p->rx_symbol_err	= GET_STAT(RX_PORT_SYM_ERROR);
6748 	p->rx_runt		= GET_STAT(RX_PORT_LESS_64B);
6749 	p->rx_frames_64		= GET_STAT(RX_PORT_64B);
6750 	p->rx_frames_65_127	= GET_STAT(RX_PORT_65B_127B);
6751 	p->rx_frames_128_255	= GET_STAT(RX_PORT_128B_255B);
6752 	p->rx_frames_256_511	= GET_STAT(RX_PORT_256B_511B);
6753 	p->rx_frames_512_1023	= GET_STAT(RX_PORT_512B_1023B);
6754 	p->rx_frames_1024_1518	= GET_STAT(RX_PORT_1024B_1518B);
6755 	p->rx_frames_1519_max	= GET_STAT(RX_PORT_1519B_MAX);
6756 	p->rx_pause		= GET_STAT(RX_PORT_PAUSE);
6757 	p->rx_ppp0		= GET_STAT(RX_PORT_PPP0);
6758 	p->rx_ppp1		= GET_STAT(RX_PORT_PPP1);
6759 	p->rx_ppp2		= GET_STAT(RX_PORT_PPP2);
6760 	p->rx_ppp3		= GET_STAT(RX_PORT_PPP3);
6761 	p->rx_ppp4		= GET_STAT(RX_PORT_PPP4);
6762 	p->rx_ppp5		= GET_STAT(RX_PORT_PPP5);
6763 	p->rx_ppp6		= GET_STAT(RX_PORT_PPP6);
6764 	p->rx_ppp7		= GET_STAT(RX_PORT_PPP7);
6765 
6766 	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
6767 		if (stat_ctl & F_COUNTPAUSESTATRX) {
6768 			p->rx_frames -= p->rx_pause;
6769 			p->rx_octets -= p->rx_pause * 64;
6770 		}
6771 		if (stat_ctl & F_COUNTPAUSEMCRX)
6772 			p->rx_mcast_frames -= p->rx_pause;
6773 	}
6774 
6775 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
6776 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
6777 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
6778 	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
6779 	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
6780 	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
6781 	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
6782 	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
6783 
6784 #undef GET_STAT
6785 #undef GET_STAT_COM
6786 }
6787 
6788 /**
6789  *	t4_get_lb_stats - collect loopback port statistics
6790  *	@adap: the adapter
6791  *	@idx: the loopback port index
6792  *	@p: the stats structure to fill
6793  *
6794  *	Return HW statistics for the given loopback port.
6795  */
6796 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
6797 {
6798 	u32 bgmap = t4_get_mps_bg_map(adap, idx);
6799 
6800 #define GET_STAT(name) \
6801 	t4_read_reg64(adap, \
6802 	(is_t4(adap->params.chip) ? \
6803 	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
6804 	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
6805 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
6806 
6807 	p->octets	= GET_STAT(BYTES);
6808 	p->frames	= GET_STAT(FRAMES);
6809 	p->bcast_frames	= GET_STAT(BCAST);
6810 	p->mcast_frames	= GET_STAT(MCAST);
6811 	p->ucast_frames	= GET_STAT(UCAST);
6812 	p->error_frames	= GET_STAT(ERROR);
6813 
6814 	p->frames_64		= GET_STAT(64B);
6815 	p->frames_65_127	= GET_STAT(65B_127B);
6816 	p->frames_128_255	= GET_STAT(128B_255B);
6817 	p->frames_256_511	= GET_STAT(256B_511B);
6818 	p->frames_512_1023	= GET_STAT(512B_1023B);
6819 	p->frames_1024_1518	= GET_STAT(1024B_1518B);
6820 	p->frames_1519_max	= GET_STAT(1519B_MAX);
6821 	p->drop			= GET_STAT(DROP_FRAMES);
6822 
6823 	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
6824 	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
6825 	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
6826 	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
6827 	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
6828 	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
6829 	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
6830 	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
6831 
6832 #undef GET_STAT
6833 #undef GET_STAT_COM
6834 }
6835 
/*	t4_mk_filtdelwr - create a delete filter WR
 *	@ftid: the filter ID
 *	@wr: the filter work request to populate
 *	@rqtype: the filter Request Type: 0 => IPv4, 1 => IPv6
 *	@qid: ingress queue to receive the delete notification
 *
 *	Creates a filter work request to delete the supplied filter.  If @qid
 *	is negative the delete notification is suppressed.
 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr,
		     int rqtype, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	/* Work request length is encoded in 16-byte units. */
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	/* NOREPLY is set for negative @qid, suppressing the notification. */
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_RQTYPE(rqtype) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
				cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}
6859 
/*
 * Initialize the common header of firmware command structure @var for
 * command FW_<cmd>_CMD: sets the opcode, the REQUEST flag and the given
 * READ/WRITE flag, and encodes the command length in 16-byte units.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
6866 
6867 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
6868 			  u32 addr, u32 val)
6869 {
6870 	u32 ldst_addrspace;
6871 	struct fw_ldst_cmd c;
6872 
6873 	memset(&c, 0, sizeof(c));
6874 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
6875 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6876 					F_FW_CMD_REQUEST |
6877 					F_FW_CMD_WRITE |
6878 					ldst_addrspace);
6879 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6880 	c.u.addrval.addr = cpu_to_be32(addr);
6881 	c.u.addrval.val = cpu_to_be32(val);
6882 
6883 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6884 }
6885 
6886 /**
6887  *	t4_mdio_rd - read a PHY register through MDIO
6888  *	@adap: the adapter
6889  *	@mbox: mailbox to use for the FW command
6890  *	@phy_addr: the PHY address
6891  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
6892  *	@reg: the register to read
6893  *	@valp: where to store the value
6894  *
6895  *	Issues a FW command through the given mailbox to read a PHY register.
6896  */
6897 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6898 	       unsigned int mmd, unsigned int reg, unsigned int *valp)
6899 {
6900 	int ret;
6901 	u32 ldst_addrspace;
6902 	struct fw_ldst_cmd c;
6903 
6904 	memset(&c, 0, sizeof(c));
6905 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
6906 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6907 					F_FW_CMD_REQUEST | F_FW_CMD_READ |
6908 					ldst_addrspace);
6909 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6910 	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
6911 					 V_FW_LDST_CMD_MMD(mmd));
6912 	c.u.mdio.raddr = cpu_to_be16(reg);
6913 
6914 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6915 	if (ret == 0)
6916 		*valp = be16_to_cpu(c.u.mdio.rval);
6917 	return ret;
6918 }
6919 
6920 /**
6921  *	t4_mdio_wr - write a PHY register through MDIO
6922  *	@adap: the adapter
6923  *	@mbox: mailbox to use for the FW command
6924  *	@phy_addr: the PHY address
6925  *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
6926  *	@reg: the register to write
6927  *	@valp: value to write
6928  *
6929  *	Issues a FW command through the given mailbox to write a PHY register.
6930  */
6931 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6932 	       unsigned int mmd, unsigned int reg, unsigned int val)
6933 {
6934 	u32 ldst_addrspace;
6935 	struct fw_ldst_cmd c;
6936 
6937 	memset(&c, 0, sizeof(c));
6938 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
6939 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6940 					F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6941 					ldst_addrspace);
6942 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6943 	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
6944 					 V_FW_LDST_CMD_MMD(mmd));
6945 	c.u.mdio.raddr = cpu_to_be16(reg);
6946 	c.u.mdio.rval = cpu_to_be16(val);
6947 
6948 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6949 }
6950 
6951 /**
6952  *
6953  *	t4_sge_decode_idma_state - decode the idma state
6954  *	@adap: the adapter
6955  *	@state: the state idma is stuck in
6956  */
6957 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
6958 {
6959 	static const char * const t4_decode[] = {
6960 		"IDMA_IDLE",
6961 		"IDMA_PUSH_MORE_CPL_FIFO",
6962 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6963 		"Not used",
6964 		"IDMA_PHYSADDR_SEND_PCIEHDR",
6965 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6966 		"IDMA_PHYSADDR_SEND_PAYLOAD",
6967 		"IDMA_SEND_FIFO_TO_IMSG",
6968 		"IDMA_FL_REQ_DATA_FL_PREP",
6969 		"IDMA_FL_REQ_DATA_FL",
6970 		"IDMA_FL_DROP",
6971 		"IDMA_FL_H_REQ_HEADER_FL",
6972 		"IDMA_FL_H_SEND_PCIEHDR",
6973 		"IDMA_FL_H_PUSH_CPL_FIFO",
6974 		"IDMA_FL_H_SEND_CPL",
6975 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
6976 		"IDMA_FL_H_SEND_IP_HDR",
6977 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
6978 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
6979 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
6980 		"IDMA_FL_D_SEND_PCIEHDR",
6981 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6982 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
6983 		"IDMA_FL_SEND_PCIEHDR",
6984 		"IDMA_FL_PUSH_CPL_FIFO",
6985 		"IDMA_FL_SEND_CPL",
6986 		"IDMA_FL_SEND_PAYLOAD_FIRST",
6987 		"IDMA_FL_SEND_PAYLOAD",
6988 		"IDMA_FL_REQ_NEXT_DATA_FL",
6989 		"IDMA_FL_SEND_NEXT_PCIEHDR",
6990 		"IDMA_FL_SEND_PADDING",
6991 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
6992 		"IDMA_FL_SEND_FIFO_TO_IMSG",
6993 		"IDMA_FL_REQ_DATAFL_DONE",
6994 		"IDMA_FL_REQ_HEADERFL_DONE",
6995 	};
6996 	static const char * const t5_decode[] = {
6997 		"IDMA_IDLE",
6998 		"IDMA_ALMOST_IDLE",
6999 		"IDMA_PUSH_MORE_CPL_FIFO",
7000 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7001 		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
7002 		"IDMA_PHYSADDR_SEND_PCIEHDR",
7003 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7004 		"IDMA_PHYSADDR_SEND_PAYLOAD",
7005 		"IDMA_SEND_FIFO_TO_IMSG",
7006 		"IDMA_FL_REQ_DATA_FL",
7007 		"IDMA_FL_DROP",
7008 		"IDMA_FL_DROP_SEND_INC",
7009 		"IDMA_FL_H_REQ_HEADER_FL",
7010 		"IDMA_FL_H_SEND_PCIEHDR",
7011 		"IDMA_FL_H_PUSH_CPL_FIFO",
7012 		"IDMA_FL_H_SEND_CPL",
7013 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
7014 		"IDMA_FL_H_SEND_IP_HDR",
7015 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
7016 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
7017 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
7018 		"IDMA_FL_D_SEND_PCIEHDR",
7019 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7020 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
7021 		"IDMA_FL_SEND_PCIEHDR",
7022 		"IDMA_FL_PUSH_CPL_FIFO",
7023 		"IDMA_FL_SEND_CPL",
7024 		"IDMA_FL_SEND_PAYLOAD_FIRST",
7025 		"IDMA_FL_SEND_PAYLOAD",
7026 		"IDMA_FL_REQ_NEXT_DATA_FL",
7027 		"IDMA_FL_SEND_NEXT_PCIEHDR",
7028 		"IDMA_FL_SEND_PADDING",
7029 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
7030 	};
7031 	static const char * const t6_decode[] = {
7032 		"IDMA_IDLE",
7033 		"IDMA_PUSH_MORE_CPL_FIFO",
7034 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
7035 		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
7036 		"IDMA_PHYSADDR_SEND_PCIEHDR",
7037 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
7038 		"IDMA_PHYSADDR_SEND_PAYLOAD",
7039 		"IDMA_FL_REQ_DATA_FL",
7040 		"IDMA_FL_DROP",
7041 		"IDMA_FL_DROP_SEND_INC",
7042 		"IDMA_FL_H_REQ_HEADER_FL",
7043 		"IDMA_FL_H_SEND_PCIEHDR",
7044 		"IDMA_FL_H_PUSH_CPL_FIFO",
7045 		"IDMA_FL_H_SEND_CPL",
7046 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
7047 		"IDMA_FL_H_SEND_IP_HDR",
7048 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
7049 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
7050 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
7051 		"IDMA_FL_D_SEND_PCIEHDR",
7052 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
7053 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
7054 		"IDMA_FL_SEND_PCIEHDR",
7055 		"IDMA_FL_PUSH_CPL_FIFO",
7056 		"IDMA_FL_SEND_CPL",
7057 		"IDMA_FL_SEND_PAYLOAD_FIRST",
7058 		"IDMA_FL_SEND_PAYLOAD",
7059 		"IDMA_FL_REQ_NEXT_DATA_FL",
7060 		"IDMA_FL_SEND_NEXT_PCIEHDR",
7061 		"IDMA_FL_SEND_PADDING",
7062 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
7063 	};
7064 	static const u32 sge_regs[] = {
7065 		A_SGE_DEBUG_DATA_LOW_INDEX_2,
7066 		A_SGE_DEBUG_DATA_LOW_INDEX_3,
7067 		A_SGE_DEBUG_DATA_HIGH_INDEX_10,
7068 	};
7069 	const char **sge_idma_decode;
7070 	int sge_idma_decode_nstates;
7071 	int i;
7072 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
7073 
7074 	/* Select the right set of decode strings to dump depending on the
7075 	 * adapter chip type.
7076 	 */
7077 	switch (chip_version) {
7078 	case CHELSIO_T4:
7079 		sge_idma_decode = (const char **)t4_decode;
7080 		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
7081 		break;
7082 
7083 	case CHELSIO_T5:
7084 		sge_idma_decode = (const char **)t5_decode;
7085 		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
7086 		break;
7087 
7088 	case CHELSIO_T6:
7089 		sge_idma_decode = (const char **)t6_decode;
7090 		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
7091 		break;
7092 
7093 	default:
7094 		CH_ERR(adapter,	"Unsupported chip version %d\n", chip_version);
7095 		return;
7096 	}
7097 
7098 	if (state < sge_idma_decode_nstates)
7099 		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
7100 	else
7101 		CH_WARN(adapter, "idma state %d unknown\n", state);
7102 
7103 	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
7104 		CH_WARN(adapter, "SGE register %#x value %#x\n",
7105 			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
7106 }
7107 
7108 /**
7109  *      t4_sge_ctxt_flush - flush the SGE context cache
7110  *      @adap: the adapter
7111  *      @mbox: mailbox to use for the FW command
7112  *
7113  *      Issues a FW command through the given mailbox to flush the
7114  *      SGE context cache.
7115  */
7116 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
7117 {
7118 	int ret;
7119 	u32 ldst_addrspace;
7120 	struct fw_ldst_cmd c;
7121 
7122 	memset(&c, 0, sizeof(c));
7123 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
7124 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
7125 					F_FW_CMD_REQUEST | F_FW_CMD_READ |
7126 					ldst_addrspace);
7127 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
7128 	c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
7129 
7130 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7131 	return ret;
7132 }
7133 
7134 /**
7135  *      t4_fw_hello - establish communication with FW
7136  *      @adap: the adapter
7137  *      @mbox: mailbox to use for the FW command
7138  *      @evt_mbox: mailbox to receive async FW events
7139  *      @master: specifies the caller's willingness to be the device master
7140  *	@state: returns the current device state (if non-NULL)
7141  *
7142  *	Issues a command to establish communication with FW.  Returns either
7143  *	an error (negative integer) or the mailbox of the Master PF.
7144  */
7145 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
7146 		enum dev_master master, enum dev_state *state)
7147 {
7148 	int ret;
7149 	struct fw_hello_cmd c;
7150 	u32 v;
7151 	unsigned int master_mbox;
7152 	int retries = FW_CMD_HELLO_RETRIES;
7153 
7154 retry:
7155 	memset(&c, 0, sizeof(c));
7156 	INIT_CMD(c, HELLO, WRITE);
7157 	c.err_to_clearinit = cpu_to_be32(
7158 		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
7159 		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
7160 		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
7161 					mbox : M_FW_HELLO_CMD_MBMASTER) |
7162 		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
7163 		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
7164 		F_FW_HELLO_CMD_CLEARINIT);
7165 
7166 	/*
7167 	 * Issue the HELLO command to the firmware.  If it's not successful
7168 	 * but indicates that we got a "busy" or "timeout" condition, retry
7169 	 * the HELLO until we exhaust our retry limit.  If we do exceed our
7170 	 * retry limit, check to see if the firmware left us any error
7171 	 * information and report that if so ...
7172 	 */
7173 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7174 	if (ret != FW_SUCCESS) {
7175 		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
7176 			goto retry;
7177 		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
7178 			t4_report_fw_error(adap);
7179 		return ret;
7180 	}
7181 
7182 	v = be32_to_cpu(c.err_to_clearinit);
7183 	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
7184 	if (state) {
7185 		if (v & F_FW_HELLO_CMD_ERR)
7186 			*state = DEV_STATE_ERR;
7187 		else if (v & F_FW_HELLO_CMD_INIT)
7188 			*state = DEV_STATE_INIT;
7189 		else
7190 			*state = DEV_STATE_UNINIT;
7191 	}
7192 
7193 	/*
7194 	 * If we're not the Master PF then we need to wait around for the
7195 	 * Master PF Driver to finish setting up the adapter.
7196 	 *
7197 	 * Note that we also do this wait if we're a non-Master-capable PF and
7198 	 * there is no current Master PF; a Master PF may show up momentarily
7199 	 * and we wouldn't want to fail pointlessly.  (This can happen when an
7200 	 * OS loads lots of different drivers rapidly at the same time).  In
7201 	 * this case, the Master PF returned by the firmware will be
7202 	 * M_PCIE_FW_MASTER so the test below will work ...
7203 	 */
7204 	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
7205 	    master_mbox != mbox) {
7206 		int waiting = FW_CMD_HELLO_TIMEOUT;
7207 
7208 		/*
7209 		 * Wait for the firmware to either indicate an error or
7210 		 * initialized state.  If we see either of these we bail out
7211 		 * and report the issue to the caller.  If we exhaust the
7212 		 * "hello timeout" and we haven't exhausted our retries, try
7213 		 * again.  Otherwise bail with a timeout error.
7214 		 */
7215 		for (;;) {
7216 			u32 pcie_fw;
7217 
7218 			msleep(50);
7219 			waiting -= 50;
7220 
7221 			/*
7222 			 * If neither Error nor Initialialized are indicated
7223 			 * by the firmware keep waiting till we exaust our
7224 			 * timeout ... and then retry if we haven't exhausted
7225 			 * our retries ...
7226 			 */
7227 			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
7228 			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
7229 				if (waiting <= 0) {
7230 					if (retries-- > 0)
7231 						goto retry;
7232 
7233 					return -ETIMEDOUT;
7234 				}
7235 				continue;
7236 			}
7237 
7238 			/*
7239 			 * We either have an Error or Initialized condition
7240 			 * report errors preferentially.
7241 			 */
7242 			if (state) {
7243 				if (pcie_fw & F_PCIE_FW_ERR)
7244 					*state = DEV_STATE_ERR;
7245 				else if (pcie_fw & F_PCIE_FW_INIT)
7246 					*state = DEV_STATE_INIT;
7247 			}
7248 
7249 			/*
7250 			 * If we arrived before a Master PF was selected and
7251 			 * there's not a valid Master PF, grab its identity
7252 			 * for our caller.
7253 			 */
7254 			if (master_mbox == M_PCIE_FW_MASTER &&
7255 			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
7256 				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
7257 			break;
7258 		}
7259 	}
7260 
7261 	return master_mbox;
7262 }
7263 
7264 /**
7265  *	t4_fw_bye - end communication with FW
7266  *	@adap: the adapter
7267  *	@mbox: mailbox to use for the FW command
7268  *
7269  *	Issues a command to terminate communication with FW.
7270  */
7271 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
7272 {
7273 	struct fw_bye_cmd c;
7274 
7275 	memset(&c, 0, sizeof(c));
7276 	INIT_CMD(c, BYE, WRITE);
7277 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7278 }
7279 
7280 /**
7281  *	t4_fw_reset - issue a reset to FW
7282  *	@adap: the adapter
7283  *	@mbox: mailbox to use for the FW command
7284  *	@reset: specifies the type of reset to perform
7285  *
7286  *	Issues a reset command of the specified type to FW.
7287  */
7288 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
7289 {
7290 	struct fw_reset_cmd c;
7291 
7292 	memset(&c, 0, sizeof(c));
7293 	INIT_CMD(c, RESET, WRITE);
7294 	c.val = cpu_to_be32(reset);
7295 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7296 }
7297 
7298 /**
7299  *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
7300  *	@adap: the adapter
7301  *	@mbox: mailbox to use for the FW RESET command (if desired)
7302  *	@force: force uP into RESET even if FW RESET command fails
7303  *
7304  *	Issues a RESET command to firmware (if desired) with a HALT indication
7305  *	and then puts the microprocessor into RESET state.  The RESET command
7306  *	will only be issued if a legitimate mailbox is provided (mbox <=
7307  *	M_PCIE_FW_MASTER).
7308  *
7309  *	This is generally used in order for the host to safely manipulate the
7310  *	adapter without fear of conflicting with whatever the firmware might
7311  *	be doing.  The only way out of this state is to RESTART the firmware
7312  *	...
7313  */
7314 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
7315 {
7316 	int ret = 0;
7317 
7318 	/*
7319 	 * If a legitimate mailbox is provided, issue a RESET command
7320 	 * with a HALT indication.
7321 	 */
7322 	if (mbox <= M_PCIE_FW_MASTER) {
7323 		struct fw_reset_cmd c;
7324 
7325 		memset(&c, 0, sizeof(c));
7326 		INIT_CMD(c, RESET, WRITE);
7327 		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
7328 		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
7329 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7330 	}
7331 
7332 	/*
7333 	 * Normally we won't complete the operation if the firmware RESET
7334 	 * command fails but if our caller insists we'll go ahead and put the
7335 	 * uP into RESET.  This can be useful if the firmware is hung or even
7336 	 * missing ...  We'll have to take the risk of putting the uP into
7337 	 * RESET without the cooperation of firmware in that case.
7338 	 *
7339 	 * We also force the firmware's HALT flag to be on in case we bypassed
7340 	 * the firmware RESET command above or we're dealing with old firmware
7341 	 * which doesn't have the HALT capability.  This will serve as a flag
7342 	 * for the incoming firmware to know that it's coming out of a HALT
7343 	 * rather than a RESET ... if it's new enough to understand that ...
7344 	 */
7345 	if (ret == 0 || force) {
7346 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
7347 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
7348 				 F_PCIE_FW_HALT);
7349 	}
7350 
7351 	/*
7352 	 * And we always return the result of the firmware RESET command
7353 	 * even when we force the uP into RESET ...
7354 	 */
7355 	return ret;
7356 }
7357 
7358 /**
7359  *	t4_fw_restart - restart the firmware by taking the uP out of RESET
7360  *	@adap: the adapter
7361  *	@reset: if we want to do a RESET to restart things
7362  *
7363  *	Restart firmware previously halted by t4_fw_halt().  On successful
7364  *	return the previous PF Master remains as the new PF Master and there
7365  *	is no need to issue a new HELLO command, etc.
7366  *
7367  *	We do this in two ways:
7368  *
7369  *	 1. If we're dealing with newer firmware we'll simply want to take
7370  *	    the chip's microprocessor out of RESET.  This will cause the
7371  *	    firmware to start up from its start vector.  And then we'll loop
7372  *	    until the firmware indicates it's started again (PCIE_FW.HALT
7373  *	    reset to 0) or we timeout.
7374  *
7375  *	 2. If we're dealing with older firmware then we'll need to RESET
7376  *	    the chip since older firmware won't recognize the PCIE_FW.HALT
7377  *	    flag and automatically RESET itself on startup.
7378  */
7379 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
7380 {
7381 	if (reset) {
7382 		/*
7383 		 * Since we're directing the RESET instead of the firmware
7384 		 * doing it automatically, we need to clear the PCIE_FW.HALT
7385 		 * bit.
7386 		 */
7387 		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
7388 
7389 		/*
7390 		 * If we've been given a valid mailbox, first try to get the
7391 		 * firmware to do the RESET.  If that works, great and we can
7392 		 * return success.  Otherwise, if we haven't been given a
7393 		 * valid mailbox or the RESET command failed, fall back to
7394 		 * hitting the chip with a hammer.
7395 		 */
7396 		if (mbox <= M_PCIE_FW_MASTER) {
7397 			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
7398 			msleep(100);
7399 			if (t4_fw_reset(adap, mbox,
7400 					F_PIORST | F_PIORSTMODE) == 0)
7401 				return 0;
7402 		}
7403 
7404 		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
7405 		msleep(2000);
7406 	} else {
7407 		int ms;
7408 
7409 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
7410 		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
7411 			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
7412 				return FW_SUCCESS;
7413 			msleep(100);
7414 			ms += 100;
7415 		}
7416 		return -ETIMEDOUT;
7417 	}
7418 	return 0;
7419 }
7420 
7421 /**
7422  *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
7423  *	@adap: the adapter
7424  *	@mbox: mailbox to use for the FW RESET command (if desired)
7425  *	@fw_data: the firmware image to write
7426  *	@size: image size
7427  *	@force: force upgrade even if firmware doesn't cooperate
7428  *
7429  *	Perform all of the steps necessary for upgrading an adapter's
7430  *	firmware image.  Normally this requires the cooperation of the
7431  *	existing firmware in order to halt all existing activities
7432  *	but if an invalid mailbox token is passed in we skip that step
7433  *	(though we'll still put the adapter microprocessor into RESET in
7434  *	that case).
7435  *
7436  *	On successful return the new firmware will have been loaded and
7437  *	the adapter will have been fully RESET losing all previous setup
7438  *	state.  On unsuccessful return the adapter may be completely hosed ...
7439  *	positive errno indicates that the adapter is ~probably~ intact, a
7440  *	negative errno indicates that things are looking bad ...
7441  */
7442 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
7443 		  const u8 *fw_data, unsigned int size, int force)
7444 {
7445 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
7446 	unsigned int bootstrap =
7447 	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
7448 	int reset, ret;
7449 
7450 	if (!t4_fw_matches_chip(adap, fw_hdr))
7451 		return -EINVAL;
7452 
7453 	/* Disable FW_OK flags so that mbox commands with FW_OK flags check
7454 	 * wont be send when we are flashing FW.
7455 	 */
7456 	adap->flags &= ~FW_OK;
7457 
7458 	if (!bootstrap) {
7459 		ret = t4_fw_halt(adap, mbox, force);
7460 		if (ret < 0 && !force)
7461 			goto out;
7462 	}
7463 
7464 	ret = t4_load_fw(adap, fw_data, size, bootstrap);
7465 	if (ret < 0 || bootstrap)
7466 		goto out;
7467 
7468 	/*
7469 	 * Older versions of the firmware don't understand the new
7470 	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
7471 	 * restart.  So for newly loaded older firmware we'll have to do the
7472 	 * RESET for it so it starts up on a clean slate.  We can tell if
7473 	 * the newly loaded firmware will handle this right by checking
7474 	 * its header flags to see if it advertises the capability.
7475 	 */
7476 	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
7477 	ret = t4_fw_restart(adap, mbox, reset);
7478 out:
7479 	adap->flags |= FW_OK;
7480 	return ret;
7481 }
7482 
7483 /**
7484  *	t4_fl_pkt_align - return the fl packet alignment
7485  *	@adap: the adapter
7486  *	is_packed: True when the driver uses packed FLM mode
7487  *
7488  *	T4 has a single field to specify the packing and padding boundary.
7489  *	T5 onwards has separate fields for this and hence the alignment for
7490  *	next packet offset is maximum of these two.
7491  *
7492  */
7493 int t4_fl_pkt_align(struct adapter *adap, bool is_packed)
7494 {
7495 	u32 sge_control, sge_control2;
7496 	unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
7497 
7498 	sge_control = t4_read_reg(adap, A_SGE_CONTROL);
7499 
7500 	/* T4 uses a single control field to specify both the PCIe Padding and
7501 	 * Packing Boundary.  T5 introduced the ability to specify these
7502 	 * separately.  The actual Ingress Packet Data alignment boundary
7503 	 * within Packed Buffer Mode is the maximum of these two
7504 	 * specifications.  (Note that it makes no real practical sense to
7505 	 * have the Pading Boudary be larger than the Packing Boundary but you
7506 	 * could set the chip up that way and, in fact, legacy T4 code would
7507 	 * end doing this because it would initialize the Padding Boundary and
7508 	 * leave the Packing Boundary initialized to 0 (16 bytes).)
7509 	 * Padding Boundary values in T6 starts from 8B,
7510 	 * where as it is 32B for T4 and T5.
7511 	 */
7512 	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
7513 		ingpad_shift = X_INGPADBOUNDARY_SHIFT;
7514 	else
7515 		ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
7516 
7517 	ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
7518 
7519 	fl_align = ingpadboundary;
7520 	if (!is_t4(adap->params.chip) && is_packed) {
7521 		/* T5 has a weird interpretation of one of the PCIe Packing
7522 		 * Boundary values.  No idea why ...
7523 		 */
7524 		sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
7525 		ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
7526 		if (ingpackboundary == X_INGPACKBOUNDARY_16B)
7527 			ingpackboundary = 16;
7528 		else
7529 			ingpackboundary = 1 << (ingpackboundary +
7530 						X_INGPACKBOUNDARY_SHIFT);
7531 
7532 		fl_align = max(ingpadboundary, ingpackboundary);
7533 	}
7534 	return fl_align;
7535 }
7536 
7537 /**
7538  *	t4_fixup_host_params_compat - fix up host-dependent parameters
7539  *	@adap: the adapter
7540  *	@page_size: the host's Base Page Size
7541  *	@cache_line_size: the host's Cache Line Size
7542  *	@chip_compat: maintain compatibility with designated chip
7543  *
7544  *	Various registers in the chip contain values which are dependent on the
7545  *	host's Base Page and Cache Line Sizes.  This function will fix all of
7546  *	those registers with the appropriate values as passed in ...
7547  *
7548  *	@chip_compat is used to limit the set of changes that are made
7549  *	to be compatible with the indicated chip release.  This is used by
7550  *	drivers to maintain compatibility with chip register settings when
7551  *	the drivers haven't [yet] been updated with new chip support.
7552  */
7553 int t4_fixup_host_params_compat(struct adapter *adap,
7554 				unsigned int page_size,
7555 				unsigned int cache_line_size,
7556 				enum chip_type chip_compat)
7557 {
7558 	unsigned int page_shift = fls(page_size) - 1;
7559 	unsigned int sge_hps = page_shift - 10;
7560 	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
7561 	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
7562 	unsigned int fl_align_log = fls(fl_align) - 1;
7563 
7564 	t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
7565 		     V_HOSTPAGESIZEPF0(sge_hps) |
7566 		     V_HOSTPAGESIZEPF1(sge_hps) |
7567 		     V_HOSTPAGESIZEPF2(sge_hps) |
7568 		     V_HOSTPAGESIZEPF3(sge_hps) |
7569 		     V_HOSTPAGESIZEPF4(sge_hps) |
7570 		     V_HOSTPAGESIZEPF5(sge_hps) |
7571 		     V_HOSTPAGESIZEPF6(sge_hps) |
7572 		     V_HOSTPAGESIZEPF7(sge_hps));
7573 
7574 	if (is_t4(adap->params.chip) || is_t4(chip_compat)) {
7575 		t4_set_reg_field(adap, A_SGE_CONTROL,
7576 				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
7577 				 F_EGRSTATUSPAGESIZE,
7578 				 V_INGPADBOUNDARY(fl_align_log -
7579 						  X_INGPADBOUNDARY_SHIFT) |
7580 				 V_EGRSTATUSPAGESIZE(stat_len != 64));
7581 	} else {
7582 		unsigned int pack_align;
7583 		unsigned int ingpad, ingpack;
7584 		unsigned int pcie_cap;
7585 
7586 		/* T5 introduced the separation of the Free List Padding and
7587 		 * Packing Boundaries.  Thus, we can select a smaller Padding
7588 		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
7589 		 * Bandwidth, and use a Packing Boundary which is large enough
7590 		 * to avoid false sharing between CPUs, etc.
7591 		 *
7592 		 * For the PCI Link, the smaller the Padding Boundary the
7593 		 * better.  For the Memory Controller, a smaller Padding
7594 		 * Boundary is better until we cross under the Memory Line
7595 		 * Size (the minimum unit of transfer to/from Memory).  If we
7596 		 * have a Padding Boundary which is smaller than the Memory
7597 		 * Line Size, that'll involve a Read-Modify-Write cycle on the
7598 		 * Memory Controller which is never good.
7599 		 */
7600 
7601 		/* We want the Packing Boundary to be based on the Cache Line
7602 		 * Size in order to help avoid False Sharing performance
7603 		 * issues between CPUs, etc.  We also want the Packing
7604 		 * Boundary to incorporate the PCI-E Maximum Payload Size.  We
7605 		 * get best performance when the Packing Boundary is a
7606 		 * multiple of the Maximum Payload Size.
7607 		 */
7608 		pack_align = fl_align;
7609 		pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
7610 		if (pcie_cap) {
7611 			unsigned int mps, mps_log;
7612 			u16 devctl;
7613 
7614 			/*
7615 			 * The PCIe Device Control Maximum Payload Size field
7616 			 * [bits 7:5] encodes sizes as powers of 2 starting at
7617 			 * 128 bytes.
7618 			 */
7619 			t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
7620 					    &devctl);
7621 			mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
7622 			mps = 1 << mps_log;
7623 			if (mps > pack_align)
7624 				pack_align = mps;
7625 		}
7626 
7627 		/* N.B. T5/T6 have a crazy special interpretation of the "0"
7628 		 * value for the Packing Boundary.  This corresponds to 16
7629 		 * bytes instead of the expected 32 bytes.  So if we want 32
7630 		 * bytes, the best we can really do is 64 bytes ...
7631 		 */
7632 		if (pack_align <= 16) {
7633 			ingpack = X_INGPACKBOUNDARY_16B;
7634 			fl_align = 16;
7635 		} else if (pack_align == 32) {
7636 			ingpack = X_INGPACKBOUNDARY_64B;
7637 			fl_align = 64;
7638 		} else {
7639 			unsigned int pack_align_log = fls(pack_align) - 1;
7640 			ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
7641 			fl_align = pack_align;
7642 		}
7643 
7644 		/* Use the smallest Ingress Padding which isn't smaller than
7645 		 * the Memory Controller Read/Write Size.  We'll take that as
7646 		 * being 8 bytes since we don't know of any system with a
7647 		 * wider Memory Controller Bus Width.
7648 		 */
7649 		if (is_t5(adap->params.chip))
7650 			ingpad = X_INGPADBOUNDARY_32B;
7651 		else
7652 			ingpad = X_T6_INGPADBOUNDARY_8B;
7653 
7654 		t4_set_reg_field(adap, A_SGE_CONTROL,
7655 				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
7656 				 F_EGRSTATUSPAGESIZE,
7657 				 V_INGPADBOUNDARY(ingpad) |
7658 				 V_EGRSTATUSPAGESIZE(stat_len != 64));
7659 		t4_set_reg_field(adap, A_SGE_CONTROL2,
7660 				 V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
7661 				 V_INGPACKBOUNDARY(ingpack));
7662 	}
7663 	/*
7664 	 * Adjust various SGE Free List Host Buffer Sizes.
7665 	 *
7666 	 * This is something of a crock since we're using fixed indices into
7667 	 * the array which are also known by the sge.c code and the T4
7668 	 * Firmware Configuration File.  We need to come up with a much better
7669 	 * approach to managing this array.  For now, the first four entries
7670 	 * are:
7671 	 *
7672 	 *   0: Host Page Size
7673 	 *   1: 64KB
7674 	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
7675 	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
7676 	 *
7677 	 * For the single-MTU buffers in unpacked mode we need to include
7678 	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
7679 	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
7680 	 * Padding boundary.  All of these are accommodated in the Factory
7681 	 * Default Firmware Configuration File but we need to adjust it for
7682 	 * this host's cache line size.
7683 	 */
7684 	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
7685 	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
7686 		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align-1)
7687 		     & ~(fl_align-1));
7688 	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
7689 		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align-1)
7690 		     & ~(fl_align-1));
7691 
7692 	t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));
7693 
7694 	return 0;
7695 }
7696 
7697 /**
7698  *	t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
7699  *	@adap: the adapter
7700  *	@page_size: the host's Base Page Size
7701  *	@cache_line_size: the host's Cache Line Size
7702  *
7703  *	Various registers in T4 contain values which are dependent on the
7704  *	host's Base Page and Cache Line Sizes.  This function will fix all of
7705  *	those registers with the appropriate values as passed in ...
7706  *
7707  *	This routine makes changes which are compatible with T4 chips.
7708  */
7709 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
7710 			 unsigned int cache_line_size)
7711 {
7712 	return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
7713 					   T4_LAST_REV);
7714 }
7715 
7716 /**
7717  *	t4_fw_initialize - ask FW to initialize the device
7718  *	@adap: the adapter
7719  *	@mbox: mailbox to use for the FW command
7720  *
7721  *	Issues a command to FW to partially initialize the device.  This
7722  *	performs initialization that generally doesn't depend on user input.
7723  */
7724 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7725 {
7726 	struct fw_initialize_cmd c;
7727 
7728 	memset(&c, 0, sizeof(c));
7729 	INIT_CMD(c, INITIALIZE, WRITE);
7730 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7731 }
7732 
7733 /**
7734  *	t4_query_params_rw - query FW or device parameters
7735  *	@adap: the adapter
7736  *	@mbox: mailbox to use for the FW command
7737  *	@pf: the PF
7738  *	@vf: the VF
7739  *	@nparams: the number of parameters
7740  *	@params: the parameter names
7741  *	@val: the parameter values
7742  *	@rw: Write and read flag
7743  *	@sleep_ok: if true, we may sleep awaiting mbox cmd completion
7744  *
7745  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
7746  *	queried at once.
7747  */
7748 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
7749 		       unsigned int vf, unsigned int nparams, const u32 *params,
7750 		       u32 *val, int rw, bool sleep_ok)
7751 {
7752 	int i, ret;
7753 	struct fw_params_cmd c;
7754 	__be32 *p = &c.param[0].mnem;
7755 
7756 	if (nparams > 7)
7757 		return -EINVAL;
7758 
7759 	memset(&c, 0, sizeof(c));
7760 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
7761 				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
7762 				  V_FW_PARAMS_CMD_PFN(pf) |
7763 				  V_FW_PARAMS_CMD_VFN(vf));
7764 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7765 
7766 	for (i = 0; i < nparams; i++) {
7767 		*p++ = cpu_to_be32(*params++);
7768 		if (rw)
7769 			*p = cpu_to_be32(*(val + i));
7770 		p++;
7771 	}
7772 
7773 	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7774 	if (ret == 0)
7775 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
7776 			*val++ = be32_to_cpu(*p);
7777 	return ret;
7778 }
7779 
7780 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7781 		    unsigned int vf, unsigned int nparams, const u32 *params,
7782 		    u32 *val)
7783 {
7784 	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7785 				  true);
7786 }
7787 
7788 int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
7789 		    unsigned int vf, unsigned int nparams, const u32 *params,
7790 		    u32 *val)
7791 {
7792 	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7793 				  false);
7794 }
7795 
7796 /**
7797  *      t4_set_params_timeout - sets FW or device parameters
7798  *      @adap: the adapter
7799  *      @mbox: mailbox to use for the FW command
7800  *      @pf: the PF
7801  *      @vf: the VF
7802  *      @nparams: the number of parameters
7803  *      @params: the parameter names
7804  *      @val: the parameter values
7805  *      @timeout: the timeout time
7806  *
7807  *      Sets the value of FW or device parameters.  Up to 7 parameters can be
7808  *      specified at once.
7809  */
7810 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7811 			  unsigned int pf, unsigned int vf,
7812 			  unsigned int nparams, const u32 *params,
7813 			  const u32 *val, int timeout)
7814 {
7815 	struct fw_params_cmd c;
7816 	__be32 *p = &c.param[0].mnem;
7817 
7818 	if (nparams > 7)
7819 		return -EINVAL;
7820 
7821 	memset(&c, 0, sizeof(c));
7822 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
7823 				  F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7824 				  V_FW_PARAMS_CMD_PFN(pf) |
7825 				  V_FW_PARAMS_CMD_VFN(vf));
7826 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7827 
7828 	while (nparams--) {
7829 		*p++ = cpu_to_be32(*params++);
7830 		*p++ = cpu_to_be32(*val++);
7831 	}
7832 
7833 	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7834 }
7835 
7836 /**
7837  *	t4_set_params - sets FW or device parameters
7838  *	@adap: the adapter
7839  *	@mbox: mailbox to use for the FW command
7840  *	@pf: the PF
7841  *	@vf: the VF
7842  *	@nparams: the number of parameters
7843  *	@params: the parameter names
7844  *	@val: the parameter values
7845  *
7846  *	Sets the value of FW or device parameters.  Up to 7 parameters can be
7847  *	specified at once.
7848  */
7849 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7850 		  unsigned int vf, unsigned int nparams, const u32 *params,
7851 		  const u32 *val)
7852 {
7853 	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7854 				     FW_CMD_MAX_TIMEOUT);
7855 }
7856 
7857 /**
7858  *	t4_cfg_pfvf - configure PF/VF resource limits
7859  *	@adap: the adapter
7860  *	@mbox: mailbox to use for the FW command
7861  *	@pf: the PF being configured
7862  *	@vf: the VF being configured
7863  *	@txq: the max number of egress queues
7864  *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
7865  *	@rxqi: the max number of interrupt-capable ingress queues
7866  *	@rxq: the max number of interruptless ingress queues
7867  *	@tc: the PCI traffic class
7868  *	@vi: the max number of virtual interfaces
7869  *	@cmask: the channel access rights mask for the PF/VF
7870  *	@pmask: the port access rights mask for the PF/VF
7871  *	@nexact: the maximum number of exact MPS filters
7872  *	@rcaps: read capabilities
7873  *	@wxcaps: write/execute capabilities
7874  *
7875  *	Configures resource limits and capabilities for a physical or virtual
7876  *	function.
7877  */
7878 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7879 		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7880 		unsigned int rxqi, unsigned int rxq, unsigned int tc,
7881 		unsigned int vi, unsigned int cmask, unsigned int pmask,
7882 		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7883 {
7884 	struct fw_pfvf_cmd c;
7885 
7886 	memset(&c, 0, sizeof(c));
7887 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
7888 				  F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
7889 				  V_FW_PFVF_CMD_VFN(vf));
7890 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7891 	c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
7892 				     V_FW_PFVF_CMD_NIQ(rxq));
7893 	c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
7894 				    V_FW_PFVF_CMD_PMASK(pmask) |
7895 				    V_FW_PFVF_CMD_NEQ(txq));
7896 	c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
7897 				      V_FW_PFVF_CMD_NVI(vi) |
7898 				      V_FW_PFVF_CMD_NEXACTF(nexact));
7899 	c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
7900 				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
7901 				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
7902 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7903 }
7904 
7905 /**
7906  *	t4_alloc_vi_func - allocate a virtual interface
7907  *	@adap: the adapter
7908  *	@mbox: mailbox to use for the FW command
7909  *	@port: physical port associated with the VI
7910  *	@pf: the PF owning the VI
7911  *	@vf: the VF owning the VI
7912  *	@nmac: number of MAC addresses needed (1 to 5)
7913  *	@mac: the MAC addresses of the VI
7914  *	@rss_size: size of RSS table slice associated with this VI
7915  *	@portfunc: which Port Application Function MAC Address is desired
7916  *	@idstype: Intrusion Detection Type
7917  *
7918  *	Allocates a virtual interface for the given physical port.  If @mac is
7919  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
7920  *	If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
7921  *	@mac should be large enough to hold @nmac Ethernet addresses, they are
7922  *	stored consecutively so the space needed is @nmac * 6 bytes.
7923  *	Returns a negative error number or the non-negative VI id.
7924  */
7925 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
7926 		     unsigned int port, unsigned int pf, unsigned int vf,
7927 		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
7928 		     unsigned int portfunc, unsigned int idstype)
7929 {
7930 	int ret;
7931 	struct fw_vi_cmd c;
7932 
7933 	memset(&c, 0, sizeof(c));
7934 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
7935 				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
7936 				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
7937 	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
7938 	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
7939 				     V_FW_VI_CMD_FUNC(portfunc));
7940 	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
7941 	c.nmac = nmac - 1;
7942 	if(!rss_size)
7943 		c.norss_rsssize = F_FW_VI_CMD_NORSS;
7944 
7945 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7946 	if (ret)
7947 		return ret;
7948 
7949 	if (mac) {
7950 		memcpy(mac, c.mac, sizeof(c.mac));
7951 		switch (nmac) {
7952 		case 5:
7953 			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
7954 			/* FALLTHRU */
7955 		case 4:
7956 			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
7957 			/* FALLTHRU */
7958 		case 3:
7959 			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
7960 			/* FALLTHRU */
7961 		case 2:
7962 			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
7963 		}
7964 	}
7965 	if (rss_size)
7966 		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
7967 	return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
7968 }
7969 
7970 /**
7971  *      t4_alloc_vi - allocate an [Ethernet Function] virtual interface
7972  *      @adap: the adapter
7973  *      @mbox: mailbox to use for the FW command
7974  *      @port: physical port associated with the VI
7975  *      @pf: the PF owning the VI
7976  *      @vf: the VF owning the VI
7977  *      @nmac: number of MAC addresses needed (1 to 5)
7978  *      @mac: the MAC addresses of the VI
7979  *      @rss_size: size of RSS table slice associated with this VI
7980  *
7981  *	backwards compatible and convieniance routine to allocate a Virtual
7982  *	Interface with a Ethernet Port Application Function and Intrustion
7983  *	Detection System disabled.
7984  */
7985 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7986 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7987 		unsigned int *rss_size)
7988 {
7989 	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
7990 				FW_VI_FUNC_ETH, 0);
7991 }
7992 
7993 
7994 /**
7995  * 	t4_free_vi - free a virtual interface
7996  * 	@adap: the adapter
7997  * 	@mbox: mailbox to use for the FW command
7998  * 	@pf: the PF owning the VI
7999  * 	@vf: the VF owning the VI
8000  * 	@viid: virtual interface identifiler
8001  *
8002  * 	Free a previously allocated virtual interface.
8003  */
8004 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
8005 	       unsigned int vf, unsigned int viid)
8006 {
8007 	struct fw_vi_cmd c;
8008 
8009 	memset(&c, 0, sizeof(c));
8010 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
8011 				  F_FW_CMD_REQUEST |
8012 				  F_FW_CMD_EXEC |
8013 				  V_FW_VI_CMD_PFN(pf) |
8014 				  V_FW_VI_CMD_VFN(vf));
8015 	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
8016 	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
8017 
8018 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8019 }
8020 
8021 /**
8022  *	t4_set_rxmode - set Rx properties of a virtual interface
8023  *	@adap: the adapter
8024  *	@mbox: mailbox to use for the FW command
8025  *	@viid: the VI id
8026  *	@mtu: the new MTU or -1
8027  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
8028  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
8029  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
8030  *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
8031  *	@sleep_ok: if true we may sleep while awaiting command completion
8032  *
8033  *	Sets Rx properties of a virtual interface.
8034  */
8035 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
8036 		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
8037 		  bool sleep_ok)
8038 {
8039 	struct fw_vi_rxmode_cmd c;
8040 
8041 	/* convert to FW values */
8042 	if (mtu < 0)
8043 		mtu = M_FW_VI_RXMODE_CMD_MTU;
8044 	if (promisc < 0)
8045 		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
8046 	if (all_multi < 0)
8047 		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
8048 	if (bcast < 0)
8049 		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
8050 	if (vlanex < 0)
8051 		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
8052 
8053 	memset(&c, 0, sizeof(c));
8054 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
8055 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8056 				   V_FW_VI_RXMODE_CMD_VIID(viid));
8057 	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
8058 	c.mtu_to_vlanexen =
8059 		cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
8060 			    V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
8061 			    V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
8062 			    V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
8063 			    V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
8064 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8065 }
8066 
8067 /**
8068  *	t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
8069  *	@adap: the adapter
8070  *	@viid: the VI id
8071  *	@mac: the MAC address
8072  *	@mask: the mask
8073  *	@idx: index at which to add this entry
8074  *	@lookup_type: MAC address for inner (1) or outer (0) header
8075  *	@sleep_ok: call is allowed to sleep
8076  *
8077  *	Adds the mac entry at the specified index using raw mac interface.
8078  *
8079  *	Returns a negative error number or the allocated index for this mac.
8080  */
8081 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
8082 			  const u8 *addr, const u8 *mask, unsigned int idx,
8083 			  u8 lookup_type, bool sleep_ok)
8084 {
8085 	int ret = 0;
8086 	struct fw_vi_mac_cmd c;
8087 	struct fw_vi_mac_raw *p = &c.u.raw;
8088 	u32 val;
8089 
8090 	memset(&c, 0, sizeof(c));
8091 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8092 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8093 				   V_FW_VI_MAC_CMD_VIID(viid));
8094 	val = V_FW_CMD_LEN16(1) |
8095 	      V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
8096 	c.freemacs_to_len16 = cpu_to_be32(val);
8097 
8098 	/* Specify that this is an inner mac address */
8099 	p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));
8100 
8101 	/* Lookup Type. Outer header: 0, Inner header: 1 */
8102 	p->data0_pkd = cpu_to_be32(lookup_type << 10);
8103 	p->data0m_pkd = cpu_to_be64(3 << 10); /* Lookup mask */
8104 
8105 	/* Copy the address and the mask */
8106 	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
8107 	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
8108 
8109 	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
8110 	if (ret == 0) {
8111 		ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
8112 		if (ret != idx)
8113 			ret = -ENOMEM;
8114 	}
8115 
8116 	return ret;
8117 }
8118 
8119 /**
8120  *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
8121  *	@adap: the adapter
8122  *	@mbox: mailbox to use for the FW command
8123  *	@viid: the VI id
8124  *	@free: if true any existing filters for this VI id are first removed
8125  *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
8126  *	@addr: the MAC address(es)
8127  *	@idx: where to store the index of each allocated filter
8128  *	@hash: pointer to hash address filter bitmap
8129  *	@sleep_ok: call is allowed to sleep
8130  *
8131  *	Allocates an exact-match filter for each of the supplied addresses and
8132  *	sets it to the corresponding address.  If @idx is not %NULL it should
8133  *	have at least @naddr entries, each of which will be set to the index of
8134  *	the filter allocated for the corresponding MAC address.  If a filter
8135  *	could not be allocated for an address its index is set to 0xffff.
8136  *	If @hash is not %NULL addresses that fail to allocate an exact filter
8137  *	are hashed and update the hash filter bitmap pointed at by @hash.
8138  *
8139  *	Returns a negative error number or the number of filters allocated.
8140  */
8141 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
8142 		      unsigned int viid, bool free, unsigned int naddr,
8143 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
8144 {
8145 	int offset, ret = 0;
8146 	struct fw_vi_mac_cmd c;
8147 	unsigned int nfilters = 0;
8148 	unsigned int max_naddr = adap->params.arch.mps_tcam_size;
8149 	unsigned int rem = naddr;
8150 
8151 	if (naddr > max_naddr)
8152 		return -EINVAL;
8153 
8154 	for (offset = 0; offset < naddr ; /**/) {
8155 		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
8156 					 ? rem
8157 					 : ARRAY_SIZE(c.u.exact));
8158 		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
8159 						     u.exact[fw_naddr]), 16);
8160 		struct fw_vi_mac_exact *p;
8161 		int i;
8162 
8163 		memset(&c, 0, sizeof(c));
8164 		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8165 					   F_FW_CMD_REQUEST |
8166 					   F_FW_CMD_WRITE |
8167 					   V_FW_CMD_EXEC(free) |
8168 					   V_FW_VI_MAC_CMD_VIID(viid));
8169 		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
8170 						  V_FW_CMD_LEN16(len16));
8171 
8172 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8173 			p->valid_to_idx =
8174 				cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
8175 					    V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
8176 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
8177 		}
8178 
8179 		/*
8180 		 * It's okay if we run out of space in our MAC address arena.
8181 		 * Some of the addresses we submit may get stored so we need
8182 		 * to run through the reply to see what the results were ...
8183 		 */
8184 		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8185 		if (ret && ret != -FW_ENOMEM)
8186 			break;
8187 
8188 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8189 			u16 index = G_FW_VI_MAC_CMD_IDX(
8190 						be16_to_cpu(p->valid_to_idx));
8191 
8192 			if (idx)
8193 				idx[offset+i] = (index >=  max_naddr
8194 						 ? 0xffff
8195 						 : index);
8196 			if (index < max_naddr)
8197 				nfilters++;
8198 			else if (hash)
8199 				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
8200 		}
8201 
8202 		free = false;
8203 		offset += fw_naddr;
8204 		rem -= fw_naddr;
8205 	}
8206 
8207 	if (ret == 0 || ret == -FW_ENOMEM)
8208 		ret = nfilters;
8209 	return ret;
8210 }
8211 
8212 /**
8213  *	t4_free_mac_filt - frees exact-match filters of given MAC addresses
8214  *	@adap: the adapter
8215  *	@mbox: mailbox to use for the FW command
8216  *	@viid: the VI id
8217  *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
8218  *	@addr: the MAC address(es)
8219  *	@sleep_ok: call is allowed to sleep
8220  *
8221  *	Frees the exact-match filter for each of the supplied addresses
8222  *
8223  *	Returns a negative error number or the number of filters freed.
8224  */
8225 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
8226 		      unsigned int viid, unsigned int naddr,
8227 		      const u8 **addr, bool sleep_ok)
8228 {
8229 	int offset, ret = 0;
8230 	struct fw_vi_mac_cmd c;
8231 	unsigned int nfilters = 0;
8232 	unsigned int max_naddr = is_t4(adap->params.chip) ?
8233 				       NUM_MPS_CLS_SRAM_L_INSTANCES :
8234 				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
8235 	unsigned int rem = naddr;
8236 
8237 	if (naddr > max_naddr)
8238 		return -EINVAL;
8239 
8240 	for (offset = 0; offset < (int)naddr ; /**/) {
8241 		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
8242 					 ? rem
8243 					 : ARRAY_SIZE(c.u.exact));
8244 		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
8245 						     u.exact[fw_naddr]), 16);
8246 		struct fw_vi_mac_exact *p;
8247 		int i;
8248 
8249 		memset(&c, 0, sizeof(c));
8250 		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8251 				     F_FW_CMD_REQUEST |
8252 				     F_FW_CMD_WRITE |
8253 				     V_FW_CMD_EXEC(0) |
8254 				     V_FW_VI_MAC_CMD_VIID(viid));
8255 		c.freemacs_to_len16 =
8256 				cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
8257 					    V_FW_CMD_LEN16(len16));
8258 
8259 		for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
8260 			p->valid_to_idx = cpu_to_be16(
8261 				F_FW_VI_MAC_CMD_VALID |
8262 				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE));
8263 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
8264 		}
8265 
8266 		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8267 		if (ret)
8268 			break;
8269 
8270 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8271 			u16 index = G_FW_VI_MAC_CMD_IDX(
8272 						be16_to_cpu(p->valid_to_idx));
8273 
8274 			if (index < max_naddr)
8275 				nfilters++;
8276 		}
8277 
8278 		offset += fw_naddr;
8279 		rem -= fw_naddr;
8280 	}
8281 
8282 	if (ret == 0)
8283 		ret = nfilters;
8284 	return ret;
8285 }
8286 
8287 /**
8288  *	t4_change_mac - modifies the exact-match filter for a MAC address
8289  *	@adap: the adapter
8290  *	@mbox: mailbox to use for the FW command
8291  *	@viid: the VI id
8292  *	@idx: index of existing filter for old value of MAC address, or -1
8293  *	@addr: the new MAC address value
8294  *	@persist: whether a new MAC allocation should be persistent
8295  *	@add_smt: if true also add the address to the HW SMT
8296  *
8297  *	Modifies an exact-match filter and sets it to the new MAC address if
8298  *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
8299  *	latter case the address is added persistently if @persist is %true.
8300  *
8301  *	Note that in general it is not possible to modify the value of a given
8302  *	filter so the generic way to modify an address filter is to free the one
8303  *	being used by the old address value and allocate a new filter for the
8304  *	new address value.
8305  *
8306  *	Returns a negative error number or the index of the filter with the new
8307  *	MAC value.  Note that this index may differ from @idx.
8308  */
8309 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
8310 		  int idx, const u8 *addr, bool persist, bool add_smt)
8311 {
8312 	int ret, mode;
8313 	struct fw_vi_mac_cmd c;
8314 	struct fw_vi_mac_exact *p = c.u.exact;
8315 	unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
8316 
8317 	if (idx < 0)		/* new allocation */
8318 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
8319 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
8320 
8321 	memset(&c, 0, sizeof(c));
8322 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8323 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8324 				   V_FW_VI_MAC_CMD_VIID(viid));
8325 	c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
8326 	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
8327 				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
8328 				      V_FW_VI_MAC_CMD_IDX(idx));
8329 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
8330 
8331 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8332 	if (ret == 0) {
8333 		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
8334 		if (ret >= max_mac_addr)
8335 			ret = -ENOMEM;
8336 	}
8337 	return ret;
8338 }
8339 
8340 /**
8341  *	t4_set_addr_hash - program the MAC inexact-match hash filter
8342  *	@adap: the adapter
8343  *	@mbox: mailbox to use for the FW command
8344  *	@viid: the VI id
8345  *	@ucast: whether the hash filter should also match unicast addresses
8346  *	@vec: the value to be written to the hash filter
8347  *	@sleep_ok: call is allowed to sleep
8348  *
8349  *	Sets the 64-bit inexact-match hash filter for a virtual interface.
8350  */
8351 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
8352 		     bool ucast, u64 vec, bool sleep_ok)
8353 {
8354 	struct fw_vi_mac_cmd c;
8355 	u32 val;
8356 
8357 	memset(&c, 0, sizeof(c));
8358 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
8359 				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
8360 				   V_FW_VI_ENABLE_CMD_VIID(viid));
8361 	val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
8362 	      V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
8363 	c.freemacs_to_len16 = cpu_to_be32(val);
8364 	c.u.hash.hashvec = cpu_to_be64(vec);
8365 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8366 }
8367 
8368 /**
8369  *      t4_enable_vi_params - enable/disable a virtual interface
8370  *      @adap: the adapter
8371  *      @mbox: mailbox to use for the FW command
8372  *      @viid: the VI id
8373  *      @rx_en: 1=enable Rx, 0=disable Rx
8374  *      @tx_en: 1=enable Tx, 0=disable Tx
8375  *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
8376  *
8377  *      Enables/disables a virtual interface.  Note that setting DCB Enable
8378  *      only makes sense when enabling a Virtual Interface ...
8379  */
8380 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
8381 			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
8382 {
8383 	struct fw_vi_enable_cmd c;
8384 
8385 	memset(&c, 0, sizeof(c));
8386 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
8387 				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8388 				   V_FW_VI_ENABLE_CMD_VIID(viid));
8389 	c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
8390 				     V_FW_VI_ENABLE_CMD_EEN(tx_en) |
8391 				     V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
8392 				     FW_LEN16(c));
8393 	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
8394 }
8395 
8396 /**
8397  *	t4_enable_vi - enable/disable a virtual interface
8398  *	@adap: the adapter
8399  *	@mbox: mailbox to use for the FW command
8400  *	@viid: the VI id
8401  *	@rx_en: 1=enable Rx, 0=disable Rx
8402  *	@tx_en: 1=enable Tx, 0=disable Tx
8403  *
8404  *	Enables/disables a virtual interface.  Note that setting DCB Enable
8405  *	only makes sense when enabling a Virtual Interface ...
8406  */
8407 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
8408 		 bool rx_en, bool tx_en)
8409 {
8410 	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
8411 }
8412 
8413 /**
8414  *	t4_identify_port - identify a VI's port by blinking its LED
8415  *	@adap: the adapter
8416  *	@mbox: mailbox to use for the FW command
8417  *	@viid: the VI id
8418  *	@nblinks: how many times to blink LED at 2.5 Hz
8419  *
8420  *	Identifies a VI's port by blinking its LED.
8421  */
8422 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
8423 		     unsigned int nblinks)
8424 {
8425 	struct fw_vi_enable_cmd c;
8426 
8427 	memset(&c, 0, sizeof(c));
8428 	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
8429 				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8430 				   V_FW_VI_ENABLE_CMD_VIID(viid));
8431 	c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
8432 	c.blinkdur = cpu_to_be16(nblinks);
8433 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8434 }
8435 
8436 /**
8437  *	t4_iq_stop - stop an ingress queue and its FLs
8438  *	@adap: the adapter
8439  *	@mbox: mailbox to use for the FW command
8440  *	@pf: the PF owning the queues
8441  *	@vf: the VF owning the queues
8442  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8443  *	@iqid: ingress queue id
8444  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
8445  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
8446  *
8447  *	Stops an ingress queue and its associated FLs, if any.  This causes
8448  *	any current or future data/messages destined for these queues to be
8449  *	tossed.
8450  */
8451 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
8452 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
8453 	       unsigned int fl0id, unsigned int fl1id)
8454 {
8455 	struct fw_iq_cmd c;
8456 
8457 	memset(&c, 0, sizeof(c));
8458 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
8459 				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
8460 				  V_FW_IQ_CMD_VFN(vf));
8461 	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
8462 	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
8463 	c.iqid = cpu_to_be16(iqid);
8464 	c.fl0id = cpu_to_be16(fl0id);
8465 	c.fl1id = cpu_to_be16(fl1id);
8466 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8467 }
8468 
8469 /**
8470  *	t4_iq_free - free an ingress queue and its FLs
8471  *	@adap: the adapter
8472  *	@mbox: mailbox to use for the FW command
8473  *	@pf: the PF owning the queues
8474  *	@vf: the VF owning the queues
8475  *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8476  *	@iqid: ingress queue id
8477  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
8478  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
8479  *
8480  *	Frees an ingress queue and its associated FLs, if any.
8481  */
8482 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8483 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
8484 	       unsigned int fl0id, unsigned int fl1id)
8485 {
8486 	struct fw_iq_cmd c;
8487 
8488 	memset(&c, 0, sizeof(c));
8489 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
8490 				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
8491 				  V_FW_IQ_CMD_VFN(vf));
8492 	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
8493 	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
8494 	c.iqid = cpu_to_be16(iqid);
8495 	c.fl0id = cpu_to_be16(fl0id);
8496 	c.fl1id = cpu_to_be16(fl1id);
8497 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8498 }
8499 
8500 /**
8501  *	t4_eth_eq_free - free an Ethernet egress queue
8502  *	@adap: the adapter
8503  *	@mbox: mailbox to use for the FW command
8504  *	@pf: the PF owning the queue
8505  *	@vf: the VF owning the queue
8506  *	@eqid: egress queue id
8507  *
8508  *	Frees an Ethernet egress queue.
8509  */
8510 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8511 		   unsigned int vf, unsigned int eqid)
8512 {
8513 	struct fw_eq_eth_cmd c;
8514 
8515 	memset(&c, 0, sizeof(c));
8516 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
8517 				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8518 				  V_FW_EQ_ETH_CMD_PFN(pf) |
8519 				  V_FW_EQ_ETH_CMD_VFN(vf));
8520 	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
8521 	c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
8522 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8523 }
8524 
8525 /**
8526  *	t4_ctrl_eq_free - free a control egress queue
8527  *	@adap: the adapter
8528  *	@mbox: mailbox to use for the FW command
8529  *	@pf: the PF owning the queue
8530  *	@vf: the VF owning the queue
8531  *	@eqid: egress queue id
8532  *
8533  *	Frees a control egress queue.
8534  */
8535 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8536 		    unsigned int vf, unsigned int eqid)
8537 {
8538 	struct fw_eq_ctrl_cmd c;
8539 
8540 	memset(&c, 0, sizeof(c));
8541 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
8542 				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8543 				  V_FW_EQ_CTRL_CMD_PFN(pf) |
8544 				  V_FW_EQ_CTRL_CMD_VFN(vf));
8545 	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
8546 	c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
8547 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8548 }
8549 
8550 /**
8551  *	t4_ofld_eq_free - free an offload egress queue
8552  *	@adap: the adapter
8553  *	@mbox: mailbox to use for the FW command
8554  *	@pf: the PF owning the queue
8555  *	@vf: the VF owning the queue
8556  *	@eqid: egress queue id
8557  *
8558  *	Frees a control egress queue.
8559  */
8560 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8561 		    unsigned int vf, unsigned int eqid)
8562 {
8563 	struct fw_eq_ofld_cmd c;
8564 
8565 	memset(&c, 0, sizeof(c));
8566 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
8567 				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
8568 				  V_FW_EQ_OFLD_CMD_PFN(pf) |
8569 				  V_FW_EQ_OFLD_CMD_VFN(vf));
8570 	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
8571 	c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
8572 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8573 }
8574 
8575 /**
8576  *	t4_link_down_rc_str - return a string for a Link Down Reason Code
8577  *	@link_down_rc: Link Down Reason Code
8578  *
8579  *	Returns a string representation of the Link Down Reason Code.
8580  */
8581 const char *t4_link_down_rc_str(unsigned char link_down_rc)
8582 {
8583 	static const char * const reason[] = {
8584 		"Link Down",
8585 		"Remote Fault",
8586 		"Auto-negotiation Failure",
8587 		"Reserved",
8588 		"Insufficient Airflow",
8589 		"Unable To Determine Reason",
8590 		"No RX Signal Detected",
8591 		"Reserved",
8592 	};
8593 
8594 	if (link_down_rc >= ARRAY_SIZE(reason))
8595 		return "Bad Reason Code";
8596 
8597 	return reason[link_down_rc];
8598 }
8599 
8600 /**
8601  * Get the highest speed for the port from the advertised port capabilities.
8602  * It will be either the highest speed from the list of speeds or
8603  * whatever user has set using ethtool.
8604  */
8605 static inline unsigned int fwcap_to_fw_speed(unsigned int acaps)
8606 {
8607 	if (acaps & FW_PORT_CAP_SPEED_100G)
8608 		return FW_PORT_CAP_SPEED_100G;
8609 	if (acaps & FW_PORT_CAP_SPEED_40G)
8610 		return FW_PORT_CAP_SPEED_40G;
8611 	if (acaps & FW_PORT_CAP_SPEED_25G)
8612 		return FW_PORT_CAP_SPEED_25G;
8613 	if (acaps & FW_PORT_CAP_SPEED_10G)
8614 		return FW_PORT_CAP_SPEED_10G;
8615 	if (acaps & FW_PORT_CAP_SPEED_1G)
8616 		return FW_PORT_CAP_SPEED_1G;
8617 	if (acaps & FW_PORT_CAP_SPEED_100M)
8618 		return FW_PORT_CAP_SPEED_100M;
8619 	return 0;
8620 }
8621 
8622 /**
8623  *	t4_handle_get_port_info - process a FW reply message
8624  *	@pi: the port info
8625  *	@rpl: start of the FW message
8626  *
8627  *	Processes a GET_PORT_INFO FW reply message.
8628  */
8629 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
8630 {
8631 	const struct fw_port_cmd *p = (const void *)rpl;
8632 	unsigned int acaps = be16_to_cpu(p->u.info.acap);
8633 	struct adapter *adap = pi->adapter;
8634 
8635 	/* link/module state change message */
8636 	int speed = 0;
8637 	unsigned int fc, fec;
8638 	struct link_config *lc;
8639 	u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
8640 	int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
8641 	u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
8642 
8643 	/*
8644 	 * Unfortunately the format of the Link Status returned by the
8645 	 * Firmware isn't the same as the Firmware Port Capabilities bitfield
8646 	 * used everywhere else ...
8647 	 */
8648 	fc = 0;
8649 	if (stat & F_FW_PORT_CMD_RXPAUSE)
8650 		fc |= PAUSE_RX;
8651 	if (stat & F_FW_PORT_CMD_TXPAUSE)
8652 		fc |= PAUSE_TX;
8653 
8654 	fec = fwcap_to_cc_fec(acaps);
8655 
8656 	if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
8657 		speed = 100;
8658 	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
8659 		speed = 1000;
8660 	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
8661 		speed = 10000;
8662 	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
8663 		speed = 25000;
8664 	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
8665 		speed = 40000;
8666 	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
8667 		speed = 100000;
8668 
8669 	lc = &pi->link_cfg;
8670 
8671 	if (mod != pi->mod_type) {
8672 		/*
8673 		 * When a new Transceiver Module is inserted, the Firmware
8674 		 * will examine any Forward Error Correction parameters
8675 		 * present in the Transceiver Module i2c EPROM and determine
8676 		 * the supported and recommended FEC settings from those
8677 		 * based on IEEE 802.3 standards.  We always record the
8678 		 * IEEE 802.3 recommended "automatic" settings.
8679 		 */
8680 		lc->auto_fec = fec;
8681 
8682 		pi->mod_type = mod;
8683 		t4_os_portmod_changed(adap, pi->port_id);
8684 	}
8685 
8686 	if (link_ok != lc->link_ok || speed != lc->speed ||
8687 	    fc != lc->fc || fec != lc->fec) {	/* something changed */
8688 		if (!link_ok && lc->link_ok) {
8689 			unsigned char rc = G_FW_PORT_CMD_LINKDNRC(stat);
8690 
8691 			lc->link_down_rc = rc;
8692 			CH_WARN_RATELIMIT(adap,
8693 				"Port %d link down, reason: %s\n",
8694 				pi->tx_chan, t4_link_down_rc_str(rc));
8695 		}
8696 		lc->link_ok = link_ok;
8697 		lc->speed = speed;
8698 		lc->fc = fc;
8699 		lc->fec = fec;
8700 
8701 		lc->supported = be16_to_cpu(p->u.info.pcap);
8702 		lc->lp_advertising = be16_to_cpu(p->u.info.lpacap);
8703 		lc->advertising = be16_to_cpu(p->u.info.acap) & ADVERT_MASK;
8704 
8705 		if (lc->advertising & FW_PORT_CAP_ANEG) {
8706 			lc->autoneg = AUTONEG_ENABLE;
8707 		} else {
8708 			/* When Autoneg is disabled, user needs to set
8709 			 * single speed.
8710 			 * Similar to cxgb4_ethtool.c: set_link_ksettings
8711 			 */
8712 			lc->advertising = 0;
8713 			lc->requested_speed = fwcap_to_fw_speed(acaps);
8714 			lc->autoneg = AUTONEG_DISABLE;
8715 		}
8716 
8717 		t4_os_link_changed(adap, pi->port_id, link_ok);
8718 	}
8719 }
8720 
8721 /**
8722  *	t4_update_port_info - retrieve and update port information if changed
8723  *	@pi: the port_info
8724  *
8725  *	We issue a Get Port Information Command to the Firmware and, if
8726  *	successful, we check to see if anything is different from what we
8727  *	last recorded and update things accordingly.
8728  */
8729  int t4_update_port_info(struct port_info *pi)
8730  {
8731 	struct fw_port_cmd port_cmd;
8732 	int ret;
8733 
8734 	memset(&port_cmd, 0, sizeof port_cmd);
8735 	port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
8736 					    F_FW_CMD_REQUEST | F_FW_CMD_READ |
8737 					    V_FW_PORT_CMD_PORTID(pi->tx_chan));
8738 	port_cmd.action_to_len16 = cpu_to_be32(
8739 		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
8740 		FW_LEN16(port_cmd));
8741 	ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8742 			 &port_cmd, sizeof(port_cmd), &port_cmd);
8743 	if (ret)
8744 		return ret;
8745 
8746 	t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
8747 	return 0;
8748 }
8749 
8750 /**
8751  *      t4_handle_fw_rpl - process a FW reply message
8752  *      @adap: the adapter
8753  *      @rpl: start of the FW message
8754  *
8755  *      Processes a FW message, such as link state change messages.
8756  */
8757 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
8758 {
8759 	u8 opcode = *(const u8 *)rpl;
8760 
8761 	/*
8762 	 * This might be a port command ... this simplifies the following
8763 	 * conditionals ...  We can get away with pre-dereferencing
8764 	 * action_to_len16 because it's in the first 16 bytes and all messages
8765 	 * will be at least that long.
8766 	 */
8767 	const struct fw_port_cmd *p = (const void *)rpl;
8768 	unsigned int action =
8769 		G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
8770 
8771 	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
8772 		int i;
8773 		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
8774 		struct port_info *pi = NULL;
8775 
8776 		for_each_port(adap, i) {
8777 			pi = adap2pinfo(adap, i);
8778 			if (pi->tx_chan == chan)
8779 				break;
8780 		}
8781 
8782 		t4_handle_get_port_info(pi, rpl);
8783 	} else {
8784 		CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
8785 		return -EINVAL;
8786 	}
8787 	return 0;
8788 }
8789 
8790 /**
8791  *	get_pci_mode - determine a card's PCI mode
8792  *	@adapter: the adapter
8793  *	@p: where to store the PCI settings
8794  *
8795  *	Determines a card's PCI mode and associated parameters, such as speed
8796  *	and width.
8797  */
8798 static void get_pci_mode(struct adapter *adapter,
8799 				   struct pci_params *p)
8800 {
8801 	u16 val;
8802 	u32 pcie_cap;
8803 
8804 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
8805 	if (pcie_cap) {
8806 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
8807 		p->speed = val & PCI_EXP_LNKSTA_CLS;
8808 		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
8809 	}
8810 }
8811 
8812 /**
8813  *	init_link_config - initialize a link's SW state
8814  *	@lc: pointer to structure holding the link state
8815  *	@pcaps: link Port Capabilities
8816  *	@acaps: link current Advertised Port Capabilities
8817  *
8818  *	Initializes the SW state maintained for each link, including the link's
8819  *	capabilities and default speed/flow-control/autonegotiation settings.
8820  */
8821 static void init_link_config(struct link_config *lc, unsigned int pcaps,
8822 			     unsigned int acaps)
8823 {
8824 	lc->supported = pcaps;
8825 	lc->lp_advertising = 0;
8826 	lc->requested_speed = 0;
8827 	lc->speed = 0;
8828 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
8829 
8830 	/*
8831 	 * For Forward Error Control, we default to whatever the Firmware
8832 	 * tells us the Link is currently advertising.
8833 	 */
8834 	lc->auto_fec = fwcap_to_cc_fec(acaps);
8835 	lc->requested_fec = FEC_AUTO;
8836 	lc->fec = lc->auto_fec;
8837 
8838 	if (lc->supported & FW_PORT_CAP_ANEG) {
8839 		lc->advertising = lc->supported & ADVERT_MASK;
8840 		lc->autoneg = AUTONEG_ENABLE;
8841 		lc->requested_fc |= PAUSE_AUTONEG;
8842 	} else {
8843 		lc->advertising = 0;
8844 		lc->autoneg = AUTONEG_DISABLE;
8845 	}
8846 }
8847 
8848 /**
8849  *	t4_wait_dev_ready - wait till to reads of registers work
8850  *
8851  *	Right after the device is RESET is can take a small amount of time
8852  *	for it to respond to register reads.  Until then, all reads will
8853  *	return either 0xff...ff or 0xee...ee.  Return an error if reads
8854  *	don't work within a reasonable time frame.
8855  */
8856 int t4_wait_dev_ready(struct adapter *adapter)
8857 {
8858 	u32 whoami;
8859 
8860 	whoami = t4_read_reg(adapter, A_PL_WHOAMI);
8861 	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
8862 		return 0;
8863 
8864 	msleep(500);
8865 	whoami = t4_read_reg(adapter, A_PL_WHOAMI);
8866 	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
8867 		return 0;
8868 
8869 	CH_ERR(adapter, "Device didn't become ready for access, "
8870 	       "whoami = %#x\n", whoami);
8871 	return -EIO;
8872 }
8873 
/*
 * Serial flash part descriptor: maps the ID returned by the SF_RD_ID
 * command to the part's total size.
 */
struct flash_desc {
	u32 vendor_and_model_id;	/* ID returned by SF_RD_ID */
	u32 size_mb;	/* NOTE: holds total size in BYTES despite the
			 * name -- see the "4 << 20" table initializer */
};
8878 
/**
 *	t4_get_flash_params - identify the serial flash and record its geometry
 *	@adapter: the adapter
 *
 *	Issues a Read ID command to the serial flash, decodes the part's
 *	size from the response, and stores the flash size and sector count
 *	in adapter->params.  Returns 0 on success, a negative error if the
 *	flash can't be read or the part is unknown.
 */
int t4_get_flash_params(struct adapter *adapter)
{
	/* Table for non-Numonix supported flash parts.  Numonix parts are left
	 * to the preexisting well-tested code.  All flash parts have 64KB
	 * sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x00150201, 4 << 20 },	/* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 flashid = 0;
	unsigned int part, manufacturer;
	unsigned int density, size;


	/*
	 * Issue a Read ID Command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.  There's a newer Query
	 * Command which can retrieve detailed geometry information but
	 * many Flash parts don't support it.
	 */
	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &flashid);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return ret;

	/* First check the table of explicitly known parts. */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			adapter->params.sf_size =
				supported_flash[part].size_mb;
			adapter->params.sf_nsec =
				adapter->params.sf_size / SF_SEC_SIZE;
			goto found;
		}

	/* Otherwise decode by manufacturer (low byte of the Read ID). */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: { /* Micron/Numonix */
		/*
		 * This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14: size = 1 << 20; break; /*   1MB */
		case 0x15: size = 1 << 21; break; /*   2MB */
		case 0x16: size = 1 << 22; break; /*   4MB */
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		case 0x19: size = 1 << 25; break; /*  32MB */
		case 0x20: size = 1 << 26; break; /*  64MB */
		case 0x21: size = 1 << 27; break; /* 128MB */
		case 0x22: size = 1 << 28; break; /* 256MB */

		default:
			CH_ERR(adapter, "Micron Flash Part has bad size, "
			       "ID = %#x, Density code = %#x\n",
			       flashid, density);
			return -EINVAL;
		}

		adapter->params.sf_size = size;
		adapter->params.sf_nsec = size / SF_SEC_SIZE;
		break;
	}

	default:
		CH_ERR(adapter, "Unsupported Flash Part, ID = %#x\n", flashid);
		return -EINVAL;
	}

 found:
	/*
	 * We should ~probably~ reject adapters with FLASHes which are too
	 * small but we have some legacy FPGAs with small FLASHes that we'd
	 * still like to use.  So instead we emit a scary message ...
	 */
	if (adapter->params.sf_size < FLASH_MIN_SIZE)
		CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			flashid, adapter->params.sf_size, FLASH_MIN_SIZE);

	return 0;
}
8965 
8966 static void set_pcie_completion_timeout(struct adapter *adapter,
8967 						  u8 range)
8968 {
8969 	u16 val;
8970 	u32 pcie_cap;
8971 
8972 	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
8973 	if (pcie_cap) {
8974 		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
8975 		val &= 0xfff0;
8976 		val |= range ;
8977 		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
8978 	}
8979 }
8980 
8981 /**
8982  *	t4_get_chip_type - Determine chip type from device ID
8983  *	@adap: the adapter
8984  *	@ver: adapter version
8985  */
8986 enum chip_type t4_get_chip_type(struct adapter *adap, int ver)
8987 {
8988 	enum chip_type chip = 0;
8989 	u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV));
8990 
8991 	/* Retrieve adapter's device ID */
8992 	switch (ver) {
8993 		case CHELSIO_T4_FPGA:
8994 			chip |= CHELSIO_CHIP_FPGA;
8995 			/*FALLTHROUGH*/
8996 		case CHELSIO_T4:
8997 			chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
8998 			break;
8999 		case CHELSIO_T5_FPGA:
9000 			chip |= CHELSIO_CHIP_FPGA;
9001 			/*FALLTHROUGH*/
9002 		case CHELSIO_T5:
9003 			chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
9004 			break;
9005 		case CHELSIO_T6_FPGA:
9006 			chip |= CHELSIO_CHIP_FPGA;
9007 			/*FALLTHROUGH*/
9008 		case CHELSIO_T6:
9009 			chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
9010 			break;
9011 		default:
9012 			CH_ERR(adap, "Device %d is not supported\n",
9013 			       adap->params.pci.device_id);
9014 			return -EINVAL;
9015 	}
9016 
9017 	/* T4A1 chip is no longer supported */
9018 	if (chip == T4_A1) {
9019 		CH_ALERT(adap, "T4 rev 1 chip is no longer supported\n");
9020 		return -EINVAL;
9021 	}
9022 	return chip;
9023 }
9024 
9025 /**
9026  *	t4_prep_pf - prepare SW and HW for PF operation
9027  *	@adapter: the adapter
9028  *
9029  *	Initialize adapter SW state for the various HW modules, set initial
9030  *	values for some adapter tunables on each PF.
9031  */
9032 int t4_prep_pf(struct adapter *adapter)
9033 {
9034 	int ret, ver;
9035 
9036 	ret = t4_wait_dev_ready(adapter);
9037 	if (ret < 0)
9038 		return ret;
9039 
9040 	get_pci_mode(adapter, &adapter->params.pci);
9041 
9042 
9043 	/* Retrieve adapter's device ID
9044 	 */
9045 	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &adapter->params.pci.device_id);
9046 	t4_os_pci_read_cfg2(adapter, PCI_VENDOR_ID, &adapter->params.pci.vendor_id);
9047 
9048 	ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
9049 	adapter->params.chip = t4_get_chip_type(adapter, ver);
9050 	if (is_t4(adapter->params.chip)) {
9051 		adapter->params.arch.sge_fl_db = F_DBPRIO;
9052 		adapter->params.arch.mps_tcam_size =
9053 				 NUM_MPS_CLS_SRAM_L_INSTANCES;
9054 		adapter->params.arch.mps_rplc_size = 128;
9055 		adapter->params.arch.nchan = NCHAN;
9056 		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
9057 		adapter->params.arch.vfcount = 128;
9058 		/* Congestion map is for 4 channels so that
9059 		 * MPS can have 4 priority per port.
9060 		 */
9061 		adapter->params.arch.cng_ch_bits_log = 2;
9062 	} else if (is_t5(adapter->params.chip)) {
9063 		adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
9064 		adapter->params.arch.mps_tcam_size =
9065 				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
9066 		adapter->params.arch.mps_rplc_size = 128;
9067 		adapter->params.arch.nchan = NCHAN;
9068 		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
9069 		adapter->params.arch.vfcount = 128;
9070 		adapter->params.arch.cng_ch_bits_log = 2;
9071 	} else if (is_t6(adapter->params.chip)) {
9072 		adapter->params.arch.sge_fl_db = 0;
9073 		adapter->params.arch.mps_tcam_size =
9074 				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
9075 		adapter->params.arch.mps_rplc_size = 256;
9076 		adapter->params.arch.nchan = 2;
9077 		adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
9078 		adapter->params.arch.vfcount = 256;
9079 		/* Congestion map will be for 2 channels so that
9080 		 * MPS can have 8 priority per port.
9081 		 */
9082 		adapter->params.arch.cng_ch_bits_log = 3;
9083 	} else {
9084 		CH_ERR(adapter, "Device %d is not supported\n",
9085 			adapter->params.pci.device_id);
9086 		return -EINVAL;
9087 	}
9088 
9089 	adapter->params.pci.vpd_cap_addr =
9090 		t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
9091 
9092 	if (is_fpga(adapter->params.chip)) {
9093 		/* FPGA */
9094 		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
9095 	} else {
9096 		/* ASIC */
9097 		adapter->params.cim_la_size = CIMLA_SIZE;
9098 	}
9099 
9100 	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
9101 
9102 	/*
9103 	 * Default port and clock for debugging in case we can't reach FW.
9104 	 */
9105 	adapter->params.nports = 1;
9106 	adapter->params.portvec = 1;
9107 	adapter->params.vpd.cclk = 50000;
9108 
9109 	/* Set pci completion timeout value to 4 seconds. */
9110 	set_pcie_completion_timeout(adapter, 0xd);
9111 	return 0;
9112 }
9113 
9114 /**
9115  *      t4_prep_master_pf - prepare SW for master PF operations
9116  *      @adapter: the adapter
9117  *
9118  */
9119 int t4_prep_master_pf(struct adapter *adapter)
9120 {
9121 	int ret;
9122 
9123 	ret = t4_prep_pf(adapter);
9124 	if (ret < 0)
9125 		return ret;
9126 
9127 	ret = t4_get_flash_params(adapter);
9128 	if (ret < 0) {
9129 		CH_ERR(adapter,
9130 		       "Unable to retrieve Flash parameters ret = %d\n", -ret);
9131 		return ret;
9132 	}
9133 
9134 	return 0;
9135 }
9136 
9137 /**
9138  *      t4_prep_adapter - prepare SW and HW for operation
9139  *      @adapter: the adapter
9140  *      @reset: if true perform a HW reset
9141  *
9142  *      Initialize adapter SW state for the various HW modules, set initial
9143  *      values for some adapter tunables.
9144  */
9145 int t4_prep_adapter(struct adapter *adapter, bool reset)
9146 {
9147 	return t4_prep_master_pf(adapter);
9148 }
9149 
9150 /**
9151  *	t4_shutdown_adapter - shut down adapter, host & wire
9152  *	@adapter: the adapter
9153  *
9154  *	Perform an emergency shutdown of the adapter and stop it from
9155  *	continuing any further communication on the ports or DMA to the
9156  *	host.  This is typically used when the adapter and/or firmware
9157  *	have crashed and we want to prevent any further accidental
9158  *	communication with the rest of the world.  This will also force
9159  *	the port Link Status to go down -- if register writes work --
9160  *	which should help our peers figure out that we're down.
9161  */
9162 int t4_shutdown_adapter(struct adapter *adapter)
9163 {
9164 	int port;
9165 
9166 	t4_intr_disable(adapter);
9167 	t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
9168 	for_each_port(adapter, port) {
9169 		u32 a_port_cfg = is_t4(adapter->params.chip) ?
9170 				 PORT_REG(port, A_XGMAC_PORT_CFG) :
9171 				 T5_PORT_REG(port, A_MAC_PORT_CFG);
9172 
9173 		t4_write_reg(adapter, a_port_cfg,
9174 			     t4_read_reg(adapter, a_port_cfg)
9175 			     & ~V_SIGNAL_DET(1));
9176 	}
9177 	t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
9178 
9179 	return 0;
9180 }
9181 
9182 /**
9183  *	t4_bar2_sge_qregs - return BAR2 SGE Queue register information
9184  *	@adapter: the adapter
9185  *	@qid: the Queue ID
9186  *	@qtype: the Ingress or Egress type for @qid
9187  *	@user: true if this request is for a user mode queue
9188  *	@pbar2_qoffset: BAR2 Queue Offset
9189  *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
9190  *
9191  *	Returns the BAR2 SGE Queue Registers information associated with the
9192  *	indicated Absolute Queue ID.  These are passed back in return value
9193  *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
9194  *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
9195  *
9196  *	This may return an error which indicates that BAR2 SGE Queue
9197  *	registers aren't available.  If an error is not returned, then the
9198  *	following values are returned:
9199  *
9200  *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
9201  *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
9202  *
9203  *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
9204  *	require the "Inferred Queue ID" ability may be used.  E.g. the
9205  *	Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
9206  *	then these "Inferred Queue ID" register may not be used.
9207  */
9208 int t4_bar2_sge_qregs(struct adapter *adapter,
9209 		      unsigned int qid,
9210 		      enum t4_bar2_qtype qtype,
9211 		      int user,
9212 		      u64 *pbar2_qoffset,
9213 		      unsigned int *pbar2_qid)
9214 {
9215 	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
9216 	u64 bar2_page_offset, bar2_qoffset;
9217 	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
9218 
9219 	/* T4 doesn't support BAR2 SGE Queue registers for kernel
9220 	 * mode queues.
9221 	 */
9222 	if (!user && is_t4(adapter->params.chip))
9223 		return -EINVAL;
9224 
9225 	/* Get our SGE Page Size parameters.
9226 	 */
9227 	page_shift = adapter->params.sge.hps + 10;
9228 	page_size = 1 << page_shift;
9229 
9230 	/* Get the right Queues per Page parameters for our Queue.
9231 	 */
9232 	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
9233 		     ? adapter->params.sge.eq_qpp
9234 		     : adapter->params.sge.iq_qpp);
9235 	qpp_mask = (1 << qpp_shift) - 1;
9236 
9237 	/* Calculate the basics of the BAR2 SGE Queue register area:
9238 	 *  o The BAR2 page the Queue registers will be in.
9239 	 *  o The BAR2 Queue ID.
9240 	 *  o The BAR2 Queue ID Offset into the BAR2 page.
9241 	 */
9242 	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
9243 	bar2_qid = qid & qpp_mask;
9244 	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
9245 
9246 	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
9247 	 * hardware will infer the Absolute Queue ID simply from the writes to
9248 	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
9249 	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
9250 	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
9251 	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
9252 	 * from the BAR2 Page and BAR2 Queue ID.
9253 	 *
9254 	 * One important censequence of this is that some BAR2 SGE registers
9255 	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
9256 	 * there.  But other registers synthesize the SGE Queue ID purely
9257 	 * from the writes to the registers -- the Write Combined Doorbell
9258 	 * Buffer is a good example.  These BAR2 SGE Registers are only
9259 	 * available for those BAR2 SGE Register areas where the SGE Absolute
9260 	 * Queue ID can be inferred from simple writes.
9261 	 */
9262 	bar2_qoffset = bar2_page_offset;
9263 	bar2_qinferred = (bar2_qid_offset < page_size);
9264 	if (bar2_qinferred) {
9265 		bar2_qoffset += bar2_qid_offset;
9266 		bar2_qid = 0;
9267 	}
9268 
9269 	*pbar2_qoffset = bar2_qoffset;
9270 	*pbar2_qid = bar2_qid;
9271 	return 0;
9272 }
9273 
9274 /**
9275  *	t4_init_devlog_params - initialize adapter->params.devlog
9276  *	@adap: the adapter
9277  *	@fw_attach: whether we can talk to the firmware
9278  *
9279  *	Initialize various fields of the adapter's Firmware Device Log
9280  *	Parameters structure.
9281  */
9282 int t4_init_devlog_params(struct adapter *adap, int fw_attach)
9283 {
9284 	struct devlog_params *dparams = &adap->params.devlog;
9285 	u32 pf_dparams;
9286 	unsigned int devlog_meminfo;
9287 	struct fw_devlog_cmd devlog_cmd;
9288 	int ret;
9289 
9290 	/* If we're dealing with newer firmware, the Device Log Paramerters
9291 	 * are stored in a designated register which allows us to access the
9292 	 * Device Log even if we can't talk to the firmware.
9293 	 */
9294 	pf_dparams =
9295 		t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
9296 	if (pf_dparams) {
9297 		unsigned int nentries, nentries128;
9298 
9299 		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
9300 		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
9301 
9302 		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
9303 		nentries = (nentries128 + 1) * 128;
9304 		dparams->size = nentries * sizeof(struct fw_devlog_e);
9305 
9306 		return 0;
9307 	}
9308 
9309 	/*
9310 	 * For any failing returns ...
9311 	 */
9312 	memset(dparams, 0, sizeof *dparams);
9313 
9314 	/*
9315 	 * If we can't talk to the firmware, there's really nothing we can do
9316 	 * at this point.
9317 	 */
9318 	if (!fw_attach)
9319 		return -ENXIO;
9320 
9321 	/* Otherwise, ask the firmware for it's Device Log Parameters.
9322 	 */
9323 	memset(&devlog_cmd, 0, sizeof devlog_cmd);
9324 	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9325 					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
9326 	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9327 	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
9328 			 &devlog_cmd);
9329 	if (ret)
9330 		return ret;
9331 
9332 	devlog_meminfo =
9333 		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
9334 	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
9335 	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
9336 	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
9337 
9338 	return 0;
9339 }
9340 
9341 /**
9342  *	t4_init_sge_params - initialize adap->params.sge
9343  *	@adapter: the adapter
9344  *
9345  *	Initialize various fields of the adapter's SGE Parameters structure.
9346  */
9347 int t4_init_sge_params(struct adapter *adapter)
9348 {
9349 	struct sge_params *sge_params = &adapter->params.sge;
9350 	u32 hps, qpp;
9351 	unsigned int s_hps, s_qpp;
9352 
9353 	/* Extract the SGE Page Size for our PF.
9354 	 */
9355 	hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
9356 	s_hps = (S_HOSTPAGESIZEPF0 +
9357 		 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf);
9358 	sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
9359 
9360 	/* Extract the SGE Egress and Ingess Queues Per Page for our PF.
9361 	 */
9362 	s_qpp = (S_QUEUESPERPAGEPF0 +
9363 		(S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
9364 	qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
9365 	sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
9366 	qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
9367 	sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
9368 
9369 	return 0;
9370 }
9371 
9372 /**
9373  *      t4_init_tp_params - initialize adap->params.tp
9374  *      @adap: the adapter
9375  * 	@sleep_ok: if true we may sleep while awaiting command completion
9376  *
9377  *      Initialize various fields of the adapter's TP Parameters structure.
9378  */
9379 int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
9380 {
9381 	int chan;
9382 	u32 v;
9383 
9384 	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
9385 	adap->params.tp.tre = G_TIMERRESOLUTION(v);
9386 	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
9387 
9388 	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
9389 	for (chan = 0; chan < NCHAN; chan++)
9390 		adap->params.tp.tx_modq[chan] = chan;
9391 
9392 	/* Cache the adapter's Compressed Filter Mode and global Incress
9393 	 * Configuration.
9394 	 */
9395 	t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
9396 		       A_TP_VLAN_PRI_MAP, sleep_ok);
9397 	t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
9398 		       A_TP_INGRESS_CONFIG, sleep_ok);
9399 
9400 	/* For T6, cache the adapter's compressed error vector
9401 	 * and passing outer header info for encapsulated packets.
9402 	 */
9403 	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
9404 		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
9405 		adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
9406 	}
9407 
9408 	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
9409 	 * shift positions of several elements of the Compressed Filter Tuple
9410 	 * for this adapter which we need frequently ...
9411 	 */
9412 	adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
9413 	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
9414 	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
9415 	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
9416 	adap->params.tp.tos_shift = t4_filter_field_shift(adap, F_TOS);
9417 	adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
9418 	adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
9419 								F_ETHERTYPE);
9420 	adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
9421 								F_MACMATCH);
9422 	adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
9423 								F_MPSHITTYPE);
9424 	adap->params.tp.frag_shift = t4_filter_field_shift(adap,
9425 							   F_FRAGMENTATION);
9426 
9427 	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
9428 	 * represents the presence of an Outer VLAN instead of a VNIC ID.
9429 	 */
9430 	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
9431 		adap->params.tp.vnic_shift = -1;
9432 
9433 	return 0;
9434 }
9435 
9436 /**
9437  *      t4_filter_field_shift - calculate filter field shift
9438  *      @adap: the adapter
9439  *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
9440  *
9441  *      Return the shift position of a filter field within the Compressed
9442  *      Filter Tuple.  The filter field is specified via its selection bit
9443  *      within TP_VLAN_PRI_MAL (filter mode).  E.g. F_VLAN.
9444  */
9445 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
9446 {
9447 	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
9448 	unsigned int sel;
9449 	int field_shift;
9450 
9451 	if ((filter_mode & filter_sel) == 0)
9452 		return -1;
9453 
9454 	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
9455 		switch (filter_mode & sel) {
9456 		case F_FCOE:
9457 			field_shift += W_FT_FCOE;
9458 			break;
9459 		case F_PORT:
9460 			field_shift += W_FT_PORT;
9461 			break;
9462 		case F_VNIC_ID:
9463 			field_shift += W_FT_VNIC_ID;
9464 			break;
9465 		case F_VLAN:
9466 			field_shift += W_FT_VLAN;
9467 			break;
9468 		case F_TOS:
9469 			field_shift += W_FT_TOS;
9470 			break;
9471 		case F_PROTOCOL:
9472 			field_shift += W_FT_PROTOCOL;
9473 			break;
9474 		case F_ETHERTYPE:
9475 			field_shift += W_FT_ETHERTYPE;
9476 			break;
9477 		case F_MACMATCH:
9478 			field_shift += W_FT_MACMATCH;
9479 			break;
9480 		case F_MPSHITTYPE:
9481 			field_shift += W_FT_MPSHITTYPE;
9482 			break;
9483 		case F_FRAGMENTATION:
9484 			field_shift += W_FT_FRAGMENTATION;
9485 			break;
9486 		}
9487 	}
9488 	return field_shift;
9489 }
9490 
9491 /**
9492  *	t4_create_filter_info - return Compressed Filter Value/Mask tuple
9493  *	@adapter: the adapter
9494  *	@filter_value: Filter Value return value pointer
9495  *	@filter_mask: Filter Mask return value pointer
9496  *	@fcoe: FCoE filter selection
9497  *	@port: physical port filter selection
9498  *	@vnic: Virtual NIC ID filter selection
9499  *	@vlan: VLAN ID filter selection
9500  *	@vlan_pcp: VLAN Priority Code Point
9501  *	@vlan_dei: VLAN Drop Eligibility Indicator
9502  *	@tos: Type Of Server filter selection
9503  *	@protocol: IP Protocol filter selection
9504  *	@ethertype: Ethernet Type filter selection
9505  *	@macmatch: MPS MAC Index filter selection
9506  *	@matchtype: MPS Hit Type filter selection
9507  *	@frag: IP Fragmentation filter selection
9508  *
9509  *	Construct a Compressed Filter Value/Mask tuple based on a set of
9510  *	"filter selection" values.  For each passed filter selection value
9511  *	which is greater than or equal to 0, we put that value into the
9512  *	constructed Filter Value and the appropriate mask into the Filter
9513  *	Mask.  If a filter selections is specified which is not currently
9514  *	configured into the hardware, an error will be returned.  Otherwise
9515  *	the constructed FIlter Value/Mask tuple will be returned via the
9516  *	specified return value pointers and success will be returned.
9517  *
9518  *	All filter selection values and the returned Filter Value/Mask values
9519  *	are in Host-Endian format.
9520  */
9521 int t4_create_filter_info(const struct adapter *adapter,
9522 			  u64 *filter_value, u64 *filter_mask,
9523 			  int fcoe, int port, int vnic,
9524 			  int vlan, int vlan_pcp, int vlan_dei,
9525 			  int tos, int protocol, int ethertype,
9526 			  int macmatch, int matchtype, int frag)
9527 {
9528 	const struct tp_params *tp = &adapter->params.tp;
9529 	u64 v, m;
9530 
9531 	/*
9532 	 * If any selected filter field isn't enabled, return an error.
9533 	 */
9534 	#define BAD_FILTER(__field) \
9535 		((__field) >= 0 && tp->__field##_shift < 0)
9536 	if (BAD_FILTER(fcoe)       ||
9537 	    BAD_FILTER(port)       ||
9538 	    BAD_FILTER(vnic)       ||
9539 	    BAD_FILTER(vlan)       ||
9540 	    BAD_FILTER(tos)        ||
9541 	    BAD_FILTER(protocol)   ||
9542 	    BAD_FILTER(ethertype)  ||
9543 	    BAD_FILTER(macmatch)   ||
9544 	    BAD_FILTER(matchtype) ||
9545 	    BAD_FILTER(frag))
9546 		return -EINVAL;
9547 	#undef BAD_FILTER
9548 
9549 	/*
9550 	 * We have to have VLAN ID selected if we want to also select on
9551 	 * either the Priority Code Point or Drop Eligibility Indicator
9552 	 * fields.
9553 	 */
9554 	if ((vlan_pcp >= 0 || vlan_dei >= 0) && vlan < 0)
9555 		return -EINVAL;
9556 
9557 	/*
9558 	 * Construct Filter Value and Mask.
9559 	 */
9560 	v = m = 0;
9561 	#define SET_FILTER_FIELD(__field, __width) \
9562 	do { \
9563 		if ((__field) >= 0) { \
9564 			const int shift = tp->__field##_shift; \
9565 			\
9566 			v |= (__field) << shift; \
9567 			m |= ((1ULL << (__width)) - 1) << shift; \
9568 		} \
9569 	} while (0)
9570 	SET_FILTER_FIELD(fcoe,      W_FT_FCOE);
9571 	SET_FILTER_FIELD(port,      W_FT_PORT);
9572 	SET_FILTER_FIELD(tos,       W_FT_TOS);
9573 	SET_FILTER_FIELD(protocol,  W_FT_PROTOCOL);
9574 	SET_FILTER_FIELD(ethertype, W_FT_ETHERTYPE);
9575 	SET_FILTER_FIELD(macmatch,  W_FT_MACMATCH);
9576 	SET_FILTER_FIELD(matchtype, W_FT_MPSHITTYPE);
9577 	SET_FILTER_FIELD(frag,      W_FT_FRAGMENTATION);
9578 	#undef SET_FILTER_FIELD
9579 
9580 	/*
9581 	 * We handle VNIC ID and VLANs separately because they're slightly
9582 	 * different than the rest of the fields.  Both require that a
9583 	 * corresponding "valid" bit be set in the Filter Value and Mask.
9584 	 * These bits are in the top bit of the field.  Additionally, we can
9585 	 * select the Priority Code Point and Drop Eligibility Indicator
9586 	 * fields for VLANs as an option.  Remember that the format of a VLAN
9587 	 * Tag is:
9588 	 *
9589 	 * bits: 3  1      12
9590 	 *     +---+-+------------+
9591 	 *     |PCP|D|   VLAN ID  |
9592 	 *     +---+-+------------+
9593 	 */
9594 	if (vnic >= 0) {
9595 		v |= ((1ULL << (W_FT_VNIC_ID-1)) | vnic) << tp->vnic_shift;
9596 		m |= ((1ULL << W_FT_VNIC_ID) - 1) << tp->vnic_shift;
9597 	}
9598 	if (vlan >= 0) {
9599 		v |= ((1ULL << (W_FT_VLAN-1)) | vlan)  << tp->vlan_shift;
9600 		m |= ((1ULL << (W_FT_VLAN-1)) | 0xfff) << tp->vlan_shift;
9601 
9602 		if (vlan_dei >= 0) {
9603 			v |= vlan_dei << (tp->vlan_shift + 12);
9604 			m |= 0x7      << (tp->vlan_shift + 12);
9605 		}
9606 		if (vlan_pcp >= 0) {
9607 			v |= vlan_pcp << (tp->vlan_shift + 13);
9608 			m |= 0x7      << (tp->vlan_shift + 13);
9609 		}
9610 	}
9611 
9612 	/*
9613 	 * Pass back computed Filter Value and Mask; return success.
9614 	 */
9615 	*filter_value = v;
9616 	*filter_mask = m;
9617 	return 0;
9618 }
9619 
9620 int t4_init_rss_mode(struct adapter *adap, int mbox)
9621 {
9622 	int i, ret;
9623 	struct fw_rss_vi_config_cmd rvc;
9624 
9625 	memset(&rvc, 0, sizeof(rvc));
9626 
9627 	for_each_port(adap, i) {
9628 		struct port_info *p = adap2pinfo(adap, i);
9629 		rvc.op_to_viid =
9630 			cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
9631 				    F_FW_CMD_REQUEST | F_FW_CMD_READ |
9632 				    V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
9633 		rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
9634 		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
9635 		if (ret)
9636 			return ret;
9637 		p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
9638 	}
9639 	return 0;
9640 }
9641 
9642 static int t4_init_portmirror(struct port_info *pi, int mbox,
9643 		       int port, int pf, int vf)
9644 {
9645 	struct adapter *adapter = pi->adapter;
9646 	int ret;
9647 
9648 	ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, NULL, NULL);
9649 	if (ret < 0)
9650 		return ret;
9651 
9652 	CH_INFO(adapter, "Port %d Traffic Mirror PF = %u; VF = %u\n",
9653 		port, G_FW_VIID_PFN(ret), G_FW_VIID_VIN(ret));
9654 
9655 	pi->viid_mirror = ret;
9656 	return 0;
9657 }
9658 
9659 int t4_mirror_init(struct adapter *adap, int mbox, int pf, int vf)
9660 {
9661 	int ret, i, j = 0;
9662 
9663 	for_each_port(adap, i) {
9664 		struct port_info *pi = adap2pinfo(adap, i);
9665 
9666 		while ((adap->params.portvec & (1 << j)) == 0)
9667 			j++;
9668 
9669 		ret = t4_init_portmirror(pi, mbox, j, pf, vf);
9670 		if (ret)
9671 			return ret;
9672 		j++;
9673 	}
9674 	return 0;
9675 }
9676 
/**
 *	t4_init_portinfo - allocate a virtual interface and initialize port_info
 *	@pi: the port_info
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@mac: the MAC address of the VI
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC address of the VI as assigned by FW.
 *	@mac should be large enough to hold an Ethernet address.
 *	Returns < 0 on error.
 */
int t4_init_portinfo(struct port_info *pi, int mbox,
		     int port, int pf, int vf, u8 mac[])
{
	int ret;
	struct fw_port_cmd c;
	unsigned int rss_size;

	/* Query the physical port's information from firmware. */
	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_READ |
				     V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = cpu_to_be32(
		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
		FW_LEN16(c));
	ret = t4_wr_mbox(pi->adapter, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	/* Allocate the VI; on success the return value is the VI ID. */
	ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
	if (ret < 0)
		return ret;

	pi->viid = ret;
	pi->tx_chan = port;
	pi->lport = port;
	pi->rss_size = rss_size;
	pi->rx_chan = t4_get_tp_e2c_map(pi->adapter, port);

	/* Decode the port status/capability word from the FW reply. */
	ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
	/* -1 means no MDIO access for this port. */
	pi->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
		G_FW_PORT_CMD_MDIOADDR(ret) : -1;
	pi->port_type = G_FW_PORT_CMD_PTYPE(ret);
	pi->mod_type = FW_PORT_MOD_TYPE_NA;

	/* Seed the link configuration from the port/advertised capabilities
	 * (16-bit capability format used by GET_PORT_INFO).
	 */
	init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap),
			 be16_to_cpu(c.u.info.acap));
	return 0;
}
9729 
9730 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
9731 {
9732 	u8 addr[6];
9733 	int ret, i, j = 0;
9734 
9735 	for_each_port(adap, i) {
9736 		struct port_info *pi = adap2pinfo(adap, i);
9737 
9738 		while ((adap->params.portvec & (1 << j)) == 0)
9739 			j++;
9740 
9741 		ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
9742 		if (ret)
9743 			return ret;
9744 
9745 		t4_os_set_hw_addr(adap, i, addr);
9746 		j++;
9747 	}
9748 	return 0;
9749 }
9750 
/**
 *	t4_read_cimq_cfg - read CIM queue configuration
 *	@adap: the adapter
 *	@base: holds the queue base addresses in bytes
 *	@size: holds the queue sizes in bytes
 *	@thres: holds the queue full thresholds in bytes
 *
 *	Returns the current configuration of the CIM queues, starting with
 *	the IBQs, then the OBQs.  @base and @size receive one entry per IBQ
 *	followed by one per OBQ; @thres receives one entry per IBQ only
 *	(no threshold is read for the OBQs).
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;
	/* T4 and later parts use different OBQ counts. */
	int cim_num_obq = is_t4(adap->params.chip) ?
				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		/* Select IBQ i, then read its config word back. */
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* value is in 256-byte units */
		*base++ = G_CIMQBASE(v) * 256;
		*size++ = G_CIMQSIZE(v) * 256;
		*thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
	}
	for (i = 0; i < cim_num_obq; i++) {
		/* Select OBQ i, then read its config word back. */
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* value is in 256-byte units */
		*base++ = G_CIMQBASE(v) * 256;
		*size++ = G_CIMQSIZE(v) * 256;
	}
}
9785 
9786 /**
9787  *	t4_read_cim_ibq - read the contents of a CIM inbound queue
9788  *	@adap: the adapter
9789  *	@qid: the queue index
9790  *	@data: where to store the queue contents
9791  *	@n: capacity of @data in 32-bit words
9792  *
9793  *	Reads the contents of the selected CIM queue starting at address 0 up
9794  *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
9795  *	error and the number of 32-bit words actually read on success.
9796  */
9797 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9798 {
9799 	int i, err, attempts;
9800 	unsigned int addr;
9801 	const unsigned int nwords = CIM_IBQ_SIZE * 4;
9802 
9803 	if (qid > 5 || (n & 3))
9804 		return -EINVAL;
9805 
9806 	addr = qid * nwords;
9807 	if (n > nwords)
9808 		n = nwords;
9809 
9810 	/* It might take 3-10ms before the IBQ debug read access is allowed.
9811 	 * Wait for 1 Sec with a delay of 1 usec.
9812 	 */
9813 	attempts = 1000000;
9814 
9815 	for (i = 0; i < n; i++, addr++) {
9816 		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
9817 			     F_IBQDBGEN);
9818 		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
9819 				      attempts, 1);
9820 		if (err)
9821 			return err;
9822 		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
9823 	}
9824 	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
9825 	return i;
9826 }
9827 
/**
 *	t4_read_cim_obq - read the contents of a CIM outbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	/* T4 and later parts use different OBQ counts. */
	int cim_num_obq = is_t4(adap->params.chip) ?
				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	if ((qid > (cim_num_obq - 1)) || (n & 3))
		return -EINVAL;

	/* Look up the selected OBQ's base and size in the queue config. */
	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	/* Disable debug access before returning. */
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}
9870 
9871 /**
9872  *	t4_cim_read - read a block from CIM internal address space
9873  *	@adap: the adapter
9874  *	@addr: the start address within the CIM address space
9875  *	@n: number of words to read
9876  *	@valp: where to store the result
9877  *
9878  *	Reads a block of 4-byte words from the CIM intenal address space.
9879  */
9880 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
9881 		unsigned int *valp)
9882 {
9883 	int ret = 0;
9884 
9885 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
9886 		return -EBUSY;
9887 
9888 	for ( ; !ret && n--; addr += 4) {
9889 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
9890 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
9891 				      0, 5, 2);
9892 		if (!ret)
9893 			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
9894 	}
9895 	return ret;
9896 }
9897 
9898 /**
9899  *	t4_cim_write - write a block into CIM internal address space
9900  *	@adap: the adapter
9901  *	@addr: the start address within the CIM address space
9902  *	@n: number of words to write
9903  *	@valp: set of values to write
9904  *
9905  *	Writes a block of 4-byte words into the CIM intenal address space.
9906  */
9907 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
9908 		 const unsigned int *valp)
9909 {
9910 	int ret = 0;
9911 
9912 	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
9913 		return -EBUSY;
9914 
9915 	for ( ; !ret && n--; addr += 4) {
9916 		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
9917 		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
9918 		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
9919 				      0, 5, 2);
9920 	}
9921 	return ret;
9922 }
9923 
/*
 * Convenience wrapper around t4_cim_write() for writing a single 4-byte
 * word into the CIM internal address space.
 */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
9929 
/**
 *	t4_cim_read_la - read CIM LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the CIM LA buffer with the most recent entry at
 *	the end	of the returned data and with the entry at @wrptr first.
 *	We try to leave the LA in the running state we find it in.
 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	/* Snapshot the current LA config so we can restore it on exit. */
	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	/* Start reading at the HW write pointer so entries come out
	 * oldest-first.
	 */
	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		/* Point the read pointer at entry idx and trigger a read. */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		if (val & F_UPDBGLARDEN) {
			/* HW never cleared the read-enable bit: give up. */
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;

		/* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
		idx = (idx + 1) & M_UPDBGLARDPTR;
		/*
		 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
		 * identify the 32-bit portion of the full 312-bit data
		 *
		 * NOTE(review): the wrap below uses "% M_UPDBGLARDPTR" where
		 * the increment above uses "& M_UPDBGLARDPTR"; since M_* is a
		 * bit-mask, the modulo looks inconsistent -- confirm against
		 * the Chelsio common code.
		 */
		if (is_t6(adap->params.chip))
			while ((idx & 0xf) > 9)
				idx = (idx + 1) % M_UPDBGLARDPTR;
	}
restart:
	/* If the LA was running when we arrived, restart it, preserving any
	 * earlier error as the function's return value.
	 */
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}
9998 
/**
 *	t4_tp_read_la - read TP LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the TP LA buffer with the most recent entry at
 *	the end	of the returned data and with the entry at @wrptr first.
 *	We leave the LA in the running state we find it in.
 */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	/* Snapshot the config; if the LA is running, freeze it by toggling
	 * the enable bit.
	 */
	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
	if (cfg & F_DBGLAENABLE)			/* freeze LA */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));

	/* Capture the HW write pointer.  NOTE(review): in modes >= 2 a
	 * cleared F_DBGLAWHLF is treated as "last entry only half written"
	 * and that entry is skipped/invalidated -- confirm against the TP
	 * LA documentation.
	 */
	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
	idx = G_DBGLAWPTR(val);
	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
	if (last_incomplete)
		idx = (idx + 1) & M_DBGLARPTR;
	if (wrptr)
		*wrptr = idx;

	/* Keep the configuration bits but clear the read-pointer field so
	 * we can walk the buffer entry by entry below.
	 */
	val &= 0xffff;
	val &= ~V_DBGLARPTR(M_DBGLARPTR);
	val |= adap->params.tp.la_mask;

	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
		idx = (idx + 1) & M_DBGLARPTR;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & F_DBGLAENABLE)		/* restore running state */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     cfg | adap->params.tp.la_mask);
}
10045 
/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
 * state for more than the Warning Threshold then we'll issue a warning about
 * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
 * appears to be hung every Warning Repeat second till the situation clears.
 * If the situation clears, we'll note that as well.
 */
#define SGE_IDMA_WARN_THRESH 1		/* seconds before first warning */
#define SGE_IDMA_WARN_REPEAT 300	/* seconds between repeat warnings */
10055 
10056 /**
10057  *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
10058  *	@adapter: the adapter
10059  *	@idma: the adapter IDMA Monitor state
10060  *
10061  *	Initialize the state of an SGE Ingress DMA Monitor.
10062  */
10063 void t4_idma_monitor_init(struct adapter *adapter,
10064 			  struct sge_idma_monitor_state *idma)
10065 {
10066 	/* Initialize the state variables for detecting an SGE Ingress DMA
10067 	 * hang.  The SGE has internal counters which count up on each clock
10068 	 * tick whenever the SGE finds its Ingress DMA State Engines in the
10069 	 * same state they were on the previous clock tick.  The clock used is
10070 	 * the Core Clock so we have a limit on the maximum "time" they can
10071 	 * record; typically a very small number of seconds.  For instance,
10072 	 * with a 600MHz Core Clock, we can only count up to a bit more than
10073 	 * 7s.  So we'll synthesize a larger counter in order to not run the
10074 	 * risk of having the "timers" overflow and give us the flexibility to
10075 	 * maintain a Hung SGE State Machine of our own which operates across
10076 	 * a longer time frame.
10077 	 */
10078 	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
10079 	idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
10080 }
10081 
/**
 *	t4_idma_monitor - monitor SGE Ingress DMA state
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *	@hz: number of ticks/second
 *	@ticks: number of ticks since the last IDMA Monitor call
 *
 *	Samples the SGE's Ingress DMA Same State counters and maintains a
 *	synthesized per-channel stall timer in the Hz domain, warning (via
 *	CH_WARN) when a channel appears stuck and noting when it resumes.
 */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	 /* Read the SGE Debug Ingress DMA Same State Count registers.  These
	  * are counters inside the SGE which count up on each clock when the
	  * SGE finds its Ingress DMA State Engines in the same states they
	  * were in the previous clock.  The counters will peg out at
	  * 0xffffffff without wrapping around so once they pass the 1s
	  * threshold they'll stay above that till the IDMA state changes.
	  */
	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);

	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue.  If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
				CH_WARN(adapter, "SGE idma%d, queue %u, "
					"resumed after %d seconds\n",
					i, idma->idma_qid[i],
					idma->idma_stalled[i]/hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain.  The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			/* First sample past the 1s threshold: start the
			 * synthesized timer at exactly one second.
			 */
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
		} else {
			idma->idma_stalled[i] += ticks;
			/* idma_warn counts down to the next repeat warning */
			idma->idma_warn[i] -= ticks;
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
			" state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
			i, idma->idma_qid[i], idma->idma_state[i],
			idma->idma_stalled[i]/hz,
			debug0, debug11);
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}
10171 
/**
 *     t4_set_vf_mac_acl - Set MAC address for the specified VF
 *     @adapter: The adapter
 *     @vf: one of the VFs instantiated by the specified PF
 *     @naddr: the number of MAC addresses
 *     @addr: the MAC address(es) to be set to the specified VF
 *
 *     Issues a FW_ACL_MAC_CMD write on behalf of adapter->pf.  Returns the
 *     mailbox command status (0 on success).
 */
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
		      unsigned int naddr, u8 *addr)
{
	struct fw_acl_mac_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_MAC_CMD) |
				    F_FW_CMD_REQUEST |
				    F_FW_CMD_WRITE |
				    V_FW_ACL_MAC_CMD_PFN(adapter->pf) |
				    V_FW_ACL_MAC_CMD_VFN(vf));

	/* Note: Do not enable the ACL */
	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
	cmd.nmac = naddr;

	/* The command carries one MAC slot per PF 0-3.  NOTE(review): for a
	 * PF outside 0-3 no slot is filled and the command is still sent
	 * with zeroed MAC bytes -- confirm callers only use PFs 0-3 here.
	 */
	switch (adapter->pf) {
	case 3:
		memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
		break;
	case 2:
		memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
		break;
	case 1:
		memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
		break;
	case 0:
		memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
		break;
	}

	return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
}
10212 
10213 /* Code which cannot be pushed to kernel.org e.g., cxgbtool ioctl helper
10214  * functions
10215  */
10216 
10217 /**
10218  *	t4_read_pace_tbl - read the pace table
10219  *	@adap: the adapter
10220  *	@pace_vals: holds the returned values
10221  *
10222  *	Returns the values of TP's pace table in microseconds.
10223  */
10224 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
10225 {
10226 	unsigned int i, v;
10227 
10228 	for (i = 0; i < NTX_SCHED; i++) {
10229 		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
10230 		v = t4_read_reg(adap, A_TP_PACE_TABLE);
10231 		pace_vals[i] = dack_ticks_to_usec(adap, v);
10232 	}
10233 }
10234 
/**
 *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
 *	@adap: the adapter
 *	@sched: the scheduler index
 *	@kbps: the byte rate in Kbps
 *	@ipg: the interpacket delay in tenths of nanoseconds
 * 	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Return the current configuration of a HW Tx scheduler.  Either
 *	@kbps or @ipg may be NULL if that value is not wanted.
 */
void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
		     unsigned int *ipg, bool sleep_ok)
{
	unsigned int v, addr, bpt, cpt;

	if (kbps) {
		/* Two schedulers share each config word; the odd scheduler's
		 * fields live in the upper 16 bits.
		 */
		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
		if (sched & 1)
			v >>= 16;
		bpt = (v >> 8) & 0xff;	/* bytes per tick */
		cpt = v & 0xff;		/* core clocks per tick */
		if (!cpt)
			*kbps = 0;	/* scheduler disabled */
		else {
			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
			/* bytes/s -> Kbps: * 8 bits / 1000 == / 125 */
			*kbps = (v * bpt) / 125;
		}
	}
	if (ipg) {
		/* Same even/odd packing for the timer-separator word. */
		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
		if (sched & 1)
			v >>= 16;
		v &= 0xffff;
		/* core ticks -> tenths of nanoseconds */
		*ipg = (10000 * v) / core_ticks_per_usec(adap);
	}
}
10273 
10274 /**
10275  *	t4_load_cfg - download config file
10276  *	@adap: the adapter
10277  *	@cfg_data: the cfg text file to write
10278  *	@size: text file size
10279  *
10280  *	Write the supplied config text file to the card's serial flash.
10281  */
10282 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
10283 {
10284 	int ret, i, n, cfg_addr;
10285 	unsigned int addr;
10286 	unsigned int flash_cfg_start_sec;
10287 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10288 
10289 	cfg_addr = t4_flash_cfg_addr(adap);
10290 	if (cfg_addr < 0)
10291 		return cfg_addr;
10292 
10293 	addr = cfg_addr;
10294 	flash_cfg_start_sec = addr / SF_SEC_SIZE;
10295 
10296 	if (size > FLASH_CFG_MAX_SIZE) {
10297 		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
10298 		       FLASH_CFG_MAX_SIZE);
10299 		return -EFBIG;
10300 	}
10301 
10302 	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
10303 			 sf_sec_size);
10304 	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10305 				     flash_cfg_start_sec + i - 1);
10306 	/*
10307 	 * If size == 0 then we're simply erasing the FLASH sectors associated
10308 	 * with the on-adapter Firmware Configuration File.
10309 	 */
10310 	if (ret || size == 0)
10311 		goto out;
10312 
10313 	/* this will write to the flash up to SF_PAGE_SIZE at a time */
10314 	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
10315 		if ( (size - i) <  SF_PAGE_SIZE)
10316 			n = size - i;
10317 		else
10318 			n = SF_PAGE_SIZE;
10319 		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
10320 		if (ret)
10321 			goto out;
10322 
10323 		addr += SF_PAGE_SIZE;
10324 		cfg_data += SF_PAGE_SIZE;
10325 	}
10326 
10327 out:
10328 	if (ret)
10329 		CH_ERR(adap, "config file %s failed %d\n",
10330 		       (size == 0 ? "clear" : "download"), ret);
10331 	return ret;
10332 }
10333 
10334 /**
10335  *	t5_fw_init_extern_mem - initialize the external memory
10336  *	@adap: the adapter
10337  *
10338  *	Initializes the external memory on T5.
10339  */
10340 int t5_fw_init_extern_mem(struct adapter *adap)
10341 {
10342 	u32 params[1], val[1];
10343 	int ret;
10344 
10345 	if (!is_t5(adap->params.chip))
10346 		return 0;
10347 
10348 	val[0] = 0xff; /* Initialize all MCs */
10349 	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
10350 			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
10351 	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
10352 			FW_CMD_MAX_TIMEOUT);
10353 
10354 	return ret;
10355 }
10356 
/* BIOS boot headers */
typedef struct pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	reserved[22]; /* Reserved per processor Architecture data */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */

/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	size512; /* Current Image Size in units of 512 bytes */
	u8	initentry_point[4]; /* Initialization entry point */
	u8	cksum; /* Checksum computed on the entire Image */
	u8	reserved[16]; /* Reserved */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */

/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2]; /* ROM signature. The value 0xaa55 */
	u8	initialization_size[2]; /* Units 512. Includes this header */
	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
	u8	efi_machine_type[2]; /* Machine type from EFI image header */
	u8	compression_type[2]; /* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8]; /* Reserved */
	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure { /* PCI Data Structure */
	u8	signature[4]; /* Signature. The string "PCIR" */
	u8	vendor_id[2]; /* Vendor Identification */
	u8	device_id[2]; /* Device Identification */
	u8	vital_product[2]; /* Pointer to Vital Product Data */
	u8	length[2]; /* PCIR Data Structure Length */
	u8	revision; /* PCIR Data Structure Revision */
	u8	class_code[3]; /* Class Code */
	u8	image_length[2]; /* Image Length. Multiple of 512B */
	u8	code_revision[2]; /* Revision Level of Code/Data */
	u8	code_type; /* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator; /* Indicator. Identifies the last image in the ROM */
	u8	reserved[2]; /* Reserved */
} pcir_data_t; /* PCI_DATA_STRUCTURE */

/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* max size: 1024 * 512B = 512KB */
	VENDOR_ID = 0x1425, /* Chelsio PCI Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* "PCIR" as a little-endian u32 */
};
10427 
10428 /*
10429  *	modify_device_id - Modifies the device ID of the Boot BIOS image
10430  *	@adatper: the device ID to write.
10431  *	@boot_data: the boot image to modify.
10432  *
10433  *	Write the supplied device ID to the boot BIOS image.
10434  */
10435 static void modify_device_id(int device_id, u8 *boot_data)
10436 {
10437 	legacy_pci_exp_rom_header_t *header;
10438 	pcir_data_t *pcir_header;
10439 	u32 cur_header = 0;
10440 
10441 	/*
10442 	 * Loop through all chained images and change the device ID's
10443 	 */
10444 	while (1) {
10445 		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
10446 		pcir_header = (pcir_data_t *) &boot_data[cur_header +
10447 			      le16_to_cpu(*(u16*)header->pcir_offset)];
10448 
10449 		/*
10450 		 * Only modify the Device ID if code type is Legacy or HP.
10451 		 * 0x00: Okay to modify
10452 		 * 0x01: FCODE. Do not be modify
10453 		 * 0x03: Okay to modify
10454 		 * 0x04-0xFF: Do not modify
10455 		 */
10456 		if (pcir_header->code_type == 0x00) {
10457 			u8 csum = 0;
10458 			int i;
10459 
10460 			/*
10461 			 * Modify Device ID to match current adatper
10462 			 */
10463 			*(u16*) pcir_header->device_id = device_id;
10464 
10465 			/*
10466 			 * Set checksum temporarily to 0.
10467 			 * We will recalculate it later.
10468 			 */
10469 			header->cksum = 0x0;
10470 
10471 			/*
10472 			 * Calculate and update checksum
10473 			 */
10474 			for (i = 0; i < (header->size512 * 512); i++)
10475 				csum += (u8)boot_data[cur_header + i];
10476 
10477 			/*
10478 			 * Invert summed value to create the checksum
10479 			 * Writing new checksum value directly to the boot data
10480 			 */
10481 			boot_data[cur_header + 7] = -csum;
10482 
10483 		} else if (pcir_header->code_type == 0x03) {
10484 
10485 			/*
10486 			 * Modify Device ID to match current adatper
10487 			 */
10488 			*(u16*) pcir_header->device_id = device_id;
10489 
10490 		}
10491 
10492 
10493 		/*
10494 		 * Check indicator element to identify if this is the last
10495 		 * image in the ROM.
10496 		 */
10497 		if (pcir_header->indicator & 0x80)
10498 			break;
10499 
10500 		/*
10501 		 * Move header pointer up to the next image in the ROM.
10502 		 */
10503 		cur_header += header->size512 * 512;
10504 	}
10505 }
10506 
#ifdef CHELSIO_T4_DIAGS
/*
 *	t4_erase_sf - Erase entire serial Flash region
 *	@adap: the adapter
 *
 *	Clears the entire serial flash region, up to and including the
 *	sectors holding the FW image.  Returns 0 on success or the error
 *	from t4_flash_erase_sectors().
 */
int t4_erase_sf(struct adapter *adap)
{
	unsigned int nsectors;
	int ret;

	/* Never erase more sectors than the device actually has. */
	nsectors = FLASH_END_SEC;
	if (nsectors > adap->params.sf_nsec)
		nsectors = adap->params.sf_nsec;

	/*
	 * Erase all sectors of flash before and including the FW.
	 * Flash layout is in t4_hw.h.
	 */
	ret = t4_flash_erase_sectors(adap, 0, nsectors - 1);
	if (ret)
		CH_ERR(adap, "Erasing serial flash failed, error %d\n", ret);
	return ret;
}
#endif
10531 
/*
 *	t4_load_boot - download boot flash
 *	@adap: the adapter
 *	@boot_data: the boot image to write
 *	@boot_addr: offset in flash to write boot_data (in 1KB units; it is
 *		multiplied by 1024 below)
 *	@size: image size in bytes; 0 means just erase the boot sectors
 *
 *	Write the supplied boot image to the card's serial flash.
 *	The boot image has the following sections: a 28-byte header and the
 *	boot image.
 *
 *	NOTE(review): the write loop below assumes @size is a multiple of
 *	SF_PAGE_SIZE and at least SF_PAGE_SIZE; since @size is unsigned,
 *	any other value makes `size -= SF_PAGE_SIZE` wrap and the loop run
 *	past the end of @boot_data -- confirm with callers.
 */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	pci_exp_rom_header_t *header;
	int pcir_offset ;
	pcir_data_t *pcir_header;
	int ret, addr;
	uint16_t device_id;
	unsigned int i;
	unsigned int boot_sector = (boot_addr * 1024 );
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Make sure the boot image does not encroach on the firmware region
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	/*
	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
	 * and Boot configuration data sections. These 3 boot sections span
	 * sectors 0 to 7 in flash and live right before the FW image location.
	 */
	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
			sf_sec_size);
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
				     (boot_sector >> 16) + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter option ROM file
	 */
	if (ret || (size == 0))
		goto out;

	/* Get boot header */
	header = (pci_exp_rom_header_t *)boot_data;
	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
	/* PCIR Data Structure */
	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors.  We ought to check for
	 * more but it's not worth it for now ...
	 *
	 * NOTE(review): these checks run after the erase above, so a
	 * rejected image still leaves the boot sectors erased.
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

#ifndef CHELSIO_T4_DIAGS
	/*
	 * Check BOOT ROM header signature
	 */
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
		CH_ERR(adap, "Boot image missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check PCI header signature
	 */
	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
		CH_ERR(adap, "PCI header missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check Vendor ID matches Chelsio ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
		CH_ERR(adap, "Vendor ID missing signature\n");
		return -EINVAL;
	}
#endif

	/*
	 * Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
	/* Want to deal with PF 0 so I strip off PF 4 indicator */
	device_id = device_id & 0xf0ff;

	/*
	 * Check PCIE Device ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
		/*
		 * Change the device ID in the Boot BIOS image to match
		 * the Device ID of the current adapter.
		 */
		modify_device_id(device_id, boot_data);
	}

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image. This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	/* Finally commit the header page so a partial image is never valid. */
	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
			     (const u8 *)header, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}
10663 
10664 /*
10665  *	t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
10666  *	@adapter: the adapter
10667  *
10668  *	Return the address within the flash where the OptionROM Configuration
10669  *	is stored, or an error if the device FLASH is too small to contain
10670  *	a OptionROM Configuration.
10671  */
10672 static int t4_flash_bootcfg_addr(struct adapter *adapter)
10673 {
10674 	/*
10675 	 * If the device FLASH isn't large enough to hold a Firmware
10676 	 * Configuration File, return an error.
10677 	 */
10678 	if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
10679 		return -ENOSPC;
10680 
10681 	return FLASH_BOOTCFG_START;
10682 }
10683 
10684 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
10685 {
10686 	int ret, i, n, cfg_addr;
10687 	unsigned int addr;
10688 	unsigned int flash_cfg_start_sec;
10689 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10690 
10691 	cfg_addr = t4_flash_bootcfg_addr(adap);
10692 	if (cfg_addr < 0)
10693 		return cfg_addr;
10694 
10695 	addr = cfg_addr;
10696 	flash_cfg_start_sec = addr / SF_SEC_SIZE;
10697 
10698 	if (size > FLASH_BOOTCFG_MAX_SIZE) {
10699 		CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
10700 			FLASH_BOOTCFG_MAX_SIZE);
10701 		return -EFBIG;
10702 	}
10703 
10704 	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
10705 			 sf_sec_size);
10706 	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10707 					flash_cfg_start_sec + i - 1);
10708 
10709 	/*
10710 	 * If size == 0 then we're simply erasing the FLASH sectors associated
10711 	 * with the on-adapter OptionROM Configuration File.
10712 	 */
10713 	if (ret || size == 0)
10714 		goto out;
10715 
10716 	/* this will write to the flash up to SF_PAGE_SIZE at a time */
10717 	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
10718 		if ( (size - i) <  SF_PAGE_SIZE)
10719 			n = size - i;
10720 		else
10721 			n = SF_PAGE_SIZE;
10722 		ret = t4_write_flash(adap, addr, n, cfg_data, 0);
10723 		if (ret)
10724 			goto out;
10725 
10726 		addr += SF_PAGE_SIZE;
10727 		cfg_data += SF_PAGE_SIZE;
10728 	}
10729 
10730 out:
10731 	if (ret)
10732 		CH_ERR(adap, "boot config data %s failed %d\n",
10733 				(size == 0 ? "clear" : "download"), ret);
10734 	return ret;
10735 }
10736 
10737 /**
10738  *	t4_set_filter_mode - configure the optional components of filter tuples
10739  *	@adap: the adapter
10740  *	@mode_map: a bitmap selcting which optional filter components to enable
10741  * 	@sleep_ok: if true we may sleep while awaiting command completion
10742  *
10743  *	Sets the filter mode by selecting the optional components to enable
10744  *	in filter tuples.  Returns 0 on success and a negative error if the
10745  *	requested mode needs more bits than are available for optional
10746  *	components.
10747  */
10748 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
10749 		       bool sleep_ok)
10750 {
10751 	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
10752 
10753 	int i, nbits = 0;
10754 
10755 	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
10756 		if (mode_map & (1 << i))
10757 			nbits += width[i];
10758 	if (nbits > FILTER_OPT_LEN)
10759 		return -EINVAL;
10760 
10761 	t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);
10762 
10763 	return 0;
10764 }
10765 
10766 /**
10767  *	t4_clr_port_stats - clear port statistics
10768  *	@adap: the adapter
10769  *	@idx: the port index
10770  *
10771  *	Clear HW statistics for the given port.
10772  */
10773 void t4_clr_port_stats(struct adapter *adap, int idx)
10774 {
10775 	unsigned int i;
10776 	u32 bgmap = t4_get_mps_bg_map(adap, idx);
10777 	u32 port_base_addr;
10778 
10779 	if (is_t4(adap->params.chip))
10780 		port_base_addr = PORT_BASE(idx);
10781 	else
10782 		port_base_addr = T5_PORT_BASE(idx);
10783 
10784 	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
10785 			i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
10786 		t4_write_reg(adap, port_base_addr + i, 0);
10787 	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
10788 			i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
10789 		t4_write_reg(adap, port_base_addr + i, 0);
10790 	for (i = 0; i < 4; i++)
10791 		if (bgmap & (1 << i)) {
10792 			t4_write_reg(adap,
10793 			A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
10794 			t4_write_reg(adap,
10795 			A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
10796 		}
10797 }
10798 
10799 /**
10800  *	t4_i2c_rd - read I2C data from adapter
10801  *	@adap: the adapter
10802  *	@port: Port number if per-port device; <0 if not
10803  *	@devid: per-port device ID or absolute device ID
10804  *	@offset: byte offset into device I2C space
10805  *	@len: byte length of I2C space data
10806  *	@buf: buffer in which to return I2C data
10807  *
10808  *	Reads the I2C data from the indicated device and location.
10809  */
10810 int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
10811 	      int port, unsigned int devid,
10812 	      unsigned int offset, unsigned int len,
10813 	      u8 *buf)
10814 {
10815 	u32 ldst_addrspace;
10816 	struct fw_ldst_cmd ldst;
10817 	int ret;
10818 
10819 	if (port >= 4 ||
10820 	    devid >= 256 ||
10821 	    offset >= 256 ||
10822 	    len > sizeof ldst.u.i2c.data)
10823 		return -EINVAL;
10824 
10825 	memset(&ldst, 0, sizeof ldst);
10826 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
10827 	ldst.op_to_addrspace =
10828 		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
10829 			    F_FW_CMD_REQUEST |
10830 			    F_FW_CMD_READ |
10831 			    ldst_addrspace);
10832 	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
10833 	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
10834 	ldst.u.i2c.did = devid;
10835 	ldst.u.i2c.boffset = offset;
10836 	ldst.u.i2c.blen = len;
10837 	ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
10838 	if (!ret)
10839 		memcpy(buf, ldst.u.i2c.data, len);
10840 	return ret;
10841 }
10842 
10843 /**
10844  *	t4_i2c_wr - write I2C data to adapter
10845  *	@adap: the adapter
10846  *	@port: Port number if per-port device; <0 if not
10847  *	@devid: per-port device ID or absolute device ID
10848  *	@offset: byte offset into device I2C space
10849  *	@len: byte length of I2C space data
10850  *	@buf: buffer containing new I2C data
10851  *
10852  *	Write the I2C data to the indicated device and location.
10853  */
10854 int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
10855 	      int port, unsigned int devid,
10856 	      unsigned int offset, unsigned int len,
10857 	      u8 *buf)
10858 {
10859 	u32 ldst_addrspace;
10860 	struct fw_ldst_cmd ldst;
10861 
10862 	if (port >= 4 ||
10863 	    devid >= 256 ||
10864 	    offset >= 256 ||
10865 	    len > sizeof ldst.u.i2c.data)
10866 		return -EINVAL;
10867 
10868 	memset(&ldst, 0, sizeof ldst);
10869 	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
10870 	ldst.op_to_addrspace =
10871 		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
10872 			    F_FW_CMD_REQUEST |
10873 			    F_FW_CMD_WRITE |
10874 			    ldst_addrspace);
10875 	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
10876 	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
10877 	ldst.u.i2c.did = devid;
10878 	ldst.u.i2c.boffset = offset;
10879 	ldst.u.i2c.blen = len;
10880 	memcpy(ldst.u.i2c.data, buf, len);
10881 	return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
10882 }
10883 
10884 /**
10885  * 	t4_sge_ctxt_rd - read an SGE context through FW
10886  * 	@adap: the adapter
10887  * 	@mbox: mailbox to use for the FW command
10888  * 	@cid: the context id
10889  * 	@ctype: the context type
10890  * 	@data: where to store the context data
10891  *
10892  * 	Issues a FW command through the given mailbox to read an SGE context.
10893  */
10894 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
10895 		   enum ctxt_type ctype, u32 *data)
10896 {
10897 	int ret;
10898 	struct fw_ldst_cmd c;
10899 
10900 	if (ctype == CTXT_EGRESS)
10901 		ret = FW_LDST_ADDRSPC_SGE_EGRC;
10902 	else if (ctype == CTXT_INGRESS)
10903 		ret = FW_LDST_ADDRSPC_SGE_INGC;
10904 	else if (ctype == CTXT_FLM)
10905 		ret = FW_LDST_ADDRSPC_SGE_FLMC;
10906 	else
10907 		ret = FW_LDST_ADDRSPC_SGE_CONMC;
10908 
10909 	memset(&c, 0, sizeof(c));
10910 	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
10911 					F_FW_CMD_REQUEST | F_FW_CMD_READ |
10912 					V_FW_LDST_CMD_ADDRSPACE(ret));
10913 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
10914 	c.u.idctxt.physid = cpu_to_be32(cid);
10915 
10916 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
10917 	if (ret == 0) {
10918 		data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
10919 		data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
10920 		data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
10921 		data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
10922 		data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
10923 		data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
10924 	}
10925 	return ret;
10926 }
10927 
10928 /**
10929  * 	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
10930  * 	@adap: the adapter
10931  * 	@cid: the context id
10932  * 	@ctype: the context type
10933  * 	@data: where to store the context data
10934  *
10935  * 	Reads an SGE context directly, bypassing FW.  This is only for
10936  * 	debugging when FW is unavailable.
10937  */
10938 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
10939 		      u32 *data)
10940 {
10941 	int i, ret;
10942 
10943 	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
10944 	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
10945 	if (!ret)
10946 		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
10947 			*data++ = t4_read_reg(adap, i);
10948 	return ret;
10949 }
10950 
10951 int t4_sched_config(struct adapter *adapter, int type, int minmaxen)
10952 {
10953 	struct fw_sched_cmd cmd;
10954 
10955 	memset(&cmd, 0, sizeof(cmd));
10956 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10957 				      F_FW_CMD_REQUEST |
10958 				      F_FW_CMD_WRITE);
10959 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10960 
10961 	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
10962 	cmd.u.config.type = type;
10963 	cmd.u.config.minmaxen = minmaxen;
10964 
10965 	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
10966 			       NULL, 1);
10967 }
10968 
10969 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
10970 		    int rateunit, int ratemode, int channel, int class,
10971 		    int minrate, int maxrate, int weight, int pktsize)
10972 {
10973 	struct fw_sched_cmd cmd;
10974 
10975 	memset(&cmd, 0, sizeof(cmd));
10976 	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
10977 				      F_FW_CMD_REQUEST |
10978 				      F_FW_CMD_WRITE);
10979 	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10980 
10981 	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10982 	cmd.u.params.type = type;
10983 	cmd.u.params.level = level;
10984 	cmd.u.params.mode = mode;
10985 	cmd.u.params.ch = channel;
10986 	cmd.u.params.cl = class;
10987 	cmd.u.params.unit = rateunit;
10988 	cmd.u.params.rate = ratemode;
10989 	cmd.u.params.min = cpu_to_be32(minrate);
10990 	cmd.u.params.max = cpu_to_be32(maxrate);
10991 	cmd.u.params.weight = cpu_to_be16(weight);
10992 	cmd.u.params.pktsize = cpu_to_be16(pktsize);
10993 
10994 	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
10995 			       NULL, 1);
10996 }
10997 
10998 /*
10999  *	t4_config_watchdog - configure (enable/disable) a watchdog timer
11000  *	@adapter: the adapter
11001  * 	@mbox: mailbox to use for the FW command
11002  * 	@pf: the PF owning the queue
11003  * 	@vf: the VF owning the queue
11004  *	@timeout: watchdog timeout in ms
11005  *	@action: watchdog timer / action
11006  *
11007  *	There are separate watchdog timers for each possible watchdog
11008  *	action.  Configure one of the watchdog timers by setting a non-zero
11009  *	timeout.  Disable a watchdog timer by using a timeout of zero.
11010  */
11011 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
11012 		       unsigned int pf, unsigned int vf,
11013 		       unsigned int timeout, unsigned int action)
11014 {
11015 	struct fw_watchdog_cmd wdog;
11016 	unsigned int ticks;
11017 
11018 	/*
11019 	 * The watchdog command expects a timeout in units of 10ms so we need
11020 	 * to convert it here (via rounding) and force a minimum of one 10ms
11021 	 * "tick" if the timeout is non-zero but the convertion results in 0
11022 	 * ticks.
11023 	 */
11024 	ticks = (timeout + 5)/10;
11025 	if (timeout && !ticks)
11026 		ticks = 1;
11027 
11028 	memset(&wdog, 0, sizeof wdog);
11029 	wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
11030 				     F_FW_CMD_REQUEST |
11031 				     F_FW_CMD_WRITE |
11032 				     V_FW_PARAMS_CMD_PFN(pf) |
11033 				     V_FW_PARAMS_CMD_VFN(vf));
11034 	wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
11035 	wdog.timeout = cpu_to_be32(ticks);
11036 	wdog.action = cpu_to_be32(action);
11037 
11038 	return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
11039 }
11040 
11041 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
11042 {
11043 	struct fw_devlog_cmd devlog_cmd;
11044 	int ret;
11045 
11046 	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
11047 	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
11048 					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
11049 	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
11050 	ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
11051 			 sizeof(devlog_cmd), &devlog_cmd);
11052 	if (ret)
11053 		return ret;
11054 
11055 	*level = devlog_cmd.level;
11056 	return 0;
11057 }
11058 
11059 int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
11060 {
11061 	struct fw_devlog_cmd devlog_cmd;
11062 
11063 	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
11064 	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
11065 					     F_FW_CMD_REQUEST |
11066 					     F_FW_CMD_WRITE);
11067 	devlog_cmd.level = level;
11068 	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
11069 	return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
11070 			  sizeof(devlog_cmd), &devlog_cmd);
11071 }
11072 
11073