1 /* 2 * This file and its contents are supplied under the terms of the 3 * Common Development and Distribution License ("CDDL"), version 1.0. 4 * You may only use this file in accordance with the terms of version 5 * 1.0 of the CDDL. 6 * 7 * A full copy of the text of the CDDL should have accompanied this 8 * source. A copy of the CDDL is also available via the Internet at 9 * http://www.illumos.org/license/CDDL. 10 */ 11 12 /* 13 * This file is part of the Chelsio T4/T5/T6 Ethernet driver. 14 * 15 * Copyright (C) 2003-2019 Chelsio Communications. All rights reserved. 16 * 17 * This program is distributed in the hope that it will be useful, but WITHOUT 18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 19 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this 20 * release for licensing terms and conditions. 21 */ 22 23 /* 24 * Copyright 2020 RackTop Systems, Inc. 25 */ 26 27 #include "common.h" 28 #include "t4_regs.h" 29 #include "t4_regs_values.h" 30 #include "t4fw_interface.h" 31 32 /** 33 * t4_wait_op_done_val - wait until an operation is completed 34 * @adapter: the adapter performing the operation 35 * @reg: the register to check for completion 36 * @mask: a single-bit field within @reg that indicates completion 37 * @polarity: the value of the field when the operation is completed 38 * @attempts: number of check iterations 39 * @delay: delay in usecs between iterations 40 * @valp: where to store the value of the register at completion time 41 * 42 * Wait until an operation is completed by checking a bit in a register 43 * up to @attempts times. If @valp is not NULL the value of the register 44 * at the time it indicated completion is stored there. Returns 0 if the 45 * operation completes and -EAGAIN otherwise. 
46 */ 47 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, 48 int polarity, int attempts, int delay, u32 *valp) 49 { 50 while (1) { 51 u32 val = t4_read_reg(adapter, reg); 52 53 if (!!(val & mask) == polarity) { 54 if (valp) 55 *valp = val; 56 return 0; 57 } 58 if (--attempts == 0) 59 return -EAGAIN; 60 if (delay) 61 udelay(delay); 62 } 63 } 64 65 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, 66 int polarity, int attempts, int delay) 67 { 68 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, 69 delay, NULL); 70 } 71 72 /** 73 * t4_set_reg_field - set a register field to a value 74 * @adapter: the adapter to program 75 * @addr: the register address 76 * @mask: specifies the portion of the register to modify 77 * @val: the new value for the register field 78 * 79 * Sets a register field specified by the supplied mask to the 80 * given value. 81 */ 82 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, 83 u32 val) 84 { 85 u32 v = t4_read_reg(adapter, addr) & ~mask; 86 87 t4_write_reg(adapter, addr, v | val); 88 (void) t4_read_reg(adapter, addr); /* flush */ 89 } 90 91 /** 92 * t4_read_indirect - read indirectly addressed registers 93 * @adap: the adapter 94 * @addr_reg: register holding the indirect address 95 * @data_reg: register holding the value of the indirect register 96 * @vals: where the read register values are stored 97 * @nregs: how many indirect registers to read 98 * @start_idx: index of first indirect register to read 99 * 100 * Reads registers that are accessed indirectly through an address/data 101 * register pair. 
102 */ 103 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, 104 unsigned int data_reg, u32 *vals, 105 unsigned int nregs, unsigned int start_idx) 106 { 107 while (nregs--) { 108 t4_write_reg(adap, addr_reg, start_idx); 109 *vals++ = t4_read_reg(adap, data_reg); 110 start_idx++; 111 } 112 } 113 114 /** 115 * t4_write_indirect - write indirectly addressed registers 116 * @adap: the adapter 117 * @addr_reg: register holding the indirect addresses 118 * @data_reg: register holding the value for the indirect registers 119 * @vals: values to write 120 * @nregs: how many indirect registers to write 121 * @start_idx: address of first indirect register to write 122 * 123 * Writes a sequential block of registers that are accessed indirectly 124 * through an address/data register pair. 125 */ 126 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, 127 unsigned int data_reg, const u32 *vals, 128 unsigned int nregs, unsigned int start_idx) 129 { 130 while (nregs--) { 131 t4_write_reg(adap, addr_reg, start_idx++); 132 t4_write_reg(adap, data_reg, *vals++); 133 } 134 } 135 136 /* 137 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor 138 * mechanism. This guarantees that we get the real value even if we're 139 * operating within a Virtual Machine and the Hypervisor is trapping our 140 * Configuration Space accesses. 141 * 142 * N.B. This routine should only be used as a last resort: the firmware uses 143 * the backdoor registers on a regular basis and we can end up 144 * conflicting with it's uses! 
145 */ 146 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val) 147 { 148 u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg); 149 150 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) 151 req |= F_ENABLE; 152 else 153 req |= F_T6_ENABLE; 154 155 if (is_t4(adap->params.chip)) 156 req |= F_LOCALCFG; 157 158 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req); 159 *val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA); 160 161 /* Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a 162 * Configuration Space read. (None of the other fields matter when 163 * F_ENABLE is 0 so a simple register write is easier than a 164 * read-modify-write via t4_set_reg_field().) 165 */ 166 t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0); 167 } 168 169 /* 170 * t4_report_fw_error - report firmware error 171 * @adap: the adapter 172 * 173 * The adapter firmware can indicate error conditions to the host. 174 * If the firmware has indicated an error, print out the reason for 175 * the firmware error. 176 */ 177 static void t4_report_fw_error(struct adapter *adap) 178 { 179 static const char *const reason[] = { 180 "Crash", /* PCIE_FW_EVAL_CRASH */ 181 "During Device Preparation", /* PCIE_FW_EVAL_PREP */ 182 "During Device Configuration", /* PCIE_FW_EVAL_CONF */ 183 "During Device Initialization", /* PCIE_FW_EVAL_INIT */ 184 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */ 185 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */ 186 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */ 187 "Reserved", /* reserved */ 188 }; 189 u32 pcie_fw; 190 191 pcie_fw = t4_read_reg(adap, A_PCIE_FW); 192 if (pcie_fw & F_PCIE_FW_ERR) { 193 CH_ERR(adap, "Firmware reports adapter error: %s\n", 194 reason[G_PCIE_FW_EVAL(pcie_fw)]); 195 adap->flags &= ~FW_OK; 196 } 197 } 198 199 /* 200 * Get the reply to a mailbox command and store it in @rpl in big-endian order. 
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
    u32 mbox_addr)
{
	/* One flit is 8 bytes; copy nflit flits starting at mbox_addr. */
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 *
 * The firmware identifies the source file, line number and the two values
 * involved in the failed assertion; we just log them.
 */
static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
{
	CH_ALERT(adap,
	    "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
	    asrt->u.assert.filename_0_7,
	    be32_to_cpu(asrt->u.assert.line),
	    be32_to_cpu(asrt->u.assert.x),
	    be32_to_cpu(asrt->u.assert.y));
}

/* Value read from the mailbox control register when our PF has no access. */
#define X_CIM_PF_NOACCESS 0xeeeeeeee

/*
 * If the OS Driver wants busy waits to keep a watchdog happy, tap it during
 * busy loops which don't sleep.
 */
#ifdef T4_OS_NEEDS_TOUCH_NMI_WATCHDOG
#define	T4_OS_TOUCH_NMI_WATCHDOG()	t4_os_touch_nmi_watchdog()
#else
#define	T4_OS_TOUCH_NMI_WATCHDOG()
#endif

#ifdef T4_OS_LOG_MBOX_CMDS
/**
 * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
 * @adapter: the adapter
 * @cmd: the Firmware Mailbox Command or Reply
 * @size: command length in bytes
 * @access: the time (ms) needed to access the Firmware Mailbox
 * @execute: the time (ms) the command spent being executed
 *
 * Stores @cmd in the next slot of the adapter's circular mailbox log,
 * zero-filling the slot past @size bytes.
 */
static void t4_record_mbox(struct adapter *adapter,
    const __be64 *cmd, unsigned int size,
    int access, int execute)
{
	struct mbox_cmd_log *log = adapter->mbox_log;
	struct mbox_cmd *entry;
	int i;

	/* Claim the next slot; the log wraps around. */
	entry = mbox_cmd_log_entry(log, log->cursor++);
	if (log->cursor == log->size)
		log->cursor = 0;

	/* Copy the command flits and zero-pad the rest of the slot. */
	for (i = 0; i < size/8; i++)
		entry->cmd[i] = be64_to_cpu(cmd[i]);
	while (i < MBOX_LEN/8)
		entry->cmd[i++] = 0;
	entry->timestamp = t4_os_timestamp();
	entry->seqno = log->seqno++;
	entry->access = access;
	entry->execute = execute;
}

#define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \
	t4_record_mbox(__adapter, __cmd, __size, __access, __execute)

#else /* !T4_OS_LOG_MBOX_CMDS */

#define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \
	/* nothing */

#endif /* !T4_OS_LOG_MBOX_CMDS */

/**
 * t4_record_mbox_marker - record a marker in the mailbox log
 * @adapter: the adapter
 * @marker: byte array marker
 * @size: marker size in bytes
 *
 * We inject a "fake mailbox command" into the Firmware Mailbox Log
 * using a known command token and then the bytes of the specified
 * marker.  This lets debugging code inject markers into the log to
 * help identify which commands are in response to higher level code.
 */
void t4_record_mbox_marker(struct adapter *adapter,
    const void *marker, unsigned int size)
{
#ifdef T4_OS_LOG_MBOX_CMDS
	__be64 marker_cmd[MBOX_LEN/8];
	const unsigned int max_marker = sizeof marker_cmd - sizeof (__be64);
	unsigned int marker_cmd_size;

	/* Truncate markers that don't fit after the command token. */
	if (size > max_marker)
		size = max_marker;

	/* An all-ones first flit is the "fake command" token. */
	marker_cmd[0] = cpu_to_be64(~0LLU);
	memcpy(&marker_cmd[1], marker, size);
	memset((unsigned char *)&marker_cmd[1] + size, 0, max_marker - size);
	marker_cmd_size = sizeof (__be64) + roundup(size, sizeof (__be64));

	t4_record_mbox(adapter, marker_cmd, marker_cmd_size, 0, 0);
#endif /* T4_OS_LOG_MBOX_CMDS */
}

/*
 * Delay time in microseconds to wait for mailbox access/fw reply
 * to mailbox command
 */
#define	MIN_MBOX_CMD_DELAY 900
#define	MBOX_CMD_DELAY 1000

/**
 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 * @timeout: time to wait for command to finish before timing out
 *	(negative
 *	implies @sleep_ok=false)
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command.  If @rpl is not %NULL it is used to
 * store the FW's reply to the command.  The command and its optional
 * reply are of the same length.  Some FW commands like RESET and
 * INITIALIZE can take a considerable amount of time to execute.
 * @sleep_ok determines whether we may sleep while awaiting the response.
 * If sleeping is allowed we use progressive backoff otherwise we spin.
 * Note that passing in a negative @timeout is an alternate mechanism
 * for specifying @sleep_ok=false.  This is useful when a higher level
 * interface allows for specification of @timeout but not @sleep_ok ...
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
    int size, void *rpl, bool sleep_ok, int timeout)
{
#ifdef T4_OS_LOG_MBOX_CMDS
	u16 access = 0;
#endif /* T4_OS_LOG_MBOX_CMDS */
	u32 v;
	u64 res;
	int i, ret;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	struct t4_mbox_list entry;
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

	/*
	 * Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	t4_mbox_list_add(adap, &entry);

	for (i = 0; ; i++) {
		/*
		 * If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rarely
		 * contend on access to the mailbox ...  Also check for a
		 * firmware error which we'll report as a device error.
		 */
		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		if (i > 4*timeout || (pcie_fw & F_PCIE_FW_ERR)) {
			t4_mbox_list_del(adap, &entry);
			t4_report_fw_error(adap);
			ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
			T4_RECORD_MBOX(adap, cmd, size, ret, 0);
			return ret;
		}

		/*
		 * If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (t4_mbox_list_first_entry(adap) == &entry)
			break;

		/*
		 * Delay for a bit before checking again ...
		 */
		if (sleep_ok) {
			usleep_range(MIN_MBOX_CMD_DELAY, MBOX_CMD_DELAY);
		} else {
			T4_OS_TOUCH_NMI_WATCHDOG();
			udelay(MBOX_CMD_DELAY);
		}
	}
#ifdef T4_OS_LOG_MBOX_CMDS
	access = i;
#endif /* T4_OS_LOG_MBOX_CMDS */

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, dequeue ourselves from the
	 * mailbox atomic access list and report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		t4_mbox_list_del(adap, &entry);
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		T4_RECORD_MBOX(adap, cmd, size, access, ret);
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		    "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		    (unsigned long long)t4_read_reg64(adap, data_reg),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	T4_RECORD_MBOX(adap, cmd, size, access, 0);
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	/*
	 * XXX It's not clear that we need this anymore now
	 * XXX that we have mailbox logging ...
	 */
	CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

	/* Hand the mailbox over to the firmware to start execution. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	(void) t4_read_reg(adap, ctl_reg);	/* flush write */

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	for (i = 0;
	    !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
	    i < timeout;
	    i++) {
		if (sleep_ok) {
			usleep_range(MIN_MBOX_CMD_DELAY, MBOX_CMD_DELAY);
		} else {
			T4_OS_TOUCH_NMI_WATCHDOG();
			udelay(MBOX_CMD_DELAY);
		}

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			/* Ownership without a valid message: release, retry. */
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
				    V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			t4_mbox_list_del(adap, &entry);

			T4_RECORD_MBOX(adap, cmd_rpl, size, access, i + 1);

			/*
			 * XXX It's not clear that we need this anymore now
			 * XXX that we have mailbox logging ...
			 */
			CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);
			CH_MSG(adap, INFO, HW,
			    "command completed in %d ms (%ssleeping)\n",
			    i + 1, sleep_ok ? "" : "non-");

			/*
			 * A FW_DEBUG_CMD reply is an assertion report, not
			 * the answer to our command.
			 */
			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	t4_mbox_list_del(adap, &entry);

	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	T4_RECORD_MBOX(adap, cmd, size, access, ret);
	CH_ERR(adap, "command 0x%x in mailbox %d timed out\n",
	    *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}

#ifdef CONFIG_CUDBG
/*
 * The maximum number of times to iterate for FW reply before
 * issuing a mailbox timeout
 */
#define	FW_REPLY_WAIT_LOOP 6000000

/**
 * t4_wr_mbox_meat_timeout_panic - send a command to FW through the given
 * mailbox.  This function is a minimal version of t4_wr_mbox_meat_timeout()
 * and is only invoked during a kernel crash.  Since this function is
 * called through an atomic notifier chain, we cannot sleep awaiting a
 * response from FW, hence repeatedly loop until we get a reply.
 *
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 */

static int t4_wr_mbox_meat_timeout_panic(struct adapter *adap, int mbox,
    const void *cmd, int size, void *rpl)
{
	u32 v;
	u64 res;
	int i, ret;
	u64 cnt;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * Check for a firmware error which we'll report as a
	 * device error.
	 */
	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR) {
		t4_report_fw_error(adap);
		ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
		return ret;
	}

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		    "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		    (unsigned long long)t4_read_reg64(adap, data_reg),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		    (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

	/* Hand the mailbox over to the firmware to start execution. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);	/* flush write */

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.  We cannot sleep here, so we simply spin for up
	 * to FW_REPLY_WAIT_LOOP iterations.
	 */
	for (cnt = 0;
	    !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
	    cnt < FW_REPLY_WAIT_LOOP;
	    cnt++) {
		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			/* Ownership without a valid message: release, retry. */
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
				    V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));

			CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

			/*
			 * A FW_DEBUG_CMD reply is an assertion report, not
			 * the answer to our command.
			 */
			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	    *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}
#endif

/*
 * t4_wr_mbox_meat - send a mailbox command with the default timeout.
 *
 * Dispatches to the panic-safe variant when the adapter is flagged as
 * being in a kernel-crash context (CONFIG_CUDBG builds only).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
    void *rpl, bool sleep_ok)
{
#ifdef CONFIG_CUDBG
	if (adap->flags & K_CRASH)
		return t4_wr_mbox_meat_timeout_panic(adap, mbox, cmd, size,
		    rpl);
	else
#endif
		return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
		    sleep_ok, FW_CMD_MAX_TIMEOUT);

}

/*
 * t4_edc_err_read - log EDC ECC error information for T5+ chips.
 *
 * Dumps the EDC ECC error address and BIST status data registers for the
 * given EDC index.  Always returns 0; unsupported chips/indices are only
 * warned about.
 */
static int t4_edc_err_read(struct adapter *adap, int idx)
{
	u32 edc_ecc_err_addr_reg;
	u32 edc_bist_status_rdata_reg;

	/* The T5-style EDC register layout does not exist on T4. */
	if (is_t4(adap->params.chip)) {
		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
		return 0;
	}
	if (idx != MEM_EDC0 && idx != MEM_EDC1) {
		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
		return 0;
	}

	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);

	CH_WARN(adap,
	    "edc%d err addr 0x%x: 0x%x.\n",
	    idx, edc_ecc_err_addr_reg,
	    t4_read_reg(adap, edc_ecc_err_addr_reg));
	CH_WARN(adap,
	    "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
	    edc_bist_status_rdata_reg,
	    (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
	    (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
	    (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
	    (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
	    (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
	    (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
	    (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
	    (unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
	    (unsigned long
long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));

	return 0;
}

/**
 * t4_memory_rw_addr - read/write adapter memory via PCIE memory window
 * @adap: the adapter
 * @win: PCI-E Memory Window to use
 * @addr: address within adapter memory
 * @len: amount of memory to transfer
 * @hbuf: host memory buffer
 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 * Reads/writes an [almost] arbitrary memory region in the firmware: the
 * firmware memory address and host buffer must be aligned on 32-bit
 * boundaries; the length may be arbitrary.
 *
 * NOTES:
 *  1. The memory is transferred as a raw byte sequence from/to the
 *     firmware's memory.  If this memory contains data structures which
 *     contain multi-byte integers, it's the caller's responsibility to
 *     perform appropriate byte order conversions.
 *
 *  2. It is the Caller's responsibility to ensure that no other code
 *     uses the specified PCI-E Memory Window while this routine is
 *     using it.  This is typically done via the use of OS-specific
 *     locks, etc.
 */
int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr,
    u32 len, void *hbuf, int dir)
{
	u32 pos, offset, resid;
	u32 win_pf, mem_reg, mem_aperture, mem_base;
	u32 *buf;

	/* Argument sanity checks: both addresses must be 32-bit aligned. */
	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
		return -EINVAL;
	buf = (u32 *)hbuf;

	/*
	 * It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware.  So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/*
	 * Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory.  We need to grab that aperture in order to know
	 * how to use the specified window.  The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
	 * the address is relative to BAR0.
	 */
	mem_reg = t4_read_reg(adap,
	    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
	    win));

	/* a dead adapter will return 0xffffffff for PIO reads */
	if (mem_reg == 0xffffffff) {
		CH_WARN(adap, "Unable to read PCI-E Memory Window Base[%d]\n",
		    win);
		return -ENXIO;
	}

	mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT);
	mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT;
	if (is_t4(adap->params.chip))
		mem_base -= adap->t4_bar0;
	win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf);

	/*
	 * Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.
	 */
	pos = addr & ~(mem_aperture-1);
	offset = addr - pos;

	/*
	 * Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.  (Read it back to ensure that changes propagate before we
	 * attempt to use the new value.)
	 */
	t4_write_reg(adap,
	    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win),
	    pos | win_pf);
	t4_read_reg(adap,
	    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win));

	/*
	 * Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 *
	 * A note on Endianness issues:
	 *
	 * The "register" reads and writes below from/to the PCI-E Memory
	 * Window invoke the standard adapter Big-Endian to PCI-E Link
	 * Little-Endian "swizzle."  As a result, if we have the following
	 * data in adapter memory:
	 *
	 *     Memory:  ... | b0 | b1 | b2 | b3 | ...
	 *     Address:      i+0  i+1  i+2  i+3
	 *
	 * Then a read of the adapter memory via the PCI-E Memory Window
	 * will yield:
	 *
	 *     x = readl(i)
	 *     31               0
	 *     [ b3 | b2 | b1 | b0 ]
	 *
	 * If this value is stored into local memory on a Little-Endian system
	 * it will show up correctly in local memory as:
	 *
	 *     ( ..., b0, b1, b2, b3, ... )
	 *
	 * But on a Big-Endian system, the store will show up in memory
	 * incorrectly swizzled as:
	 *
	 *     ( ..., b3, b2, b1, b0, ... )
	 *
	 * So we need to account for this in the reads and writes to the
	 * PCI-E Memory Window below by undoing the register read/write
	 * swizzles.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
			    mem_base + offset));
		else
			t4_write_reg(adap, mem_base + offset,
			    (__force u32)cpu_to_le32(*buf++));
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/*
		 * If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.  Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_write_reg(adap,
			    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
			    win), pos | win_pf);
			t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
			    win));
		}
	}

	/*
	 * If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount.  The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid) {
		union {
			u32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			last.word = le32_to_cpu(
			    (__force __le32)t4_read_reg(adap,
			    mem_base + offset));
			/*
			 * NOTE(review): this copies bytes [resid, 4) of the
			 * final word rather than bytes [0, resid), which
			 * looks inconsistent with the write path below (it
			 * zeroes bytes [resid, 4) and keeps [0, resid)).
			 * Verify against upstream before changing.
			 */
			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
				bp[i] = last.byte[i];
		} else {
			last.word = *buf;
			for (i = resid; i < 4; i++)
				last.byte[i] = 0;
			t4_write_reg(adap, mem_base + offset,
			    (__force u32)cpu_to_le32(last.word));
		}
	}

	return 0;
}

/**
 * t4_memory_rw_mtype - read/write EDC 0, EDC 1 or MC via PCIE memory window
 * @adap: the adapter
 * @win: PCI-E Memory Window to use
 * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_HMA or MEM_MC
 * @maddr: address within indicated memory type
 * @len: amount of memory to transfer
 * @hbuf: host memory buffer
 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 * Reads/writes adapter memory using t4_memory_rw_addr().  This routine
 * provides an (memory type, address within memory type) interface.
 */
int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
    u32 len, void *hbuf, int dir)
{
	u32 mtype_offset;
	u32 edc_size, mc_size;

	/*
	 * Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
	 * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g.
	 *	T5)
	 * MEM_HMA  = 4
	 */
	edc_size = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR));
	if (mtype == MEM_HMA) {
		/* HMA lives immediately after the two EDC regions. */
		mtype_offset = 2 * (edc_size * 1024 * 1024);
	} else if (mtype != MEM_MC1)
		mtype_offset = (mtype * (edc_size * 1024 * 1024));
	else {
		/* MC1 starts after both EDCs plus MC0. */
		mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap,
		    A_MA_EXT_MEMORY0_BAR));
		mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}

	return t4_memory_rw_addr(adap, win,
	    mtype_offset + maddr, len,
	    hbuf, dir);
}

/*
 * Return the specified PCI-E Configuration Space register from our Physical
 * Function.  We try first via a Firmware LDST Command (if fw_attach != 0)
 * since we prefer to let the firmware own all of these registers, but if that
 * fails we go for it directly ourselves.
 */
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
{
	u32 val;

	/*
	 * If fw_attach != 0, construct and send the Firmware LDST Command to
	 * retrieve the specified PCI-E Configuration Space register.
	 */
	if (drv_fw_attach != 0) {
		struct fw_ldst_cmd ldst_cmd;
		int ret;

		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
		    cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
		    F_FW_CMD_REQUEST |
		    F_FW_CMD_READ |
		    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
		ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
		ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
		ldst_cmd.u.pcie.ctrl_to_fn =
		    (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
		ldst_cmd.u.pcie.r = reg;

		/*
		 * If the LDST Command succeeds, return the result, otherwise
		 * fall through to reading it directly ourselves ...
		 */
		ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
		    &ldst_cmd);
		if (ret == 0)
			return be32_to_cpu(ldst_cmd.u.pcie.data[0]);

		CH_WARN(adap, "Firmware failed to return "
		    "Configuration Space register %d, err = %d\n",
		    reg, -ret);
	}

	/*
	 * Read the desired Configuration Space register via the PCI-E
	 * Backdoor mechanism.
	 */
	t4_hw_pci_read_cfg4(adap, reg, &val);
	return val;
}

/*
 * Get the window based on base passed to it.
 * Window aperture is currently unhandled, but there is no use case for it
 * right now
 */
static int t4_get_window(struct adapter *adap, u64 pci_base, u64 pci_mask,
    u64 memwin_base, int drv_fw_attach)
{
	if (is_t4(adap->params.chip)) {
		u32 bar0;

		/*
		 * Truncation intentional: we only read the bottom 32-bits of
		 * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
		 * mechanism to read BAR0 instead of using
		 * pci_resource_start() because we could be operating from
		 * within a Virtual Machine which is trapping our accesses to
		 * our Configuration Space and we need to set up the PCI-E
		 * Memory Window decoders with the actual addresses which will
		 * be coming across the PCI-E link.
		 */
		bar0 = t4_read_pcie_cfg4(adap, pci_base, drv_fw_attach);
		bar0 &= pci_mask;
		adap->t4_bar0 = bar0;	/* cached for later window math */

		return bar0 + memwin_base;
	} else {
		/* For T5, only relative offset inside the PCIe BAR is passed */
		return memwin_base;
	}
}

/* Get the default utility window (win0) used by everyone */
int t4_get_util_window(struct adapter *adap, int drv_fw_attach)
{
	return t4_get_window(adap, PCI_BASE_ADDRESS_0,
	    PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE, drv_fw_attach);
}

/*
 * Set up memory window for accessing adapter memory ranges.  (Read
 * back MA register to ensure that changes propagate before we attempt
 * to use the new values.)
 */
void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
{
	t4_write_reg(adap,
	    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window),
	    memwin_base | V_BIR(0) |
	    V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
	t4_read_reg(adap,
	    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window));
}

/**
 * t4_get_regs_len - return the size of the chips register set
 * @adapter: the adapter
 *
 * Returns the size of the chip's BAR0 register space.
 */
unsigned int t4_get_regs_len(struct adapter *adapter)
{
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);

	switch (chip_version) {
	case CHELSIO_T4:
		return T4_REGMAP_SIZE;

	case CHELSIO_T5:
	case CHELSIO_T6:
		return T5_REGMAP_SIZE;
	}

	CH_ERR(adapter,
	    "Unsupported chip version %d\n", chip_version);
	return 0;
}

/**
 * t4_get_regs - read chip registers into provided buffer
 * @adap: the adapter
 * @buf: register buffer
 * @buf_size: size (in bytes) of register buffer
 *
 * If the provided register buffer isn't large enough for the chip's
 * full register range, the register dump will be truncated to the
 * register buffer's size.
1099 */ 1100 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 1101 { 1102 static const unsigned int t4_reg_ranges[] = { 1103 0x1008, 0x1108, 1104 0x1180, 0x1184, 1105 0x1190, 0x1194, 1106 0x11a0, 0x11a4, 1107 0x11b0, 0x11b4, 1108 0x11fc, 0x123c, 1109 0x1300, 0x173c, 1110 0x1800, 0x18fc, 1111 0x3000, 0x30d8, 1112 0x30e0, 0x30e4, 1113 0x30ec, 0x5910, 1114 0x5920, 0x5924, 1115 0x5960, 0x5960, 1116 0x5968, 0x5968, 1117 0x5970, 0x5970, 1118 0x5978, 0x5978, 1119 0x5980, 0x5980, 1120 0x5988, 0x5988, 1121 0x5990, 0x5990, 1122 0x5998, 0x5998, 1123 0x59a0, 0x59d4, 1124 0x5a00, 0x5ae0, 1125 0x5ae8, 0x5ae8, 1126 0x5af0, 0x5af0, 1127 0x5af8, 0x5af8, 1128 0x6000, 0x6098, 1129 0x6100, 0x6150, 1130 0x6200, 0x6208, 1131 0x6240, 0x6248, 1132 0x6280, 0x62b0, 1133 0x62c0, 0x6338, 1134 0x6370, 0x638c, 1135 0x6400, 0x643c, 1136 0x6500, 0x6524, 1137 0x6a00, 0x6a04, 1138 0x6a14, 0x6a38, 1139 0x6a60, 0x6a70, 1140 0x6a78, 0x6a78, 1141 0x6b00, 0x6b0c, 1142 0x6b1c, 0x6b84, 1143 0x6bf0, 0x6bf8, 1144 0x6c00, 0x6c0c, 1145 0x6c1c, 0x6c84, 1146 0x6cf0, 0x6cf8, 1147 0x6d00, 0x6d0c, 1148 0x6d1c, 0x6d84, 1149 0x6df0, 0x6df8, 1150 0x6e00, 0x6e0c, 1151 0x6e1c, 0x6e84, 1152 0x6ef0, 0x6ef8, 1153 0x6f00, 0x6f0c, 1154 0x6f1c, 0x6f84, 1155 0x6ff0, 0x6ff8, 1156 0x7000, 0x700c, 1157 0x701c, 0x7084, 1158 0x70f0, 0x70f8, 1159 0x7100, 0x710c, 1160 0x711c, 0x7184, 1161 0x71f0, 0x71f8, 1162 0x7200, 0x720c, 1163 0x721c, 0x7284, 1164 0x72f0, 0x72f8, 1165 0x7300, 0x730c, 1166 0x731c, 0x7384, 1167 0x73f0, 0x73f8, 1168 0x7400, 0x7450, 1169 0x7500, 0x7530, 1170 0x7600, 0x760c, 1171 0x7614, 0x761c, 1172 0x7680, 0x76cc, 1173 0x7700, 0x7798, 1174 0x77c0, 0x77fc, 1175 0x7900, 0x79fc, 1176 0x7b00, 0x7b58, 1177 0x7b60, 0x7b84, 1178 0x7b8c, 0x7c38, 1179 0x7d00, 0x7d38, 1180 0x7d40, 0x7d80, 1181 0x7d8c, 0x7ddc, 1182 0x7de4, 0x7e04, 1183 0x7e10, 0x7e1c, 1184 0x7e24, 0x7e38, 1185 0x7e40, 0x7e44, 1186 0x7e4c, 0x7e78, 1187 0x7e80, 0x7ea4, 1188 0x7eac, 0x7edc, 1189 0x7ee8, 0x7efc, 1190 0x8dc0, 0x8e04, 1191 0x8e10, 
0x8e1c, 1192 0x8e30, 0x8e78, 1193 0x8ea0, 0x8eb8, 1194 0x8ec0, 0x8f6c, 1195 0x8fc0, 0x9008, 1196 0x9010, 0x9058, 1197 0x9060, 0x9060, 1198 0x9068, 0x9074, 1199 0x90fc, 0x90fc, 1200 0x9400, 0x9408, 1201 0x9410, 0x9458, 1202 0x9600, 0x9600, 1203 0x9608, 0x9638, 1204 0x9640, 0x96bc, 1205 0x9800, 0x9808, 1206 0x9820, 0x983c, 1207 0x9850, 0x9864, 1208 0x9c00, 0x9c6c, 1209 0x9c80, 0x9cec, 1210 0x9d00, 0x9d6c, 1211 0x9d80, 0x9dec, 1212 0x9e00, 0x9e6c, 1213 0x9e80, 0x9eec, 1214 0x9f00, 0x9f6c, 1215 0x9f80, 0x9fec, 1216 0xd004, 0xd004, 1217 0xd010, 0xd03c, 1218 0xdfc0, 0xdfe0, 1219 0xe000, 0xea7c, 1220 0xf000, 0x11110, 1221 0x11118, 0x11190, 1222 0x19040, 0x1906c, 1223 0x19078, 0x19080, 1224 0x1908c, 0x190e4, 1225 0x190f0, 0x190f8, 1226 0x19100, 0x19110, 1227 0x19120, 0x19124, 1228 0x19150, 0x19194, 1229 0x1919c, 0x191b0, 1230 0x191d0, 0x191e8, 1231 0x19238, 0x1924c, 1232 0x193f8, 0x1943c, 1233 0x1944c, 0x19474, 1234 0x19490, 0x194e0, 1235 0x194f0, 0x194f8, 1236 0x19800, 0x19c08, 1237 0x19c10, 0x19c90, 1238 0x19ca0, 0x19ce4, 1239 0x19cf0, 0x19d40, 1240 0x19d50, 0x19d94, 1241 0x19da0, 0x19de8, 1242 0x19df0, 0x19e40, 1243 0x19e50, 0x19e90, 1244 0x19ea0, 0x19f4c, 1245 0x1a000, 0x1a004, 1246 0x1a010, 0x1a06c, 1247 0x1a0b0, 0x1a0e4, 1248 0x1a0ec, 0x1a0f4, 1249 0x1a100, 0x1a108, 1250 0x1a114, 0x1a120, 1251 0x1a128, 0x1a130, 1252 0x1a138, 0x1a138, 1253 0x1a190, 0x1a1c4, 1254 0x1a1fc, 0x1a1fc, 1255 0x1e040, 0x1e04c, 1256 0x1e284, 0x1e28c, 1257 0x1e2c0, 0x1e2c0, 1258 0x1e2e0, 0x1e2e0, 1259 0x1e300, 0x1e384, 1260 0x1e3c0, 0x1e3c8, 1261 0x1e440, 0x1e44c, 1262 0x1e684, 0x1e68c, 1263 0x1e6c0, 0x1e6c0, 1264 0x1e6e0, 0x1e6e0, 1265 0x1e700, 0x1e784, 1266 0x1e7c0, 0x1e7c8, 1267 0x1e840, 0x1e84c, 1268 0x1ea84, 0x1ea8c, 1269 0x1eac0, 0x1eac0, 1270 0x1eae0, 0x1eae0, 1271 0x1eb00, 0x1eb84, 1272 0x1ebc0, 0x1ebc8, 1273 0x1ec40, 0x1ec4c, 1274 0x1ee84, 0x1ee8c, 1275 0x1eec0, 0x1eec0, 1276 0x1eee0, 0x1eee0, 1277 0x1ef00, 0x1ef84, 1278 0x1efc0, 0x1efc8, 1279 0x1f040, 0x1f04c, 1280 0x1f284, 0x1f28c, 
1281 0x1f2c0, 0x1f2c0, 1282 0x1f2e0, 0x1f2e0, 1283 0x1f300, 0x1f384, 1284 0x1f3c0, 0x1f3c8, 1285 0x1f440, 0x1f44c, 1286 0x1f684, 0x1f68c, 1287 0x1f6c0, 0x1f6c0, 1288 0x1f6e0, 0x1f6e0, 1289 0x1f700, 0x1f784, 1290 0x1f7c0, 0x1f7c8, 1291 0x1f840, 0x1f84c, 1292 0x1fa84, 0x1fa8c, 1293 0x1fac0, 0x1fac0, 1294 0x1fae0, 0x1fae0, 1295 0x1fb00, 0x1fb84, 1296 0x1fbc0, 0x1fbc8, 1297 0x1fc40, 0x1fc4c, 1298 0x1fe84, 0x1fe8c, 1299 0x1fec0, 0x1fec0, 1300 0x1fee0, 0x1fee0, 1301 0x1ff00, 0x1ff84, 1302 0x1ffc0, 0x1ffc8, 1303 0x20000, 0x2002c, 1304 0x20100, 0x2013c, 1305 0x20190, 0x201a0, 1306 0x201a8, 0x201b8, 1307 0x201c4, 0x201c8, 1308 0x20200, 0x20318, 1309 0x20400, 0x204b4, 1310 0x204c0, 0x20528, 1311 0x20540, 0x20614, 1312 0x21000, 0x21040, 1313 0x2104c, 0x21060, 1314 0x210c0, 0x210ec, 1315 0x21200, 0x21268, 1316 0x21270, 0x21284, 1317 0x212fc, 0x21388, 1318 0x21400, 0x21404, 1319 0x21500, 0x21500, 1320 0x21510, 0x21518, 1321 0x2152c, 0x21530, 1322 0x2153c, 0x2153c, 1323 0x21550, 0x21554, 1324 0x21600, 0x21600, 1325 0x21608, 0x2161c, 1326 0x21624, 0x21628, 1327 0x21630, 0x21634, 1328 0x2163c, 0x2163c, 1329 0x21700, 0x2171c, 1330 0x21780, 0x2178c, 1331 0x21800, 0x21818, 1332 0x21820, 0x21828, 1333 0x21830, 0x21848, 1334 0x21850, 0x21854, 1335 0x21860, 0x21868, 1336 0x21870, 0x21870, 1337 0x21878, 0x21898, 1338 0x218a0, 0x218a8, 1339 0x218b0, 0x218c8, 1340 0x218d0, 0x218d4, 1341 0x218e0, 0x218e8, 1342 0x218f0, 0x218f0, 1343 0x218f8, 0x21a18, 1344 0x21a20, 0x21a28, 1345 0x21a30, 0x21a48, 1346 0x21a50, 0x21a54, 1347 0x21a60, 0x21a68, 1348 0x21a70, 0x21a70, 1349 0x21a78, 0x21a98, 1350 0x21aa0, 0x21aa8, 1351 0x21ab0, 0x21ac8, 1352 0x21ad0, 0x21ad4, 1353 0x21ae0, 0x21ae8, 1354 0x21af0, 0x21af0, 1355 0x21af8, 0x21c18, 1356 0x21c20, 0x21c20, 1357 0x21c28, 0x21c30, 1358 0x21c38, 0x21c38, 1359 0x21c80, 0x21c98, 1360 0x21ca0, 0x21ca8, 1361 0x21cb0, 0x21cc8, 1362 0x21cd0, 0x21cd4, 1363 0x21ce0, 0x21ce8, 1364 0x21cf0, 0x21cf0, 1365 0x21cf8, 0x21d7c, 1366 0x21e00, 0x21e04, 1367 0x22000, 
0x2202c, 1368 0x22100, 0x2213c, 1369 0x22190, 0x221a0, 1370 0x221a8, 0x221b8, 1371 0x221c4, 0x221c8, 1372 0x22200, 0x22318, 1373 0x22400, 0x224b4, 1374 0x224c0, 0x22528, 1375 0x22540, 0x22614, 1376 0x23000, 0x23040, 1377 0x2304c, 0x23060, 1378 0x230c0, 0x230ec, 1379 0x23200, 0x23268, 1380 0x23270, 0x23284, 1381 0x232fc, 0x23388, 1382 0x23400, 0x23404, 1383 0x23500, 0x23500, 1384 0x23510, 0x23518, 1385 0x2352c, 0x23530, 1386 0x2353c, 0x2353c, 1387 0x23550, 0x23554, 1388 0x23600, 0x23600, 1389 0x23608, 0x2361c, 1390 0x23624, 0x23628, 1391 0x23630, 0x23634, 1392 0x2363c, 0x2363c, 1393 0x23700, 0x2371c, 1394 0x23780, 0x2378c, 1395 0x23800, 0x23818, 1396 0x23820, 0x23828, 1397 0x23830, 0x23848, 1398 0x23850, 0x23854, 1399 0x23860, 0x23868, 1400 0x23870, 0x23870, 1401 0x23878, 0x23898, 1402 0x238a0, 0x238a8, 1403 0x238b0, 0x238c8, 1404 0x238d0, 0x238d4, 1405 0x238e0, 0x238e8, 1406 0x238f0, 0x238f0, 1407 0x238f8, 0x23a18, 1408 0x23a20, 0x23a28, 1409 0x23a30, 0x23a48, 1410 0x23a50, 0x23a54, 1411 0x23a60, 0x23a68, 1412 0x23a70, 0x23a70, 1413 0x23a78, 0x23a98, 1414 0x23aa0, 0x23aa8, 1415 0x23ab0, 0x23ac8, 1416 0x23ad0, 0x23ad4, 1417 0x23ae0, 0x23ae8, 1418 0x23af0, 0x23af0, 1419 0x23af8, 0x23c18, 1420 0x23c20, 0x23c20, 1421 0x23c28, 0x23c30, 1422 0x23c38, 0x23c38, 1423 0x23c80, 0x23c98, 1424 0x23ca0, 0x23ca8, 1425 0x23cb0, 0x23cc8, 1426 0x23cd0, 0x23cd4, 1427 0x23ce0, 0x23ce8, 1428 0x23cf0, 0x23cf0, 1429 0x23cf8, 0x23d7c, 1430 0x23e00, 0x23e04, 1431 0x24000, 0x2402c, 1432 0x24100, 0x2413c, 1433 0x24190, 0x241a0, 1434 0x241a8, 0x241b8, 1435 0x241c4, 0x241c8, 1436 0x24200, 0x24318, 1437 0x24400, 0x244b4, 1438 0x244c0, 0x24528, 1439 0x24540, 0x24614, 1440 0x25000, 0x25040, 1441 0x2504c, 0x25060, 1442 0x250c0, 0x250ec, 1443 0x25200, 0x25268, 1444 0x25270, 0x25284, 1445 0x252fc, 0x25388, 1446 0x25400, 0x25404, 1447 0x25500, 0x25500, 1448 0x25510, 0x25518, 1449 0x2552c, 0x25530, 1450 0x2553c, 0x2553c, 1451 0x25550, 0x25554, 1452 0x25600, 0x25600, 1453 0x25608, 0x2561c, 1454 
0x25624, 0x25628, 1455 0x25630, 0x25634, 1456 0x2563c, 0x2563c, 1457 0x25700, 0x2571c, 1458 0x25780, 0x2578c, 1459 0x25800, 0x25818, 1460 0x25820, 0x25828, 1461 0x25830, 0x25848, 1462 0x25850, 0x25854, 1463 0x25860, 0x25868, 1464 0x25870, 0x25870, 1465 0x25878, 0x25898, 1466 0x258a0, 0x258a8, 1467 0x258b0, 0x258c8, 1468 0x258d0, 0x258d4, 1469 0x258e0, 0x258e8, 1470 0x258f0, 0x258f0, 1471 0x258f8, 0x25a18, 1472 0x25a20, 0x25a28, 1473 0x25a30, 0x25a48, 1474 0x25a50, 0x25a54, 1475 0x25a60, 0x25a68, 1476 0x25a70, 0x25a70, 1477 0x25a78, 0x25a98, 1478 0x25aa0, 0x25aa8, 1479 0x25ab0, 0x25ac8, 1480 0x25ad0, 0x25ad4, 1481 0x25ae0, 0x25ae8, 1482 0x25af0, 0x25af0, 1483 0x25af8, 0x25c18, 1484 0x25c20, 0x25c20, 1485 0x25c28, 0x25c30, 1486 0x25c38, 0x25c38, 1487 0x25c80, 0x25c98, 1488 0x25ca0, 0x25ca8, 1489 0x25cb0, 0x25cc8, 1490 0x25cd0, 0x25cd4, 1491 0x25ce0, 0x25ce8, 1492 0x25cf0, 0x25cf0, 1493 0x25cf8, 0x25d7c, 1494 0x25e00, 0x25e04, 1495 0x26000, 0x2602c, 1496 0x26100, 0x2613c, 1497 0x26190, 0x261a0, 1498 0x261a8, 0x261b8, 1499 0x261c4, 0x261c8, 1500 0x26200, 0x26318, 1501 0x26400, 0x264b4, 1502 0x264c0, 0x26528, 1503 0x26540, 0x26614, 1504 0x27000, 0x27040, 1505 0x2704c, 0x27060, 1506 0x270c0, 0x270ec, 1507 0x27200, 0x27268, 1508 0x27270, 0x27284, 1509 0x272fc, 0x27388, 1510 0x27400, 0x27404, 1511 0x27500, 0x27500, 1512 0x27510, 0x27518, 1513 0x2752c, 0x27530, 1514 0x2753c, 0x2753c, 1515 0x27550, 0x27554, 1516 0x27600, 0x27600, 1517 0x27608, 0x2761c, 1518 0x27624, 0x27628, 1519 0x27630, 0x27634, 1520 0x2763c, 0x2763c, 1521 0x27700, 0x2771c, 1522 0x27780, 0x2778c, 1523 0x27800, 0x27818, 1524 0x27820, 0x27828, 1525 0x27830, 0x27848, 1526 0x27850, 0x27854, 1527 0x27860, 0x27868, 1528 0x27870, 0x27870, 1529 0x27878, 0x27898, 1530 0x278a0, 0x278a8, 1531 0x278b0, 0x278c8, 1532 0x278d0, 0x278d4, 1533 0x278e0, 0x278e8, 1534 0x278f0, 0x278f0, 1535 0x278f8, 0x27a18, 1536 0x27a20, 0x27a28, 1537 0x27a30, 0x27a48, 1538 0x27a50, 0x27a54, 1539 0x27a60, 0x27a68, 1540 0x27a70, 0x27a70, 
1541 0x27a78, 0x27a98, 1542 0x27aa0, 0x27aa8, 1543 0x27ab0, 0x27ac8, 1544 0x27ad0, 0x27ad4, 1545 0x27ae0, 0x27ae8, 1546 0x27af0, 0x27af0, 1547 0x27af8, 0x27c18, 1548 0x27c20, 0x27c20, 1549 0x27c28, 0x27c30, 1550 0x27c38, 0x27c38, 1551 0x27c80, 0x27c98, 1552 0x27ca0, 0x27ca8, 1553 0x27cb0, 0x27cc8, 1554 0x27cd0, 0x27cd4, 1555 0x27ce0, 0x27ce8, 1556 0x27cf0, 0x27cf0, 1557 0x27cf8, 0x27d7c, 1558 0x27e00, 0x27e04, 1559 }; 1560 1561 static const unsigned int t5_reg_ranges[] = { 1562 0x1008, 0x10c0, 1563 0x10cc, 0x10f8, 1564 0x1100, 0x1100, 1565 0x110c, 0x1148, 1566 0x1180, 0x1184, 1567 0x1190, 0x1194, 1568 0x11a0, 0x11a4, 1569 0x11b0, 0x11b4, 1570 0x11fc, 0x123c, 1571 0x1280, 0x173c, 1572 0x1800, 0x18fc, 1573 0x3000, 0x3028, 1574 0x3060, 0x30b0, 1575 0x30b8, 0x30d8, 1576 0x30e0, 0x30fc, 1577 0x3140, 0x357c, 1578 0x35a8, 0x35cc, 1579 0x35ec, 0x35ec, 1580 0x3600, 0x5624, 1581 0x56cc, 0x56ec, 1582 0x56f4, 0x5720, 1583 0x5728, 0x575c, 1584 0x580c, 0x5814, 1585 0x5890, 0x589c, 1586 0x58a4, 0x58ac, 1587 0x58b8, 0x58bc, 1588 0x5940, 0x59c8, 1589 0x59d0, 0x59dc, 1590 0x59fc, 0x5a18, 1591 0x5a60, 0x5a70, 1592 0x5a80, 0x5a9c, 1593 0x5b94, 0x5bfc, 1594 0x6000, 0x6020, 1595 0x6028, 0x6040, 1596 0x6058, 0x609c, 1597 0x60a8, 0x614c, 1598 0x7700, 0x7798, 1599 0x77c0, 0x78fc, 1600 0x7b00, 0x7b58, 1601 0x7b60, 0x7b84, 1602 0x7b8c, 0x7c54, 1603 0x7d00, 0x7d38, 1604 0x7d40, 0x7d80, 1605 0x7d8c, 0x7ddc, 1606 0x7de4, 0x7e04, 1607 0x7e10, 0x7e1c, 1608 0x7e24, 0x7e38, 1609 0x7e40, 0x7e44, 1610 0x7e4c, 0x7e78, 1611 0x7e80, 0x7edc, 1612 0x7ee8, 0x7efc, 1613 0x8dc0, 0x8de0, 1614 0x8df8, 0x8e04, 1615 0x8e10, 0x8e84, 1616 0x8ea0, 0x8f84, 1617 0x8fc0, 0x9058, 1618 0x9060, 0x9060, 1619 0x9068, 0x90f8, 1620 0x9400, 0x9408, 1621 0x9410, 0x9470, 1622 0x9600, 0x9600, 1623 0x9608, 0x9638, 1624 0x9640, 0x96f4, 1625 0x9800, 0x9808, 1626 0x9820, 0x983c, 1627 0x9850, 0x9864, 1628 0x9c00, 0x9c6c, 1629 0x9c80, 0x9cec, 1630 0x9d00, 0x9d6c, 1631 0x9d80, 0x9dec, 1632 0x9e00, 0x9e6c, 1633 0x9e80, 0x9eec, 1634 
0x9f00, 0x9f6c, 1635 0x9f80, 0xa020, 1636 0xd004, 0xd004, 1637 0xd010, 0xd03c, 1638 0xdfc0, 0xdfe0, 1639 0xe000, 0x1106c, 1640 0x11074, 0x11088, 1641 0x1109c, 0x1117c, 1642 0x11190, 0x11204, 1643 0x19040, 0x1906c, 1644 0x19078, 0x19080, 1645 0x1908c, 0x190e8, 1646 0x190f0, 0x190f8, 1647 0x19100, 0x19110, 1648 0x19120, 0x19124, 1649 0x19150, 0x19194, 1650 0x1919c, 0x191b0, 1651 0x191d0, 0x191e8, 1652 0x19238, 0x19290, 1653 0x193f8, 0x19428, 1654 0x19430, 0x19444, 1655 0x1944c, 0x1946c, 1656 0x19474, 0x19474, 1657 0x19490, 0x194cc, 1658 0x194f0, 0x194f8, 1659 0x19c00, 0x19c08, 1660 0x19c10, 0x19c60, 1661 0x19c94, 0x19ce4, 1662 0x19cf0, 0x19d40, 1663 0x19d50, 0x19d94, 1664 0x19da0, 0x19de8, 1665 0x19df0, 0x19e10, 1666 0x19e50, 0x19e90, 1667 0x19ea0, 0x19f24, 1668 0x19f34, 0x19f34, 1669 0x19f40, 0x19f50, 1670 0x19f90, 0x19fb4, 1671 0x19fc4, 0x19fe4, 1672 0x1a000, 0x1a004, 1673 0x1a010, 0x1a06c, 1674 0x1a0b0, 0x1a0e4, 1675 0x1a0ec, 0x1a0f8, 1676 0x1a100, 0x1a108, 1677 0x1a114, 0x1a120, 1678 0x1a128, 0x1a130, 1679 0x1a138, 0x1a138, 1680 0x1a190, 0x1a1c4, 1681 0x1a1fc, 0x1a1fc, 1682 0x1e008, 0x1e00c, 1683 0x1e040, 0x1e044, 1684 0x1e04c, 0x1e04c, 1685 0x1e284, 0x1e290, 1686 0x1e2c0, 0x1e2c0, 1687 0x1e2e0, 0x1e2e0, 1688 0x1e300, 0x1e384, 1689 0x1e3c0, 0x1e3c8, 1690 0x1e408, 0x1e40c, 1691 0x1e440, 0x1e444, 1692 0x1e44c, 0x1e44c, 1693 0x1e684, 0x1e690, 1694 0x1e6c0, 0x1e6c0, 1695 0x1e6e0, 0x1e6e0, 1696 0x1e700, 0x1e784, 1697 0x1e7c0, 0x1e7c8, 1698 0x1e808, 0x1e80c, 1699 0x1e840, 0x1e844, 1700 0x1e84c, 0x1e84c, 1701 0x1ea84, 0x1ea90, 1702 0x1eac0, 0x1eac0, 1703 0x1eae0, 0x1eae0, 1704 0x1eb00, 0x1eb84, 1705 0x1ebc0, 0x1ebc8, 1706 0x1ec08, 0x1ec0c, 1707 0x1ec40, 0x1ec44, 1708 0x1ec4c, 0x1ec4c, 1709 0x1ee84, 0x1ee90, 1710 0x1eec0, 0x1eec0, 1711 0x1eee0, 0x1eee0, 1712 0x1ef00, 0x1ef84, 1713 0x1efc0, 0x1efc8, 1714 0x1f008, 0x1f00c, 1715 0x1f040, 0x1f044, 1716 0x1f04c, 0x1f04c, 1717 0x1f284, 0x1f290, 1718 0x1f2c0, 0x1f2c0, 1719 0x1f2e0, 0x1f2e0, 1720 0x1f300, 0x1f384, 1721 0x1f3c0, 
0x1f3c8, 1722 0x1f408, 0x1f40c, 1723 0x1f440, 0x1f444, 1724 0x1f44c, 0x1f44c, 1725 0x1f684, 0x1f690, 1726 0x1f6c0, 0x1f6c0, 1727 0x1f6e0, 0x1f6e0, 1728 0x1f700, 0x1f784, 1729 0x1f7c0, 0x1f7c8, 1730 0x1f808, 0x1f80c, 1731 0x1f840, 0x1f844, 1732 0x1f84c, 0x1f84c, 1733 0x1fa84, 0x1fa90, 1734 0x1fac0, 0x1fac0, 1735 0x1fae0, 0x1fae0, 1736 0x1fb00, 0x1fb84, 1737 0x1fbc0, 0x1fbc8, 1738 0x1fc08, 0x1fc0c, 1739 0x1fc40, 0x1fc44, 1740 0x1fc4c, 0x1fc4c, 1741 0x1fe84, 0x1fe90, 1742 0x1fec0, 0x1fec0, 1743 0x1fee0, 0x1fee0, 1744 0x1ff00, 0x1ff84, 1745 0x1ffc0, 0x1ffc8, 1746 0x30000, 0x30030, 1747 0x30100, 0x30144, 1748 0x30190, 0x301a0, 1749 0x301a8, 0x301b8, 1750 0x301c4, 0x301c8, 1751 0x301d0, 0x301d0, 1752 0x30200, 0x30318, 1753 0x30400, 0x304b4, 1754 0x304c0, 0x3052c, 1755 0x30540, 0x3061c, 1756 0x30800, 0x30828, 1757 0x30834, 0x30834, 1758 0x308c0, 0x30908, 1759 0x30910, 0x309ac, 1760 0x30a00, 0x30a14, 1761 0x30a1c, 0x30a2c, 1762 0x30a44, 0x30a50, 1763 0x30a74, 0x30a74, 1764 0x30a7c, 0x30afc, 1765 0x30b08, 0x30c24, 1766 0x30d00, 0x30d00, 1767 0x30d08, 0x30d14, 1768 0x30d1c, 0x30d20, 1769 0x30d3c, 0x30d3c, 1770 0x30d48, 0x30d50, 1771 0x31200, 0x3120c, 1772 0x31220, 0x31220, 1773 0x31240, 0x31240, 1774 0x31600, 0x3160c, 1775 0x31a00, 0x31a1c, 1776 0x31e00, 0x31e20, 1777 0x31e38, 0x31e3c, 1778 0x31e80, 0x31e80, 1779 0x31e88, 0x31ea8, 1780 0x31eb0, 0x31eb4, 1781 0x31ec8, 0x31ed4, 1782 0x31fb8, 0x32004, 1783 0x32200, 0x32200, 1784 0x32208, 0x32240, 1785 0x32248, 0x32280, 1786 0x32288, 0x322c0, 1787 0x322c8, 0x322fc, 1788 0x32600, 0x32630, 1789 0x32a00, 0x32abc, 1790 0x32b00, 0x32b10, 1791 0x32b20, 0x32b30, 1792 0x32b40, 0x32b50, 1793 0x32b60, 0x32b70, 1794 0x33000, 0x33028, 1795 0x33030, 0x33048, 1796 0x33060, 0x33068, 1797 0x33070, 0x3309c, 1798 0x330f0, 0x33128, 1799 0x33130, 0x33148, 1800 0x33160, 0x33168, 1801 0x33170, 0x3319c, 1802 0x331f0, 0x33238, 1803 0x33240, 0x33240, 1804 0x33248, 0x33250, 1805 0x3325c, 0x33264, 1806 0x33270, 0x332b8, 1807 0x332c0, 0x332e4, 1808 
0x332f8, 0x33338, 1809 0x33340, 0x33340, 1810 0x33348, 0x33350, 1811 0x3335c, 0x33364, 1812 0x33370, 0x333b8, 1813 0x333c0, 0x333e4, 1814 0x333f8, 0x33428, 1815 0x33430, 0x33448, 1816 0x33460, 0x33468, 1817 0x33470, 0x3349c, 1818 0x334f0, 0x33528, 1819 0x33530, 0x33548, 1820 0x33560, 0x33568, 1821 0x33570, 0x3359c, 1822 0x335f0, 0x33638, 1823 0x33640, 0x33640, 1824 0x33648, 0x33650, 1825 0x3365c, 0x33664, 1826 0x33670, 0x336b8, 1827 0x336c0, 0x336e4, 1828 0x336f8, 0x33738, 1829 0x33740, 0x33740, 1830 0x33748, 0x33750, 1831 0x3375c, 0x33764, 1832 0x33770, 0x337b8, 1833 0x337c0, 0x337e4, 1834 0x337f8, 0x337fc, 1835 0x33814, 0x33814, 1836 0x3382c, 0x3382c, 1837 0x33880, 0x3388c, 1838 0x338e8, 0x338ec, 1839 0x33900, 0x33928, 1840 0x33930, 0x33948, 1841 0x33960, 0x33968, 1842 0x33970, 0x3399c, 1843 0x339f0, 0x33a38, 1844 0x33a40, 0x33a40, 1845 0x33a48, 0x33a50, 1846 0x33a5c, 0x33a64, 1847 0x33a70, 0x33ab8, 1848 0x33ac0, 0x33ae4, 1849 0x33af8, 0x33b10, 1850 0x33b28, 0x33b28, 1851 0x33b3c, 0x33b50, 1852 0x33bf0, 0x33c10, 1853 0x33c28, 0x33c28, 1854 0x33c3c, 0x33c50, 1855 0x33cf0, 0x33cfc, 1856 0x34000, 0x34030, 1857 0x34100, 0x34144, 1858 0x34190, 0x341a0, 1859 0x341a8, 0x341b8, 1860 0x341c4, 0x341c8, 1861 0x341d0, 0x341d0, 1862 0x34200, 0x34318, 1863 0x34400, 0x344b4, 1864 0x344c0, 0x3452c, 1865 0x34540, 0x3461c, 1866 0x34800, 0x34828, 1867 0x34834, 0x34834, 1868 0x348c0, 0x34908, 1869 0x34910, 0x349ac, 1870 0x34a00, 0x34a14, 1871 0x34a1c, 0x34a2c, 1872 0x34a44, 0x34a50, 1873 0x34a74, 0x34a74, 1874 0x34a7c, 0x34afc, 1875 0x34b08, 0x34c24, 1876 0x34d00, 0x34d00, 1877 0x34d08, 0x34d14, 1878 0x34d1c, 0x34d20, 1879 0x34d3c, 0x34d3c, 1880 0x34d48, 0x34d50, 1881 0x35200, 0x3520c, 1882 0x35220, 0x35220, 1883 0x35240, 0x35240, 1884 0x35600, 0x3560c, 1885 0x35a00, 0x35a1c, 1886 0x35e00, 0x35e20, 1887 0x35e38, 0x35e3c, 1888 0x35e80, 0x35e80, 1889 0x35e88, 0x35ea8, 1890 0x35eb0, 0x35eb4, 1891 0x35ec8, 0x35ed4, 1892 0x35fb8, 0x36004, 1893 0x36200, 0x36200, 1894 0x36208, 0x36240, 
1895 0x36248, 0x36280, 1896 0x36288, 0x362c0, 1897 0x362c8, 0x362fc, 1898 0x36600, 0x36630, 1899 0x36a00, 0x36abc, 1900 0x36b00, 0x36b10, 1901 0x36b20, 0x36b30, 1902 0x36b40, 0x36b50, 1903 0x36b60, 0x36b70, 1904 0x37000, 0x37028, 1905 0x37030, 0x37048, 1906 0x37060, 0x37068, 1907 0x37070, 0x3709c, 1908 0x370f0, 0x37128, 1909 0x37130, 0x37148, 1910 0x37160, 0x37168, 1911 0x37170, 0x3719c, 1912 0x371f0, 0x37238, 1913 0x37240, 0x37240, 1914 0x37248, 0x37250, 1915 0x3725c, 0x37264, 1916 0x37270, 0x372b8, 1917 0x372c0, 0x372e4, 1918 0x372f8, 0x37338, 1919 0x37340, 0x37340, 1920 0x37348, 0x37350, 1921 0x3735c, 0x37364, 1922 0x37370, 0x373b8, 1923 0x373c0, 0x373e4, 1924 0x373f8, 0x37428, 1925 0x37430, 0x37448, 1926 0x37460, 0x37468, 1927 0x37470, 0x3749c, 1928 0x374f0, 0x37528, 1929 0x37530, 0x37548, 1930 0x37560, 0x37568, 1931 0x37570, 0x3759c, 1932 0x375f0, 0x37638, 1933 0x37640, 0x37640, 1934 0x37648, 0x37650, 1935 0x3765c, 0x37664, 1936 0x37670, 0x376b8, 1937 0x376c0, 0x376e4, 1938 0x376f8, 0x37738, 1939 0x37740, 0x37740, 1940 0x37748, 0x37750, 1941 0x3775c, 0x37764, 1942 0x37770, 0x377b8, 1943 0x377c0, 0x377e4, 1944 0x377f8, 0x377fc, 1945 0x37814, 0x37814, 1946 0x3782c, 0x3782c, 1947 0x37880, 0x3788c, 1948 0x378e8, 0x378ec, 1949 0x37900, 0x37928, 1950 0x37930, 0x37948, 1951 0x37960, 0x37968, 1952 0x37970, 0x3799c, 1953 0x379f0, 0x37a38, 1954 0x37a40, 0x37a40, 1955 0x37a48, 0x37a50, 1956 0x37a5c, 0x37a64, 1957 0x37a70, 0x37ab8, 1958 0x37ac0, 0x37ae4, 1959 0x37af8, 0x37b10, 1960 0x37b28, 0x37b28, 1961 0x37b3c, 0x37b50, 1962 0x37bf0, 0x37c10, 1963 0x37c28, 0x37c28, 1964 0x37c3c, 0x37c50, 1965 0x37cf0, 0x37cfc, 1966 0x38000, 0x38030, 1967 0x38100, 0x38144, 1968 0x38190, 0x381a0, 1969 0x381a8, 0x381b8, 1970 0x381c4, 0x381c8, 1971 0x381d0, 0x381d0, 1972 0x38200, 0x38318, 1973 0x38400, 0x384b4, 1974 0x384c0, 0x3852c, 1975 0x38540, 0x3861c, 1976 0x38800, 0x38828, 1977 0x38834, 0x38834, 1978 0x388c0, 0x38908, 1979 0x38910, 0x389ac, 1980 0x38a00, 0x38a14, 1981 0x38a1c, 
0x38a2c, 1982 0x38a44, 0x38a50, 1983 0x38a74, 0x38a74, 1984 0x38a7c, 0x38afc, 1985 0x38b08, 0x38c24, 1986 0x38d00, 0x38d00, 1987 0x38d08, 0x38d14, 1988 0x38d1c, 0x38d20, 1989 0x38d3c, 0x38d3c, 1990 0x38d48, 0x38d50, 1991 0x39200, 0x3920c, 1992 0x39220, 0x39220, 1993 0x39240, 0x39240, 1994 0x39600, 0x3960c, 1995 0x39a00, 0x39a1c, 1996 0x39e00, 0x39e20, 1997 0x39e38, 0x39e3c, 1998 0x39e80, 0x39e80, 1999 0x39e88, 0x39ea8, 2000 0x39eb0, 0x39eb4, 2001 0x39ec8, 0x39ed4, 2002 0x39fb8, 0x3a004, 2003 0x3a200, 0x3a200, 2004 0x3a208, 0x3a240, 2005 0x3a248, 0x3a280, 2006 0x3a288, 0x3a2c0, 2007 0x3a2c8, 0x3a2fc, 2008 0x3a600, 0x3a630, 2009 0x3aa00, 0x3aabc, 2010 0x3ab00, 0x3ab10, 2011 0x3ab20, 0x3ab30, 2012 0x3ab40, 0x3ab50, 2013 0x3ab60, 0x3ab70, 2014 0x3b000, 0x3b028, 2015 0x3b030, 0x3b048, 2016 0x3b060, 0x3b068, 2017 0x3b070, 0x3b09c, 2018 0x3b0f0, 0x3b128, 2019 0x3b130, 0x3b148, 2020 0x3b160, 0x3b168, 2021 0x3b170, 0x3b19c, 2022 0x3b1f0, 0x3b238, 2023 0x3b240, 0x3b240, 2024 0x3b248, 0x3b250, 2025 0x3b25c, 0x3b264, 2026 0x3b270, 0x3b2b8, 2027 0x3b2c0, 0x3b2e4, 2028 0x3b2f8, 0x3b338, 2029 0x3b340, 0x3b340, 2030 0x3b348, 0x3b350, 2031 0x3b35c, 0x3b364, 2032 0x3b370, 0x3b3b8, 2033 0x3b3c0, 0x3b3e4, 2034 0x3b3f8, 0x3b428, 2035 0x3b430, 0x3b448, 2036 0x3b460, 0x3b468, 2037 0x3b470, 0x3b49c, 2038 0x3b4f0, 0x3b528, 2039 0x3b530, 0x3b548, 2040 0x3b560, 0x3b568, 2041 0x3b570, 0x3b59c, 2042 0x3b5f0, 0x3b638, 2043 0x3b640, 0x3b640, 2044 0x3b648, 0x3b650, 2045 0x3b65c, 0x3b664, 2046 0x3b670, 0x3b6b8, 2047 0x3b6c0, 0x3b6e4, 2048 0x3b6f8, 0x3b738, 2049 0x3b740, 0x3b740, 2050 0x3b748, 0x3b750, 2051 0x3b75c, 0x3b764, 2052 0x3b770, 0x3b7b8, 2053 0x3b7c0, 0x3b7e4, 2054 0x3b7f8, 0x3b7fc, 2055 0x3b814, 0x3b814, 2056 0x3b82c, 0x3b82c, 2057 0x3b880, 0x3b88c, 2058 0x3b8e8, 0x3b8ec, 2059 0x3b900, 0x3b928, 2060 0x3b930, 0x3b948, 2061 0x3b960, 0x3b968, 2062 0x3b970, 0x3b99c, 2063 0x3b9f0, 0x3ba38, 2064 0x3ba40, 0x3ba40, 2065 0x3ba48, 0x3ba50, 2066 0x3ba5c, 0x3ba64, 2067 0x3ba70, 0x3bab8, 2068 
0x3bac0, 0x3bae4, 2069 0x3baf8, 0x3bb10, 2070 0x3bb28, 0x3bb28, 2071 0x3bb3c, 0x3bb50, 2072 0x3bbf0, 0x3bc10, 2073 0x3bc28, 0x3bc28, 2074 0x3bc3c, 0x3bc50, 2075 0x3bcf0, 0x3bcfc, 2076 0x3c000, 0x3c030, 2077 0x3c100, 0x3c144, 2078 0x3c190, 0x3c1a0, 2079 0x3c1a8, 0x3c1b8, 2080 0x3c1c4, 0x3c1c8, 2081 0x3c1d0, 0x3c1d0, 2082 0x3c200, 0x3c318, 2083 0x3c400, 0x3c4b4, 2084 0x3c4c0, 0x3c52c, 2085 0x3c540, 0x3c61c, 2086 0x3c800, 0x3c828, 2087 0x3c834, 0x3c834, 2088 0x3c8c0, 0x3c908, 2089 0x3c910, 0x3c9ac, 2090 0x3ca00, 0x3ca14, 2091 0x3ca1c, 0x3ca2c, 2092 0x3ca44, 0x3ca50, 2093 0x3ca74, 0x3ca74, 2094 0x3ca7c, 0x3cafc, 2095 0x3cb08, 0x3cc24, 2096 0x3cd00, 0x3cd00, 2097 0x3cd08, 0x3cd14, 2098 0x3cd1c, 0x3cd20, 2099 0x3cd3c, 0x3cd3c, 2100 0x3cd48, 0x3cd50, 2101 0x3d200, 0x3d20c, 2102 0x3d220, 0x3d220, 2103 0x3d240, 0x3d240, 2104 0x3d600, 0x3d60c, 2105 0x3da00, 0x3da1c, 2106 0x3de00, 0x3de20, 2107 0x3de38, 0x3de3c, 2108 0x3de80, 0x3de80, 2109 0x3de88, 0x3dea8, 2110 0x3deb0, 0x3deb4, 2111 0x3dec8, 0x3ded4, 2112 0x3dfb8, 0x3e004, 2113 0x3e200, 0x3e200, 2114 0x3e208, 0x3e240, 2115 0x3e248, 0x3e280, 2116 0x3e288, 0x3e2c0, 2117 0x3e2c8, 0x3e2fc, 2118 0x3e600, 0x3e630, 2119 0x3ea00, 0x3eabc, 2120 0x3eb00, 0x3eb10, 2121 0x3eb20, 0x3eb30, 2122 0x3eb40, 0x3eb50, 2123 0x3eb60, 0x3eb70, 2124 0x3f000, 0x3f028, 2125 0x3f030, 0x3f048, 2126 0x3f060, 0x3f068, 2127 0x3f070, 0x3f09c, 2128 0x3f0f0, 0x3f128, 2129 0x3f130, 0x3f148, 2130 0x3f160, 0x3f168, 2131 0x3f170, 0x3f19c, 2132 0x3f1f0, 0x3f238, 2133 0x3f240, 0x3f240, 2134 0x3f248, 0x3f250, 2135 0x3f25c, 0x3f264, 2136 0x3f270, 0x3f2b8, 2137 0x3f2c0, 0x3f2e4, 2138 0x3f2f8, 0x3f338, 2139 0x3f340, 0x3f340, 2140 0x3f348, 0x3f350, 2141 0x3f35c, 0x3f364, 2142 0x3f370, 0x3f3b8, 2143 0x3f3c0, 0x3f3e4, 2144 0x3f3f8, 0x3f428, 2145 0x3f430, 0x3f448, 2146 0x3f460, 0x3f468, 2147 0x3f470, 0x3f49c, 2148 0x3f4f0, 0x3f528, 2149 0x3f530, 0x3f548, 2150 0x3f560, 0x3f568, 2151 0x3f570, 0x3f59c, 2152 0x3f5f0, 0x3f638, 2153 0x3f640, 0x3f640, 2154 0x3f648, 0x3f650, 
2155 0x3f65c, 0x3f664, 2156 0x3f670, 0x3f6b8, 2157 0x3f6c0, 0x3f6e4, 2158 0x3f6f8, 0x3f738, 2159 0x3f740, 0x3f740, 2160 0x3f748, 0x3f750, 2161 0x3f75c, 0x3f764, 2162 0x3f770, 0x3f7b8, 2163 0x3f7c0, 0x3f7e4, 2164 0x3f7f8, 0x3f7fc, 2165 0x3f814, 0x3f814, 2166 0x3f82c, 0x3f82c, 2167 0x3f880, 0x3f88c, 2168 0x3f8e8, 0x3f8ec, 2169 0x3f900, 0x3f928, 2170 0x3f930, 0x3f948, 2171 0x3f960, 0x3f968, 2172 0x3f970, 0x3f99c, 2173 0x3f9f0, 0x3fa38, 2174 0x3fa40, 0x3fa40, 2175 0x3fa48, 0x3fa50, 2176 0x3fa5c, 0x3fa64, 2177 0x3fa70, 0x3fab8, 2178 0x3fac0, 0x3fae4, 2179 0x3faf8, 0x3fb10, 2180 0x3fb28, 0x3fb28, 2181 0x3fb3c, 0x3fb50, 2182 0x3fbf0, 0x3fc10, 2183 0x3fc28, 0x3fc28, 2184 0x3fc3c, 0x3fc50, 2185 0x3fcf0, 0x3fcfc, 2186 0x40000, 0x4000c, 2187 0x40040, 0x40050, 2188 0x40060, 0x40068, 2189 0x4007c, 0x4008c, 2190 0x40094, 0x400b0, 2191 0x400c0, 0x40144, 2192 0x40180, 0x4018c, 2193 0x40200, 0x40254, 2194 0x40260, 0x40264, 2195 0x40270, 0x40288, 2196 0x40290, 0x40298, 2197 0x402ac, 0x402c8, 2198 0x402d0, 0x402e0, 2199 0x402f0, 0x402f0, 2200 0x40300, 0x4033c, 2201 0x403f8, 0x403fc, 2202 0x41304, 0x413c4, 2203 0x41400, 0x4140c, 2204 0x41414, 0x4141c, 2205 0x41480, 0x414d0, 2206 0x44000, 0x44054, 2207 0x4405c, 0x44078, 2208 0x440c0, 0x44174, 2209 0x44180, 0x441ac, 2210 0x441b4, 0x441b8, 2211 0x441c0, 0x44254, 2212 0x4425c, 0x44278, 2213 0x442c0, 0x44374, 2214 0x44380, 0x443ac, 2215 0x443b4, 0x443b8, 2216 0x443c0, 0x44454, 2217 0x4445c, 0x44478, 2218 0x444c0, 0x44574, 2219 0x44580, 0x445ac, 2220 0x445b4, 0x445b8, 2221 0x445c0, 0x44654, 2222 0x4465c, 0x44678, 2223 0x446c0, 0x44774, 2224 0x44780, 0x447ac, 2225 0x447b4, 0x447b8, 2226 0x447c0, 0x44854, 2227 0x4485c, 0x44878, 2228 0x448c0, 0x44974, 2229 0x44980, 0x449ac, 2230 0x449b4, 0x449b8, 2231 0x449c0, 0x449fc, 2232 0x45000, 0x45004, 2233 0x45010, 0x45030, 2234 0x45040, 0x45060, 2235 0x45068, 0x45068, 2236 0x45080, 0x45084, 2237 0x450a0, 0x450b0, 2238 0x45200, 0x45204, 2239 0x45210, 0x45230, 2240 0x45240, 0x45260, 2241 0x45268, 
0x45268, 2242 0x45280, 0x45284, 2243 0x452a0, 0x452b0, 2244 0x460c0, 0x460e4, 2245 0x47000, 0x4703c, 2246 0x47044, 0x4708c, 2247 0x47200, 0x47250, 2248 0x47400, 0x47408, 2249 0x47414, 0x47420, 2250 0x47600, 0x47618, 2251 0x47800, 0x47814, 2252 0x48000, 0x4800c, 2253 0x48040, 0x48050, 2254 0x48060, 0x48068, 2255 0x4807c, 0x4808c, 2256 0x48094, 0x480b0, 2257 0x480c0, 0x48144, 2258 0x48180, 0x4818c, 2259 0x48200, 0x48254, 2260 0x48260, 0x48264, 2261 0x48270, 0x48288, 2262 0x48290, 0x48298, 2263 0x482ac, 0x482c8, 2264 0x482d0, 0x482e0, 2265 0x482f0, 0x482f0, 2266 0x48300, 0x4833c, 2267 0x483f8, 0x483fc, 2268 0x49304, 0x493c4, 2269 0x49400, 0x4940c, 2270 0x49414, 0x4941c, 2271 0x49480, 0x494d0, 2272 0x4c000, 0x4c054, 2273 0x4c05c, 0x4c078, 2274 0x4c0c0, 0x4c174, 2275 0x4c180, 0x4c1ac, 2276 0x4c1b4, 0x4c1b8, 2277 0x4c1c0, 0x4c254, 2278 0x4c25c, 0x4c278, 2279 0x4c2c0, 0x4c374, 2280 0x4c380, 0x4c3ac, 2281 0x4c3b4, 0x4c3b8, 2282 0x4c3c0, 0x4c454, 2283 0x4c45c, 0x4c478, 2284 0x4c4c0, 0x4c574, 2285 0x4c580, 0x4c5ac, 2286 0x4c5b4, 0x4c5b8, 2287 0x4c5c0, 0x4c654, 2288 0x4c65c, 0x4c678, 2289 0x4c6c0, 0x4c774, 2290 0x4c780, 0x4c7ac, 2291 0x4c7b4, 0x4c7b8, 2292 0x4c7c0, 0x4c854, 2293 0x4c85c, 0x4c878, 2294 0x4c8c0, 0x4c974, 2295 0x4c980, 0x4c9ac, 2296 0x4c9b4, 0x4c9b8, 2297 0x4c9c0, 0x4c9fc, 2298 0x4d000, 0x4d004, 2299 0x4d010, 0x4d030, 2300 0x4d040, 0x4d060, 2301 0x4d068, 0x4d068, 2302 0x4d080, 0x4d084, 2303 0x4d0a0, 0x4d0b0, 2304 0x4d200, 0x4d204, 2305 0x4d210, 0x4d230, 2306 0x4d240, 0x4d260, 2307 0x4d268, 0x4d268, 2308 0x4d280, 0x4d284, 2309 0x4d2a0, 0x4d2b0, 2310 0x4e0c0, 0x4e0e4, 2311 0x4f000, 0x4f03c, 2312 0x4f044, 0x4f08c, 2313 0x4f200, 0x4f250, 2314 0x4f400, 0x4f408, 2315 0x4f414, 0x4f420, 2316 0x4f600, 0x4f618, 2317 0x4f800, 0x4f814, 2318 0x50000, 0x50084, 2319 0x50090, 0x500cc, 2320 0x50400, 0x50400, 2321 0x50800, 0x50884, 2322 0x50890, 0x508cc, 2323 0x50c00, 0x50c00, 2324 0x51000, 0x5101c, 2325 0x51300, 0x51308, 2326 }; 2327 2328 static const unsigned int 
t6_reg_ranges[] = { 2329 0x1008, 0x101c, 2330 0x1024, 0x10a8, 2331 0x10b4, 0x10f8, 2332 0x1100, 0x1114, 2333 0x111c, 0x112c, 2334 0x1138, 0x113c, 2335 0x1144, 0x114c, 2336 0x1180, 0x1184, 2337 0x1190, 0x1194, 2338 0x11a0, 0x11a4, 2339 0x11b0, 0x11c4, 2340 0x11fc, 0x1274, 2341 0x1280, 0x133c, 2342 0x1800, 0x18fc, 2343 0x3000, 0x302c, 2344 0x3060, 0x30b0, 2345 0x30b8, 0x30d8, 2346 0x30e0, 0x30fc, 2347 0x3140, 0x357c, 2348 0x35a8, 0x35cc, 2349 0x35ec, 0x35ec, 2350 0x3600, 0x5624, 2351 0x56cc, 0x56ec, 2352 0x56f4, 0x5720, 2353 0x5728, 0x575c, 2354 0x580c, 0x5814, 2355 0x5890, 0x589c, 2356 0x58a4, 0x58ac, 2357 0x58b8, 0x58bc, 2358 0x5940, 0x595c, 2359 0x5980, 0x598c, 2360 0x59b0, 0x59c8, 2361 0x59d0, 0x59dc, 2362 0x59fc, 0x5a18, 2363 0x5a60, 0x5a6c, 2364 0x5a80, 0x5a8c, 2365 0x5a94, 0x5a9c, 2366 0x5b94, 0x5bfc, 2367 0x5c10, 0x5e48, 2368 0x5e50, 0x5e94, 2369 0x5ea0, 0x5eb0, 2370 0x5ec0, 0x5ec0, 2371 0x5ec8, 0x5ed0, 2372 0x5ee0, 0x5ee0, 2373 0x5ef0, 0x5ef0, 2374 0x5f00, 0x5f00, 2375 0x6000, 0x6020, 2376 0x6028, 0x6040, 2377 0x6058, 0x609c, 2378 0x60a8, 0x619c, 2379 0x7700, 0x7798, 2380 0x77c0, 0x7880, 2381 0x78cc, 0x78fc, 2382 0x7b00, 0x7b58, 2383 0x7b60, 0x7b84, 2384 0x7b8c, 0x7c54, 2385 0x7d00, 0x7d38, 2386 0x7d40, 0x7d84, 2387 0x7d8c, 0x7ddc, 2388 0x7de4, 0x7e04, 2389 0x7e10, 0x7e1c, 2390 0x7e24, 0x7e38, 2391 0x7e40, 0x7e44, 2392 0x7e4c, 0x7e78, 2393 0x7e80, 0x7edc, 2394 0x7ee8, 0x7efc, 2395 0x8dc0, 0x8de0, 2396 0x8df8, 0x8e04, 2397 0x8e10, 0x8e84, 2398 0x8ea0, 0x8f88, 2399 0x8fb8, 0x9058, 2400 0x9060, 0x9060, 2401 0x9068, 0x90f8, 2402 0x9100, 0x9124, 2403 0x9400, 0x9470, 2404 0x9600, 0x9600, 2405 0x9608, 0x9638, 2406 0x9640, 0x9704, 2407 0x9710, 0x971c, 2408 0x9800, 0x9808, 2409 0x9820, 0x983c, 2410 0x9850, 0x9864, 2411 0x9c00, 0x9c6c, 2412 0x9c80, 0x9cec, 2413 0x9d00, 0x9d6c, 2414 0x9d80, 0x9dec, 2415 0x9e00, 0x9e6c, 2416 0x9e80, 0x9eec, 2417 0x9f00, 0x9f6c, 2418 0x9f80, 0xa020, 2419 0xd004, 0xd03c, 2420 0xd100, 0xd118, 2421 0xd200, 0xd214, 2422 0xd220, 0xd234, 2423 
0xd240, 0xd254, 2424 0xd260, 0xd274, 2425 0xd280, 0xd294, 2426 0xd2a0, 0xd2b4, 2427 0xd2c0, 0xd2d4, 2428 0xd2e0, 0xd2f4, 2429 0xd300, 0xd31c, 2430 0xdfc0, 0xdfe0, 2431 0xe000, 0xf008, 2432 0xf010, 0xf018, 2433 0xf020, 0xf028, 2434 0x11000, 0x11014, 2435 0x11048, 0x1106c, 2436 0x11074, 0x11088, 2437 0x11098, 0x11120, 2438 0x1112c, 0x1117c, 2439 0x11190, 0x112e0, 2440 0x11300, 0x1130c, 2441 0x12000, 0x1206c, 2442 0x19040, 0x1906c, 2443 0x19078, 0x19080, 2444 0x1908c, 0x190e8, 2445 0x190f0, 0x190f8, 2446 0x19100, 0x19110, 2447 0x19120, 0x19124, 2448 0x19150, 0x19194, 2449 0x1919c, 0x191b0, 2450 0x191d0, 0x191e8, 2451 0x19238, 0x19290, 2452 0x192a4, 0x192b0, 2453 0x19348, 0x1934c, 2454 0x193f8, 0x19418, 2455 0x19420, 0x19428, 2456 0x19430, 0x19444, 2457 0x1944c, 0x1946c, 2458 0x19474, 0x19474, 2459 0x19490, 0x194cc, 2460 0x194f0, 0x194f8, 2461 0x19c00, 0x19c48, 2462 0x19c50, 0x19c80, 2463 0x19c94, 0x19c98, 2464 0x19ca0, 0x19cbc, 2465 0x19ce4, 0x19ce4, 2466 0x19cf0, 0x19cf8, 2467 0x19d00, 0x19d28, 2468 0x19d50, 0x19d78, 2469 0x19d94, 0x19d98, 2470 0x19da0, 0x19de0, 2471 0x19df0, 0x19e10, 2472 0x19e50, 0x19e6c, 2473 0x19ea0, 0x19ebc, 2474 0x19ec4, 0x19ef4, 2475 0x19f04, 0x19f2c, 2476 0x19f34, 0x19f34, 2477 0x19f40, 0x19f50, 2478 0x19f90, 0x19fac, 2479 0x19fc4, 0x19fc8, 2480 0x19fd0, 0x19fe4, 2481 0x1a000, 0x1a004, 2482 0x1a010, 0x1a06c, 2483 0x1a0b0, 0x1a0e4, 2484 0x1a0ec, 0x1a0f8, 2485 0x1a100, 0x1a108, 2486 0x1a114, 0x1a120, 2487 0x1a128, 0x1a130, 2488 0x1a138, 0x1a138, 2489 0x1a190, 0x1a1c4, 2490 0x1a1fc, 0x1a1fc, 2491 0x1e008, 0x1e00c, 2492 0x1e040, 0x1e044, 2493 0x1e04c, 0x1e04c, 2494 0x1e284, 0x1e290, 2495 0x1e2c0, 0x1e2c0, 2496 0x1e2e0, 0x1e2e0, 2497 0x1e300, 0x1e384, 2498 0x1e3c0, 0x1e3c8, 2499 0x1e408, 0x1e40c, 2500 0x1e440, 0x1e444, 2501 0x1e44c, 0x1e44c, 2502 0x1e684, 0x1e690, 2503 0x1e6c0, 0x1e6c0, 2504 0x1e6e0, 0x1e6e0, 2505 0x1e700, 0x1e784, 2506 0x1e7c0, 0x1e7c8, 2507 0x1e808, 0x1e80c, 2508 0x1e840, 0x1e844, 2509 0x1e84c, 0x1e84c, 2510 0x1ea84, 0x1ea90, 
2511 0x1eac0, 0x1eac0, 2512 0x1eae0, 0x1eae0, 2513 0x1eb00, 0x1eb84, 2514 0x1ebc0, 0x1ebc8, 2515 0x1ec08, 0x1ec0c, 2516 0x1ec40, 0x1ec44, 2517 0x1ec4c, 0x1ec4c, 2518 0x1ee84, 0x1ee90, 2519 0x1eec0, 0x1eec0, 2520 0x1eee0, 0x1eee0, 2521 0x1ef00, 0x1ef84, 2522 0x1efc0, 0x1efc8, 2523 0x1f008, 0x1f00c, 2524 0x1f040, 0x1f044, 2525 0x1f04c, 0x1f04c, 2526 0x1f284, 0x1f290, 2527 0x1f2c0, 0x1f2c0, 2528 0x1f2e0, 0x1f2e0, 2529 0x1f300, 0x1f384, 2530 0x1f3c0, 0x1f3c8, 2531 0x1f408, 0x1f40c, 2532 0x1f440, 0x1f444, 2533 0x1f44c, 0x1f44c, 2534 0x1f684, 0x1f690, 2535 0x1f6c0, 0x1f6c0, 2536 0x1f6e0, 0x1f6e0, 2537 0x1f700, 0x1f784, 2538 0x1f7c0, 0x1f7c8, 2539 0x1f808, 0x1f80c, 2540 0x1f840, 0x1f844, 2541 0x1f84c, 0x1f84c, 2542 0x1fa84, 0x1fa90, 2543 0x1fac0, 0x1fac0, 2544 0x1fae0, 0x1fae0, 2545 0x1fb00, 0x1fb84, 2546 0x1fbc0, 0x1fbc8, 2547 0x1fc08, 0x1fc0c, 2548 0x1fc40, 0x1fc44, 2549 0x1fc4c, 0x1fc4c, 2550 0x1fe84, 0x1fe90, 2551 0x1fec0, 0x1fec0, 2552 0x1fee0, 0x1fee0, 2553 0x1ff00, 0x1ff84, 2554 0x1ffc0, 0x1ffc8, 2555 0x30000, 0x30030, 2556 0x30100, 0x30168, 2557 0x30190, 0x301a0, 2558 0x301a8, 0x301b8, 2559 0x301c4, 0x301c8, 2560 0x301d0, 0x301d0, 2561 0x30200, 0x30320, 2562 0x30400, 0x304b4, 2563 0x304c0, 0x3052c, 2564 0x30540, 0x3061c, 2565 0x30800, 0x308a0, 2566 0x308c0, 0x30908, 2567 0x30910, 0x309b8, 2568 0x30a00, 0x30a04, 2569 0x30a0c, 0x30a14, 2570 0x30a1c, 0x30a2c, 2571 0x30a44, 0x30a50, 2572 0x30a74, 0x30a74, 2573 0x30a7c, 0x30afc, 2574 0x30b08, 0x30c24, 2575 0x30d00, 0x30d14, 2576 0x30d1c, 0x30d3c, 2577 0x30d44, 0x30d4c, 2578 0x30d54, 0x30d74, 2579 0x30d7c, 0x30d7c, 2580 0x30de0, 0x30de0, 2581 0x30e00, 0x30ed4, 2582 0x30f00, 0x30fa4, 2583 0x30fc0, 0x30fc4, 2584 0x31000, 0x31004, 2585 0x31080, 0x310fc, 2586 0x31208, 0x31220, 2587 0x3123c, 0x31254, 2588 0x31300, 0x31300, 2589 0x31308, 0x3131c, 2590 0x31338, 0x3133c, 2591 0x31380, 0x31380, 2592 0x31388, 0x313a8, 2593 0x313b4, 0x313b4, 2594 0x31400, 0x31420, 2595 0x31438, 0x3143c, 2596 0x31480, 0x31480, 2597 0x314a8, 
0x314a8, 2598 0x314b0, 0x314b4, 2599 0x314c8, 0x314d4, 2600 0x31a40, 0x31a4c, 2601 0x31af0, 0x31b20, 2602 0x31b38, 0x31b3c, 2603 0x31b80, 0x31b80, 2604 0x31ba8, 0x31ba8, 2605 0x31bb0, 0x31bb4, 2606 0x31bc8, 0x31bd4, 2607 0x32140, 0x3218c, 2608 0x321f0, 0x321f4, 2609 0x32200, 0x32200, 2610 0x32218, 0x32218, 2611 0x32400, 0x32400, 2612 0x32408, 0x3241c, 2613 0x32618, 0x32620, 2614 0x32664, 0x32664, 2615 0x326a8, 0x326a8, 2616 0x326ec, 0x326ec, 2617 0x32a00, 0x32abc, 2618 0x32b00, 0x32b18, 2619 0x32b20, 0x32b38, 2620 0x32b40, 0x32b58, 2621 0x32b60, 0x32b78, 2622 0x32c00, 0x32c00, 2623 0x32c08, 0x32c3c, 2624 0x33000, 0x3302c, 2625 0x33034, 0x33050, 2626 0x33058, 0x33058, 2627 0x33060, 0x3308c, 2628 0x3309c, 0x330ac, 2629 0x330c0, 0x330c0, 2630 0x330c8, 0x330d0, 2631 0x330d8, 0x330e0, 2632 0x330ec, 0x3312c, 2633 0x33134, 0x33150, 2634 0x33158, 0x33158, 2635 0x33160, 0x3318c, 2636 0x3319c, 0x331ac, 2637 0x331c0, 0x331c0, 2638 0x331c8, 0x331d0, 2639 0x331d8, 0x331e0, 2640 0x331ec, 0x33290, 2641 0x33298, 0x332c4, 2642 0x332e4, 0x33390, 2643 0x33398, 0x333c4, 2644 0x333e4, 0x3342c, 2645 0x33434, 0x33450, 2646 0x33458, 0x33458, 2647 0x33460, 0x3348c, 2648 0x3349c, 0x334ac, 2649 0x334c0, 0x334c0, 2650 0x334c8, 0x334d0, 2651 0x334d8, 0x334e0, 2652 0x334ec, 0x3352c, 2653 0x33534, 0x33550, 2654 0x33558, 0x33558, 2655 0x33560, 0x3358c, 2656 0x3359c, 0x335ac, 2657 0x335c0, 0x335c0, 2658 0x335c8, 0x335d0, 2659 0x335d8, 0x335e0, 2660 0x335ec, 0x33690, 2661 0x33698, 0x336c4, 2662 0x336e4, 0x33790, 2663 0x33798, 0x337c4, 2664 0x337e4, 0x337fc, 2665 0x33814, 0x33814, 2666 0x33854, 0x33868, 2667 0x33880, 0x3388c, 2668 0x338c0, 0x338d0, 2669 0x338e8, 0x338ec, 2670 0x33900, 0x3392c, 2671 0x33934, 0x33950, 2672 0x33958, 0x33958, 2673 0x33960, 0x3398c, 2674 0x3399c, 0x339ac, 2675 0x339c0, 0x339c0, 2676 0x339c8, 0x339d0, 2677 0x339d8, 0x339e0, 2678 0x339ec, 0x33a90, 2679 0x33a98, 0x33ac4, 2680 0x33ae4, 0x33b10, 2681 0x33b24, 0x33b28, 2682 0x33b38, 0x33b50, 2683 0x33bf0, 0x33c10, 2684 
0x33c24, 0x33c28, 2685 0x33c38, 0x33c50, 2686 0x33cf0, 0x33cfc, 2687 0x34000, 0x34030, 2688 0x34100, 0x34168, 2689 0x34190, 0x341a0, 2690 0x341a8, 0x341b8, 2691 0x341c4, 0x341c8, 2692 0x341d0, 0x341d0, 2693 0x34200, 0x34320, 2694 0x34400, 0x344b4, 2695 0x344c0, 0x3452c, 2696 0x34540, 0x3461c, 2697 0x34800, 0x348a0, 2698 0x348c0, 0x34908, 2699 0x34910, 0x349b8, 2700 0x34a00, 0x34a04, 2701 0x34a0c, 0x34a14, 2702 0x34a1c, 0x34a2c, 2703 0x34a44, 0x34a50, 2704 0x34a74, 0x34a74, 2705 0x34a7c, 0x34afc, 2706 0x34b08, 0x34c24, 2707 0x34d00, 0x34d14, 2708 0x34d1c, 0x34d3c, 2709 0x34d44, 0x34d4c, 2710 0x34d54, 0x34d74, 2711 0x34d7c, 0x34d7c, 2712 0x34de0, 0x34de0, 2713 0x34e00, 0x34ed4, 2714 0x34f00, 0x34fa4, 2715 0x34fc0, 0x34fc4, 2716 0x35000, 0x35004, 2717 0x35080, 0x350fc, 2718 0x35208, 0x35220, 2719 0x3523c, 0x35254, 2720 0x35300, 0x35300, 2721 0x35308, 0x3531c, 2722 0x35338, 0x3533c, 2723 0x35380, 0x35380, 2724 0x35388, 0x353a8, 2725 0x353b4, 0x353b4, 2726 0x35400, 0x35420, 2727 0x35438, 0x3543c, 2728 0x35480, 0x35480, 2729 0x354a8, 0x354a8, 2730 0x354b0, 0x354b4, 2731 0x354c8, 0x354d4, 2732 0x35a40, 0x35a4c, 2733 0x35af0, 0x35b20, 2734 0x35b38, 0x35b3c, 2735 0x35b80, 0x35b80, 2736 0x35ba8, 0x35ba8, 2737 0x35bb0, 0x35bb4, 2738 0x35bc8, 0x35bd4, 2739 0x36140, 0x3618c, 2740 0x361f0, 0x361f4, 2741 0x36200, 0x36200, 2742 0x36218, 0x36218, 2743 0x36400, 0x36400, 2744 0x36408, 0x3641c, 2745 0x36618, 0x36620, 2746 0x36664, 0x36664, 2747 0x366a8, 0x366a8, 2748 0x366ec, 0x366ec, 2749 0x36a00, 0x36abc, 2750 0x36b00, 0x36b18, 2751 0x36b20, 0x36b38, 2752 0x36b40, 0x36b58, 2753 0x36b60, 0x36b78, 2754 0x36c00, 0x36c00, 2755 0x36c08, 0x36c3c, 2756 0x37000, 0x3702c, 2757 0x37034, 0x37050, 2758 0x37058, 0x37058, 2759 0x37060, 0x3708c, 2760 0x3709c, 0x370ac, 2761 0x370c0, 0x370c0, 2762 0x370c8, 0x370d0, 2763 0x370d8, 0x370e0, 2764 0x370ec, 0x3712c, 2765 0x37134, 0x37150, 2766 0x37158, 0x37158, 2767 0x37160, 0x3718c, 2768 0x3719c, 0x371ac, 2769 0x371c0, 0x371c0, 2770 0x371c8, 0x371d0, 
2771 0x371d8, 0x371e0, 2772 0x371ec, 0x37290, 2773 0x37298, 0x372c4, 2774 0x372e4, 0x37390, 2775 0x37398, 0x373c4, 2776 0x373e4, 0x3742c, 2777 0x37434, 0x37450, 2778 0x37458, 0x37458, 2779 0x37460, 0x3748c, 2780 0x3749c, 0x374ac, 2781 0x374c0, 0x374c0, 2782 0x374c8, 0x374d0, 2783 0x374d8, 0x374e0, 2784 0x374ec, 0x3752c, 2785 0x37534, 0x37550, 2786 0x37558, 0x37558, 2787 0x37560, 0x3758c, 2788 0x3759c, 0x375ac, 2789 0x375c0, 0x375c0, 2790 0x375c8, 0x375d0, 2791 0x375d8, 0x375e0, 2792 0x375ec, 0x37690, 2793 0x37698, 0x376c4, 2794 0x376e4, 0x37790, 2795 0x37798, 0x377c4, 2796 0x377e4, 0x377fc, 2797 0x37814, 0x37814, 2798 0x37854, 0x37868, 2799 0x37880, 0x3788c, 2800 0x378c0, 0x378d0, 2801 0x378e8, 0x378ec, 2802 0x37900, 0x3792c, 2803 0x37934, 0x37950, 2804 0x37958, 0x37958, 2805 0x37960, 0x3798c, 2806 0x3799c, 0x379ac, 2807 0x379c0, 0x379c0, 2808 0x379c8, 0x379d0, 2809 0x379d8, 0x379e0, 2810 0x379ec, 0x37a90, 2811 0x37a98, 0x37ac4, 2812 0x37ae4, 0x37b10, 2813 0x37b24, 0x37b28, 2814 0x37b38, 0x37b50, 2815 0x37bf0, 0x37c10, 2816 0x37c24, 0x37c28, 2817 0x37c38, 0x37c50, 2818 0x37cf0, 0x37cfc, 2819 0x40040, 0x40040, 2820 0x40080, 0x40084, 2821 0x40100, 0x40100, 2822 0x40140, 0x401bc, 2823 0x40200, 0x40214, 2824 0x40228, 0x40228, 2825 0x40240, 0x40258, 2826 0x40280, 0x40280, 2827 0x40304, 0x40304, 2828 0x40330, 0x4033c, 2829 0x41304, 0x413c8, 2830 0x413d0, 0x413dc, 2831 0x413f0, 0x413f0, 2832 0x41400, 0x4140c, 2833 0x41414, 0x4141c, 2834 0x41480, 0x414d0, 2835 0x44000, 0x4407c, 2836 0x440c0, 0x441ac, 2837 0x441b4, 0x4427c, 2838 0x442c0, 0x443ac, 2839 0x443b4, 0x4447c, 2840 0x444c0, 0x445ac, 2841 0x445b4, 0x4467c, 2842 0x446c0, 0x447ac, 2843 0x447b4, 0x4487c, 2844 0x448c0, 0x449ac, 2845 0x449b4, 0x44a7c, 2846 0x44ac0, 0x44bac, 2847 0x44bb4, 0x44c7c, 2848 0x44cc0, 0x44dac, 2849 0x44db4, 0x44e7c, 2850 0x44ec0, 0x44fac, 2851 0x44fb4, 0x4507c, 2852 0x450c0, 0x451ac, 2853 0x451b4, 0x451fc, 2854 0x45800, 0x45804, 2855 0x45810, 0x45830, 2856 0x45840, 0x45860, 2857 0x45868, 
0x45868, 2858 0x45880, 0x45884, 2859 0x458a0, 0x458b0, 2860 0x45a00, 0x45a04, 2861 0x45a10, 0x45a30, 2862 0x45a40, 0x45a60, 2863 0x45a68, 0x45a68, 2864 0x45a80, 0x45a84, 2865 0x45aa0, 0x45ab0, 2866 0x460c0, 0x460e4, 2867 0x47000, 0x4703c, 2868 0x47044, 0x4708c, 2869 0x47200, 0x47250, 2870 0x47400, 0x47408, 2871 0x47414, 0x47420, 2872 0x47600, 0x47618, 2873 0x47800, 0x47814, 2874 0x47820, 0x4782c, 2875 0x50000, 0x50084, 2876 0x50090, 0x500cc, 2877 0x50300, 0x50384, 2878 0x50400, 0x50400, 2879 0x50800, 0x50884, 2880 0x50890, 0x508cc, 2881 0x50b00, 0x50b84, 2882 0x50c00, 0x50c00, 2883 0x51000, 0x51020, 2884 0x51028, 0x510b0, 2885 0x51300, 0x51324, 2886 }; 2887 2888 u32 *buf_end = (u32 *)((char *)buf + buf_size); 2889 const unsigned int *reg_ranges; 2890 int reg_ranges_size, range; 2891 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip); 2892 2893 /* Select the right set of register ranges to dump depending on the 2894 * adapter chip type. 2895 */ 2896 switch (chip_version) { 2897 case CHELSIO_T4: 2898 reg_ranges = t4_reg_ranges; 2899 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges); 2900 break; 2901 2902 case CHELSIO_T5: 2903 reg_ranges = t5_reg_ranges; 2904 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges); 2905 break; 2906 2907 case CHELSIO_T6: 2908 reg_ranges = t6_reg_ranges; 2909 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges); 2910 break; 2911 2912 default: 2913 CH_ERR(adap, 2914 "Unsupported chip version %d\n", chip_version); 2915 return; 2916 } 2917 2918 /* Clear the register buffer and insert the appropriate register 2919 * values selected by the above register ranges. 2920 */ 2921 memset(buf, 0, buf_size); 2922 for (range = 0; range < reg_ranges_size; range += 2) { 2923 unsigned int reg = reg_ranges[range]; 2924 unsigned int last_reg = reg_ranges[range + 1]; 2925 u32 *bufp = (u32 *)((char *)buf + reg); 2926 2927 /* Iterate across the register range filling in the register 2928 * buffer but don't write past the end of the register buffer. 
		 */
		while (reg <= last_reg && bufp < buf_end) {
			*bufp++ = t4_read_reg(adap, reg);
			reg += sizeof(u32);
		}
	}
}

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_DELAY		10		// 10us per poll spin
#define EEPROM_MAX_POLL		5000		// x 5000 == 50ms

#define EEPROM_STAT_ADDR	0x7bfc		/* EEPROM status word address */
#define VPD_SIZE		0x800		/* real size of the VPD area */
#define VPD_BASE		0x400		/* normal start of card VPD */
#define VPD_BASE_OLD		0		/* VPD start on early cards */
#define VPD_LEN			1024		/* bytes of VPD we buffer */
#define VPD_INFO_FLD_HDR_SIZE	3		/* 2-byte keyword + 1-byte len */
#define CHELSIO_VPD_UNIQUE_ID	0x82		/* PCI SIG VPD identifier tag */

/*
 * Small utility function to wait till any outstanding VPD Access is complete.
 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
 * VPD Access in flight.  This allows us to handle the problem of having a
 * previous VPD Access time out and prevent an attempt to inject a new VPD
 * Request before any in-flight VPD request has completed.
 */
static int t4_seeprom_wait(struct adapter *adapter)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int max_poll;

	/*
	 * If no VPD Access is in flight, we can just return success right
	 * away.
	 */
	if (!adapter->vpd_busy)
		return 0;

	/*
	 * Poll the VPD Capability Address/Flag register waiting for it
	 * to indicate that the operation is complete.
	 */
	max_poll = EEPROM_MAX_POLL;
	do {
		u16 val;

		udelay(EEPROM_DELAY);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);

		/*
		 * If the operation is complete, mark the VPD as no longer
		 * busy and return success.  The flag's "done" polarity
		 * differs for reads and writes, so we compare against the
		 * vpd_flag value recorded when the request was issued.
		 */
		if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
			adapter->vpd_busy = 0;
			return 0;
		}
	} while (--max_poll);

	/*
	 * Failure!  Note that we leave the VPD Busy status set in order to
	 * avoid pushing a new VPD Access request into the VPD Capability till
	 * the current operation eventually succeeds.  It's a bug to issue a
	 * new request when an existing request is in flight and will result
	 * in corrupt hardware state.
	 */
	return -ETIMEDOUT;
}

/**
 *	t4_seeprom_read - read a serial EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
 *	VPD capability.  Note that this function must be called with a virtual
 *	address.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;

	/*
	 * VPD Accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/*
	 * Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/*
	 * Issue our new VPD Read request, mark the VPD as being busy and wait
	 * for our request to complete.  If it doesn't complete, note the
	 * error and return it to our caller.  Note that we do not reset the
	 * VPD Busy status!
	 */
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = PCI_VPD_ADDR_F;
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
		return ret;
	}

	/*
	 * Grab the returned data, swizzle it into our endianness and
	 * return success.
3050 */ 3051 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data); 3052 *data = le32_to_cpu(*data); 3053 return 0; 3054 } 3055 3056 /** 3057 * t4_seeprom_write - write a serial EEPROM location 3058 * @adapter: adapter to write 3059 * @addr: virtual EEPROM address 3060 * @data: value to write 3061 * 3062 * Write a 32-bit word to a location in serial EEPROM using the card's PCI 3063 * VPD capability. Note that this function must be called with a virtual 3064 * address. 3065 */ 3066 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data) 3067 { 3068 unsigned int base = adapter->params.pci.vpd_cap_addr; 3069 int ret; 3070 u32 stats_reg; 3071 int max_poll; 3072 3073 /* 3074 * VPD Accesses must alway be 4-byte aligned! 3075 */ 3076 if (addr >= EEPROMVSIZE || (addr & 3)) 3077 return -EINVAL; 3078 3079 /* 3080 * Wait for any previous operation which may still be in flight to 3081 * complete. 3082 */ 3083 ret = t4_seeprom_wait(adapter); 3084 if (ret) { 3085 CH_ERR(adapter, "VPD still busy from previous operation\n"); 3086 return ret; 3087 } 3088 3089 /* 3090 * Issue our new VPD Read request, mark the VPD as being busy and wait 3091 * for our request to complete. If it doesn't complete, note the 3092 * error and return it to our caller. Note that we do not reset the 3093 * VPD Busy status! 3094 */ 3095 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 3096 cpu_to_le32(data)); 3097 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, 3098 (u16)addr | PCI_VPD_ADDR_F); 3099 adapter->vpd_busy = 1; 3100 adapter->vpd_flag = 0; 3101 ret = t4_seeprom_wait(adapter); 3102 if (ret) { 3103 CH_ERR(adapter, "VPD write of address %#x failed\n", addr); 3104 return ret; 3105 } 3106 3107 /* 3108 * Reset PCI_VPD_DATA register after a transaction and wait for our 3109 * request to complete. If it doesn't complete, return error. 
3110 */ 3111 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0); 3112 max_poll = EEPROM_MAX_POLL; 3113 do { 3114 udelay(EEPROM_DELAY); 3115 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg); 3116 } while ((stats_reg & 0x1) && --max_poll); 3117 if (!max_poll) 3118 return -ETIMEDOUT; 3119 3120 /* Return success! */ 3121 return 0; 3122 } 3123 3124 /** 3125 * t4_eeprom_ptov - translate a physical EEPROM address to virtual 3126 * @phys_addr: the physical EEPROM address 3127 * @fn: the PCI function number 3128 * @sz: size of function-specific area 3129 * 3130 * Translate a physical EEPROM address to virtual. The first 1K is 3131 * accessed through virtual addresses starting at 31K, the rest is 3132 * accessed through virtual addresses starting at 0. 3133 * 3134 * The mapping is as follows: 3135 * [0..1K) -> [31K..32K) 3136 * [1K..1K+A) -> [ES-A..ES) 3137 * [1K+A..ES) -> [0..ES-A-1K) 3138 * 3139 * where A = @fn * @sz, and ES = EEPROM size. 3140 */ 3141 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) 3142 { 3143 fn *= sz; 3144 if (phys_addr < 1024) 3145 return phys_addr + (31 << 10); 3146 if (phys_addr < 1024 + fn) 3147 return EEPROMSIZE - fn + phys_addr - 1024; 3148 if (phys_addr < EEPROMSIZE) 3149 return phys_addr - 1024 - fn; 3150 return -EINVAL; 3151 } 3152 3153 /** 3154 * t4_seeprom_wp - enable/disable EEPROM write protection 3155 * @adapter: the adapter 3156 * @enable: whether to enable or disable write protection 3157 * 3158 * Enables or disables write protection on the serial EEPROM. 3159 */ 3160 int t4_seeprom_wp(struct adapter *adapter, int enable) 3161 { 3162 return t4_os_pci_write_seeprom(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); 3163 } 3164 3165 /** 3166 * get_vpd_keyword_val - Locates an information field keyword in the VPD 3167 * @v: Pointer to buffered vpd data structure 3168 * @kw: The keyword to search for 3169 * 3170 * Returns the value of the information field keyword or 3171 * -ENOENT otherwise. 
 */
int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int i;
	unsigned int offset , len;
	const u8 *buf = (const u8 *)v;
	const u8 *vpdr_len = &v->vpdr_len[0];

	/* The VPD-R data follows the header; its length is stored
	 * little-endian in the header's two vpdr_len bytes.
	 */
	offset = sizeof(struct t4_vpd_hdr);
	len = (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
		return -ENOENT;
	}

	/* Walk the information fields: each is a 3-byte header (2-byte
	 * keyword + 1-byte value length) followed by the value bytes.
	 */
	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
		if(memcmp(buf + i , kw , 2) == 0){
			/* Return the offset of the value, just past the header. */
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
	}

	return -ENOENT;
}

/*
 * str_strip
 * Removes trailing whitespaces from string "s"
 * Based on strstrip() implementation in string.c
 */
static void str_strip(char *s)
{
	size_t size;
	char *end;

	size = strlen(s);
	if (!size)
		return;

	/* Walk backwards from the last character over any whitespace. */
	end = s + size - 1;
	while (end >= s && isspace(*end))
		end--;
	*(end + 1) = '\0';
}

/**
 *	t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret = 0, addr;
	int ec, sn, pn, na;	/* offsets of the EC/SN/PN/NA keyword values */
	u8 *vpd, csum;
	const struct t4_vpd_hdr *v;

	vpd = (u8 *)t4_os_alloc(sizeof(u8) * VPD_LEN);
	if (!vpd)
		return -ENOMEM;

	/* We have two VPD data structures stored in the adapter VPD area.
	 * By default, Linux calculates the size of the VPD area by traversing
	 * the first VPD area at offset 0x0, so we need to tell the OS what
	 * our real VPD size is.
	 */
	ret = t4_os_pci_set_vpd_size(adapter, VPD_SIZE);
	if (ret < 0)
		goto out;

	/* Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_os_pci_read_seeprom(adapter, VPD_BASE, (u32 *)(vpd));
	if (ret)
		goto out;

	/* The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
	 * is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	/* Pull the whole VPD area into the local buffer, a word at a time. */
	for (i = 0; i < VPD_LEN; i += 4) {
		ret = t4_os_pci_read_seeprom(adapter, addr+i, (u32 *)(vpd+i));
		if (ret)
			goto out;
	}
	v = (const struct t4_vpd_hdr *)vpd;

/* Locate keyword "name" and leave the offset of its value in "var";
 * on failure log the missing keyword and bail out with -EINVAL.
 */
#define FIND_VPD_KW(var,name) do { \
	var = get_vpd_keyword_val(v , name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out; \
	} \
} while (0)

	/* All bytes up to and including the RV checksum byte must sum to 0. */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter,
		       "corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	/* Copy each field out, trimming trailing whitespace; the value's
	 * length byte is the last byte of the 3-byte field header.
	 */
	memcpy(p->id, v->id_data, ID_LEN);
	str_strip((char *)p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	str_strip((char *)p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	str_strip((char *)p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	str_strip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	str_strip((char *)p->na);

out:
	kmem_free(vpd, sizeof(u8) * VPD_LEN);
	return ret < 0 ?
	    ret : 0;
}

/**
 *	t4_get_vpd_params - read VPD parameters & retrieve Core Clock
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM and retrieves the Core
 *	Clock.  This can only be called after a connection to the firmware
 *	is established.
 */
int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	u32 cclk_param, cclk_val;
	int ret;

	/*
	 * Grab the raw VPD parameters.
	 */
	ret = t4_get_raw_vpd_params(adapter, p);
	if (ret)
		return ret;

	/*
	 * Ask firmware for the Core Clock since it knows how to translate the
	 * Reference Clock ('V2') VPD field into a Core Clock value ...
	 */
	cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		      V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      1, &cclk_param, &cclk_val);

	if (ret)
		return ret;
	p->cclk = cclk_val;

	return 0;
}

/**
 *	t4_get_pfres - retrieve PF resource limits
 *	@adapter: the adapter
 *
 *	Retrieves configured resource limits and capabilities for a physical
 *	function.  The results are stored in @adapter->pfres.
 */
int t4_get_pfres(struct adapter *adapter)
{
	struct pf_resources *pfres = &adapter->params.pfres;
	struct fw_pfvf_cmd cmd, rpl;
	int v;
	u32 word;

	/*
	 * Execute PFVF Read command to get PF resource limits; bail out early
	 * with error on command failure.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
				    F_FW_CMD_REQUEST |
				    F_FW_CMD_READ |
				    V_FW_PFVF_CMD_PFN(adapter->pf) |
				    V_FW_PFVF_CMD_VFN(0));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
	if (v != FW_SUCCESS)
		return v;

	/*
	 * Extract PF resource limits and return success.
	 */
	word = be32_to_cpu(rpl.niqflint_niq);
	pfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
	pfres->niq = G_FW_PFVF_CMD_NIQ(word);

	word = be32_to_cpu(rpl.type_to_neq);
	pfres->neq = G_FW_PFVF_CMD_NEQ(word);
	pfres->pmask = G_FW_PFVF_CMD_PMASK(word);

	word = be32_to_cpu(rpl.tc_to_nexactf);
	pfres->tc = G_FW_PFVF_CMD_TC(word);
	pfres->nvi = G_FW_PFVF_CMD_NVI(word);
	pfres->nexactf = G_FW_PFVF_CMD_NEXACTF(word);

	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
	pfres->r_caps = G_FW_PFVF_CMD_R_CAPS(word);
	pfres->wx_caps = G_FW_PFVF_CMD_WX_CAPS(word);
	pfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word);

	return 0;
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,		/* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID = 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial
flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	/* Only one SF operation may be outstanding at a time. */
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	/* Only one SF operation may be outstanding at a time. */
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@ch_delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int ch_delay)
{
	int ret;
	u32 status;

	while (1) {
		/* Issue READ STATUS and fetch the single status byte. */
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		/* Status bit 0 set means the flash is still busy. */
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (ch_delay) {
#ifdef CONFIG_CUDBG
			/* Busy-wait in a crash-dump context; sleeping is
			 * not an option there.
			 */
			if (adapter->flags & K_CRASH)
				mdelay(ch_delay);
			else
#endif
				msleep(ch_delay);
		}
	}
}

/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/* Build the fast-read command: byte-swapped address + opcode. */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* Lock SF on the final read, then release it right after. */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32)(cpu_to_be32(*data));
	}
	return 0;
}

/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	If @byte_oriented is set the write data is stored as byte stream
 *	(i.e. matches what on disk), otherwise in big-endian.
 */
int t4_write_flash(struct adapter *adapter, unsigned int addr,
		   unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* Build the page-program command: byte-swapped address + opcode. */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Feed the payload to the flash up to 4 bytes at a time. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = cpu_to_be32(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	/* data was advanced past the payload above; step back to compare. */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter,
		       "failed to correctly write the flash page at %#x\n",
		       addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 *	t4_get_bs_version - read the firmware bootstrap version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW Bootstrap version from flash.
3627 */ 3628 int t4_get_bs_version(struct adapter *adapter, u32 *vers) 3629 { 3630 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START + 3631 offsetof(struct fw_hdr, fw_ver), 1, 3632 vers, 0); 3633 } 3634 3635 /** 3636 * t4_get_tp_version - read the TP microcode version 3637 * @adapter: the adapter 3638 * @vers: where to place the version 3639 * 3640 * Reads the TP microcode version from flash. 3641 */ 3642 int t4_get_tp_version(struct adapter *adapter, u32 *vers) 3643 { 3644 return t4_read_flash(adapter, FLASH_FW_START + 3645 offsetof(struct fw_hdr, tp_microcode_ver), 3646 1, vers, 0); 3647 } 3648 3649 /** 3650 * t4_get_exprom_version - return the Expansion ROM version (if any) 3651 * @adapter: the adapter 3652 * @vers: where to place the version 3653 * 3654 * Reads the Expansion ROM header from FLASH and returns the version 3655 * number (if present) through the @vers return value pointer. We return 3656 * this in the Firmware Version Format since it's convenient. Return 3657 * 0 on success, -ENOENT if no Expansion ROM is present. 
 */
int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
{
	/* Minimal view of a PCI Expansion ROM header: we only need the
	 * signature bytes and the (Chelsio-specific) version bytes.
	 */
	struct exprom_header {
		unsigned char hdr_arr[16];	/* must start with 0x55aa */
		unsigned char hdr_ver[4];	/* Expansion ROM version */
	} *hdr;
	/* Word-sized backing store so t4_read_flash() can fill it directly */
	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
					   sizeof(u32))];
	int ret;

	ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
			    ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
			    0);
	if (ret)
		return ret;

	hdr = (struct exprom_header *)exprom_header_buf;
	/* 0x55 0xaa is the mandatory PCI expansion ROM signature */
	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
		return -ENOENT;

	/* Repackage the four version bytes in Firmware Version Format */
	*vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
		 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
		 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
		 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
	return 0;
}

/**
 * t4_get_scfg_version - return the Serial Configuration version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the Serial Configuration Version via the Firmware interface
 * (thus this can only be called once we're ready to issue Firmware
 * commands).  The format of the Serial Configuration version is
 * adapter specific.  Returns 0 on success, an error on failure.
 *
 * Note that early versions of the Firmware didn't include the ability
 * to retrieve the Serial Configuration version, so we zero-out the
 * return-value parameter in that case to avoid leaving it with
 * garbage in it.
 *
 * Also note that the Firmware will return its cached copy of the Serial
 * Initialization Revision ID, not the actual Revision ID as written in
 * the Serial EEPROM.  This is only an issue if a new VPD has been written
 * and the Firmware/Chip haven't yet gone through a RESET sequence.  So
 * it's best to defer calling this routine till after a FW_RESET_CMD has
 * been issued if the Host Driver will be performing a full adapter
 * initialization.
 */
int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
{
	u32 scfgrev_param;
	int ret;

	scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      1, &scfgrev_param, vers);
	if (ret)
		*vers = 0;	/* old FW may not support this query */
	return ret;
}

/**
 * t4_get_vpd_version - return the VPD version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the VPD via the Firmware interface (thus this can only be called
 * once we're ready to issue Firmware commands).  The format of the
 * VPD version is adapter specific.  Returns 0 on success, an error on
 * failure.
 *
 * Note that early versions of the Firmware didn't include the ability
 * to retrieve the VPD version, so we zero-out the return-value parameter
 * in that case to avoid leaving it with garbage in it.
 *
 * Also note that the Firmware will return its cached copy of the VPD
 * Revision ID, not the actual Revision ID as written in the Serial
 * EEPROM.  This is only an issue if a new VPD has been written and the
 * Firmware/Chip haven't yet gone through a RESET sequence.  So it's best
 * to defer calling this routine till after a FW_RESET_CMD has been issued
 * if the Host Driver will be performing a full adapter initialization.
 */
int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
{
	u32 vpdrev_param;
	int ret;

	vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      1, &vpdrev_param, vers);
	if (ret)
		*vers = 0;	/* old FW may not support this query */
	return ret;
}

/**
 * t4_get_version_info - extract various chip/firmware version information
 * @adapter: the adapter
 *
 * Reads various chip/firmware version numbers and stores them into the
 * adapter Adapter Parameters structure.  If any of the efforts fails
 * the first failure will be returned, but all of the version numbers
 * will be read.
 */
int t4_get_version_info(struct adapter *adapter)
{
	int ret = 0;

	/* Evaluate @__getvinfo; remember only the FIRST failure but keep
	 * going so every version that can be read is read.
	 */
#define FIRST_RET(__getvinfo) \
	do { \
		int __ret = __getvinfo; \
		if (__ret && !ret) \
			ret = __ret; \
	} while (0)

	FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
	FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
	FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
	FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
	FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
	FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));

#undef FIRST_RET

	return ret;
}

/**
 * t4_dump_version_info - dump all of the adapter configuration IDs
 * @adapter: the adapter
 *
 * Dumps all of the various bits of adapter configuration version/revision
 * IDs information.  This is typically called at some point after
 * t4_get_version_info() has been called.
 */
void t4_dump_version_info(struct adapter *adapter)
{
	/*
	 * Device information.
	 */
	CH_INFO(adapter, "Chelsio %s rev %d\n",
		adapter->params.vpd.id,
		CHELSIO_CHIP_RELEASE(adapter->params.chip));
	CH_INFO(adapter, "S/N: %s, P/N: %s\n",
		adapter->params.vpd.sn,
		adapter->params.vpd.pn);

	/*
	 * Firmware Version.
	 */
	if (!adapter->params.fw_vers)
		CH_WARN(adapter, "No firmware loaded\n");
	else
		CH_INFO(adapter, "Firmware version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));

	/*
	 * Bootstrap Firmware Version.  (Some adapters don't have Bootstrap
	 * Firmware, so dev_info() is more appropriate here.)
	 */
	if (!adapter->params.bs_vers)
		CH_INFO(adapter, "No bootstrap loaded\n");
	else
		CH_INFO(adapter, "Bootstrap version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers));

	/*
	 * TP Microcode Version.
	 */
	if (!adapter->params.tp_vers)
		CH_WARN(adapter, "No TP Microcode loaded\n");
	else
		CH_INFO(adapter, "TP Microcode version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));

	/*
	 * Expansion ROM version.
	 */
	if (!adapter->params.er_vers)
		CH_INFO(adapter, "No Expansion ROM loaded\n");
	else
		CH_INFO(adapter, "Expansion ROM version: %u.%u.%u.%u\n",
			G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers),
			G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers),
			G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers),
			G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));

	/*
	 * Serial Configuration version.
	 */
	CH_INFO(adapter, "Serial Configuration version: %x\n",
		adapter->params.scfg_vers);

	/*
	 * VPD version.
	 */
	CH_INFO(adapter, "VPD version: %x\n",
		adapter->params.vpd_vers);
}

/**
 * t4_check_fw_version - check if the FW is supported with this driver
 * @adap: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver.  Returns 0
 * if there's exact match, a negative error if the version could not be
 * read or there's a major version mismatch
 */
int t4_check_fw_version(struct adapter *adap)
{
	int ret, major, minor, micro;
	int exp_major, exp_minor, exp_micro;
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);

	ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	if (ret)
		return ret;

	major = G_FW_HDR_FW_VER_MAJOR(adap->params.fw_vers);
	minor = G_FW_HDR_FW_VER_MINOR(adap->params.fw_vers);
	micro = G_FW_HDR_FW_VER_MICRO(adap->params.fw_vers);

	/* Pick the minimum supported FW version for this chip generation */
	switch (chip_version) {
	case CHELSIO_T4:
		exp_major = T4FW_MIN_VERSION_MAJOR;
		exp_minor = T4FW_MIN_VERSION_MINOR;
		exp_micro = T4FW_MIN_VERSION_MICRO;
		break;
	case CHELSIO_T5:
		exp_major = T5FW_MIN_VERSION_MAJOR;
		exp_minor = T5FW_MIN_VERSION_MINOR;
		exp_micro = T5FW_MIN_VERSION_MICRO;
		break;
	case CHELSIO_T6:
		exp_major = T6FW_MIN_VERSION_MAJOR;
		exp_minor = T6FW_MIN_VERSION_MINOR;
		exp_micro = T6FW_MIN_VERSION_MICRO;
		break;
	default:
		CH_ERR(adap, "Unsupported chip type, %x\n",
		       adap->params.chip);
		return -EINVAL;
	}

	/* Lexicographic (major, minor, micro) comparison against minimum */
	if (major < exp_major || (major == exp_major && minor < exp_minor) ||
	    (major == exp_major && minor == exp_minor && micro < exp_micro)) {
		CH_ERR(adap, "Card has firmware version %u.%u.%u, minimum "
		       "supported firmware is %u.%u.%u.\n", major, minor,
		       micro, exp_major, exp_minor, exp_micro);
		return -EFAULT;
	}
	return 0;
}

/* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{

	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

	/*
	 * XXX: Is this too conservative?  Perhaps I should limit this to the
	 * features that are supported in the driver.
	 */
#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}

/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
				int k, int c, int t4_fw_install)
{
	/* @k is the filesystem FW version, @c the on-card FW version,
	 * both already converted to host byte order by the caller.
	 */
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version bundled with this driver";
		goto install;
	}

	/* t4_fw_install == 2: install whenever the versions differ at all */
	if (t4_fw_install == 2 && k != c) {
		reason = "different than the version bundled with this driver";
		goto install;
	}

	return 0;

install:
	/* t4_fw_install == 0: report why we'd install, but don't */
	if (t4_fw_install == 0) {
		CH_ERR(adap, "firmware on card (%u.%u.%u.%u) is %s, "
		       "but the driver is prohibited from installing a "
		       "different firmware on the card.\n",
		       G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		       G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
		       reason);

		return (0);
	}

	CH_ERR(adap, "firmware on card (%u.%u.%u.%u) is %s, "
	       "installing firmware %u.%u.%u.%u on card.\n",
	       G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
	       G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
	       G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
	       G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));

	return 1;
}

/*
 * Decide which firmware to run (on-card vs. filesystem image), install the
 * filesystem image if policy says so, and cache the resulting FW/TP versions
 * in @adap->params.  NOTE: unlike most routines in this file, this one
 * returns a POSITIVE errno on failure (callers negate t4_* results here).
 */
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, const int t4_fw_install,
	       enum dev_state state, int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			     sizeof(*card_fw) / sizeof(uint32_t),
			     (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		CH_ERR(adap,
		       "Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.  Note that t4_fw_install = 2
		 * is ignored here -- use cxgbtool loadfw if you want to
		 * reinstall the same firmware as the one on the card.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver),
					t4_fw_install)) {

		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			CH_ERR(adap,
			       "failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update cached information */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		(void)t4_init_devlog_params(adap, 1);
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		CH_ERR(adap, "Cannot find a usable firmware: "
		       "fw_install %d, chip state %d, "
		       "driver compiled with %d.%d.%d.%d, "
		       "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
		       t4_fw_install, state,
		       G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
		       G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
		       G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		       G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
		       G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
		       G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}

/**
 * t4_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	if (end >= adapter->params.sf_nsec)
		return -EINVAL;

	while (start <= end) {
		/* write-enable, issue sector erase, then poll for completion */
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 * t4_flash_cfg_addr - return the address of the flash configuration file
 * @adapter: the adapter
 *
 * Return the address within the flash where the Firmware Configuration
 * File is stored, or an error if the device FLASH is too small to contain
 * a Firmware Configuration File.
 */
int t4_flash_cfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
		return -ENOSPC;

	return FLASH_CFG_START;
}

/* Return TRUE if the specified firmware matches the adapter.  I.e. T4
 * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
 * and emit an error message for mismatched firmware to save our caller the
 * effort ...
 */
static int t4_fw_matches_chip(const struct adapter *adap,
			      const struct fw_hdr *hdr)
{
	/*
	 * The expression below will return FALSE for any unsupported adapter
	 * which will keep us "honest" in the future ...
	 */
	if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
	    (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
	    (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
		return 1;

	CH_ERR(adap,
		"FW image (%d) is not suitable for this adapter (%d)\n",
		hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
	return 0;
}

/**
 * t4_load_fw - download firmware
 * @adap: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 * @bootstrap: indicates if the binary is a bootstrap fw
 *
 * Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size,
	       unsigned int bootstrap)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	/* Bootstrap and main FW live in different flash regions */
	if (bootstrap) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}

	/* Sanity-check the image before touching the flash */
	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
		CH_ERR(adap,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
		       fw_size);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* The 32-bit word sum of a valid image is 0xffffffff */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	/* Finally write the real version into the header */
	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n",
		       ret);
	else {
		/* refresh the cached version from what's now in flash */
		if (bootstrap)
			ret = t4_get_bs_version(adap, &adap->params.bs_vers);
		else
			ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	}
	return ret;
}

/**
 * t4_phy_fw_ver - return current PHY firmware version
 * @adap: the adapter
 * @phy_fw_ver: return value buffer for PHY firmware version
 *
 * Returns the current version of external PHY firmware on the
 * adapter.
 */
int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
{
	u32 param, val;
	int ret;

	/* Query the PHY firmware version through the FW PARAMS interface */
	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) |
		 V_FW_PARAMS_PARAM_Y(adap->params.portvec) |
		 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
			      &param, &val);
	if (ret < 0)
		return ret;
	*phy_fw_ver = val;
	return 0;
}

/**
 * t4_load_phy_fw - download port PHY firmware
 * @adap: the adapter
 * @win: the PCI-E Memory Window index to use for t4_memory_rw()
 * @lock: the lock to use to guard the memory copy
 * @phy_fw_version: function to check PHY firmware versions
 * @phy_fw_data: the PHY firmware image to write
 * @phy_fw_size: image size
 *
 * Transfer the specified PHY firmware to the adapter.  If a non-NULL
 * @phy_fw_version is supplied, then it will be used to determine if
 * it's necessary to perform the transfer by comparing the version
 * of any existing adapter PHY firmware with that of the passed in
 * PHY firmware image.  If @lock is non-NULL then it will be used
 * around the call to t4_memory_rw() which transfers the PHY firmware
 * to the adapter.
 *
 * A negative error number will be returned if an error occurs.  If
 * version number support is available and there's no need to upgrade
 * the firmware, 0 will be returned.  If firmware is successfully
 * transferred to the adapter, 1 will be returned.
 *
 * NOTE: some adapters only have local RAM to store the PHY firmware.  As
 * a result, a RESET of the adapter would cause that RAM to lose its
 * contents.  Thus, loading PHY firmware on such adapters must happen after any
 * FW_RESET_CMDs ...
4320 */ 4321 int t4_load_phy_fw(struct adapter *adap, 4322 int win, t4_os_lock_t *lock, 4323 int (*phy_fw_version)(const u8 *, size_t), 4324 const u8 *phy_fw_data, size_t phy_fw_size) 4325 { 4326 unsigned long mtype = 0, maddr = 0; 4327 u32 param, val; 4328 int cur_phy_fw_ver = 0, new_phy_fw_vers = 0; 4329 int ret; 4330 4331 /* 4332 * If we have version number support, then check to see if the adapter 4333 * already has up-to-date PHY firmware loaded. 4334 */ 4335 if (phy_fw_version) { 4336 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size); 4337 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver); 4338 if (ret < 0) 4339 return ret;; 4340 4341 if (cur_phy_fw_ver >= new_phy_fw_vers) { 4342 CH_WARN(adap, "PHY Firmware already up-to-date, " 4343 "version %#x\n", cur_phy_fw_ver); 4344 return 0; 4345 } 4346 } 4347 4348 /* 4349 * Ask the firmware where it wants us to copy the PHY firmware image. 4350 * The size of the file requires a special version of the READ coommand 4351 * which will pass the file size via the values field in PARAMS_CMD and 4352 * retreive the return value from firmware and place it in the same 4353 * buffer values 4354 */ 4355 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 4356 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) | 4357 V_FW_PARAMS_PARAM_Y(adap->params.portvec) | 4358 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD)); 4359 val = phy_fw_size; 4360 ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1, 4361 ¶m, &val, 1, true); 4362 if (ret < 0) 4363 return ret; 4364 mtype = val >> 8; 4365 maddr = (val & 0xff) << 16; 4366 4367 /* 4368 * Copy the supplied PHY Firmware image to the adapter memory location 4369 * allocated by the adapter firmware. 
4370 */ 4371 if (lock) 4372 t4_os_lock(lock); 4373 ret = t4_memory_rw(adap, win, mtype, maddr, 4374 phy_fw_size, (__be32*)phy_fw_data, 4375 T4_MEMORY_WRITE); 4376 if (lock) 4377 t4_os_unlock(lock); 4378 if (ret) 4379 return ret; 4380 4381 /* 4382 * Tell the firmware that the PHY firmware image has been written to 4383 * RAM and it can now start copying it over to the PHYs. The chip 4384 * firmware will RESET the affected PHYs as part of this operation 4385 * leaving them running the new PHY firmware image. 4386 */ 4387 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 4388 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PHYFW) | 4389 V_FW_PARAMS_PARAM_Y(adap->params.portvec) | 4390 V_FW_PARAMS_PARAM_Z(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD)); 4391 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, 4392 ¶m, &val, 30000); 4393 4394 /* 4395 * If we have version number support, then check to see that the new 4396 * firmware got loaded properly. 4397 */ 4398 if (phy_fw_version) { 4399 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver); 4400 if (ret < 0) 4401 return ret; 4402 4403 if (cur_phy_fw_ver != new_phy_fw_vers) { 4404 CH_WARN(adap, "PHY Firmware did not update: " 4405 "version on adapter %#x, " 4406 "version flashed %#x\n", 4407 cur_phy_fw_ver, new_phy_fw_vers); 4408 return -ENXIO; 4409 } 4410 } 4411 4412 return 1; 4413 } 4414 4415 /** 4416 * t4_fwcache - firmware cache operation 4417 * @adap: the adapter 4418 * @op : the operation (flush or flush and invalidate) 4419 */ 4420 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op) 4421 { 4422 struct fw_params_cmd c; 4423 4424 memset(&c, 0, sizeof(c)); 4425 c.op_to_vfn = 4426 cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | 4427 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 4428 V_FW_PARAMS_CMD_PFN(adap->pf) | 4429 V_FW_PARAMS_CMD_VFN(0)); 4430 c.retval_len16 = cpu_to_be32(FW_LEN16(c)); 4431 c.param[0].mnem = 4432 cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 4433 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE)); 4434 
c.param[0].val = (__force __be32)op; 4435 4436 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL); 4437 } 4438 4439 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp, 4440 unsigned int *pif_req_wrptr, 4441 unsigned int *pif_rsp_wrptr) 4442 { 4443 int i, j; 4444 u32 cfg, val, req, rsp; 4445 4446 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG); 4447 if (cfg & F_LADBGEN) 4448 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN); 4449 4450 val = t4_read_reg(adap, A_CIM_DEBUGSTS); 4451 req = G_POLADBGWRPTR(val); 4452 rsp = G_PILADBGWRPTR(val); 4453 if (pif_req_wrptr) 4454 *pif_req_wrptr = req; 4455 if (pif_rsp_wrptr) 4456 *pif_rsp_wrptr = rsp; 4457 4458 for (i = 0; i < CIM_PIFLA_SIZE; i++) { 4459 for (j = 0; j < 6; j++) { 4460 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) | 4461 V_PILADBGRDPTR(rsp)); 4462 *pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA); 4463 *pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA); 4464 req++; 4465 rsp++; 4466 } 4467 req = (req + 2) & M_POLADBGRDPTR; 4468 rsp = (rsp + 2) & M_PILADBGRDPTR; 4469 } 4470 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg); 4471 } 4472 4473 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp) 4474 { 4475 u32 cfg; 4476 int i, j, idx; 4477 4478 cfg = t4_read_reg(adap, A_CIM_DEBUGCFG); 4479 if (cfg & F_LADBGEN) 4480 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN); 4481 4482 for (i = 0; i < CIM_MALA_SIZE; i++) { 4483 for (j = 0; j < 5; j++) { 4484 idx = 8 * i + j; 4485 t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) | 4486 V_PILADBGRDPTR(idx)); 4487 *ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA); 4488 *ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA); 4489 } 4490 } 4491 t4_write_reg(adap, A_CIM_DEBUGCFG, cfg); 4492 } 4493 4494 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) 4495 { 4496 unsigned int i, j; 4497 4498 for (i = 0; i < 8; i++) { 4499 u32 *p = la_buf + i; 4500 4501 t4_write_reg(adap, A_ULP_RX_LA_CTL, i); 4502 j = t4_read_reg(adap, 
							A_ULP_RX_LA_WRPTR);
		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
	}
}

/* Platform-specific action invoked when a matching interrupt fires. */
typedef void (*int_handler_t)(struct adapter *adap);

/*
 * One row of a table-driven interrupt dispatch table; a table is a
 * mask-terminated array (the last entry has mask == 0).
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler;	/* platform-specific int handler */
};

/**
 * t4_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally emitting a warning or alert message.  The table is
 *	terminated by an entry specifying mask 0.  Returns the number of
 *	fatal interrupt conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	/* Walk the table until the mask == 0 sentinel entry. */
	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		} else if (acts->msg)
			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
					  status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)	/* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info sysbus_intr_info[] = {
		{ F_RNPP, "RXNP array parity error", -1, 1 },
		{ F_RPCP, "RXPC array parity error", -1, 1 },
		{ F_RCIP, "RXCIF array parity error", -1, 1 },
		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
		{ F_RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_port_intr_info[] = {
		{ F_TPCP, "TXPC array parity error", -1, 1 },
		{ F_TNPP, "TXNP array parity error", -1, 1 },
		{ F_TFTP, "TXFT array parity error", -1, 1 },
		{ F_TCAP, "TXCA array parity error", -1, 1 },
		{ F_TCIP, "TXCIF array parity error", -1, 1 },
		{ F_RCAP, "RXCA array parity error", -1, 1 },
		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
		{ F_RDPE, "Rx data parity error", -1, 1 },
		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	/* T4-only layout of PCIE_INT_CAUSE. */
	static const struct intr_info pcie_intr_info[] = {
		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
		  0 },
		{ 0 }
	};

	/* T5/T6 layout of PCIE_INT_CAUSE differs from T4. */
	static struct intr_info t5_pcie_intr_info[] = {
		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
		  -1, 1 },
		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
		  -1, 1 },
		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
		  -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
		  -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
		  -1, 1 },
		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
		  -1, 1 },
		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
		{ F_READRSPERR, "Outbound read error", -1,
		  0 },
		{ 0 }
	};

	int fat;

	/* T4 additionally exposes the two UTL status registers. */
	if (is_t4(adapter->params.chip))
		fat = t4_handle_intr_status(adapter,
				A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				sysbus_intr_info) +
		      t4_handle_intr_status(adapter,
				A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
				pcie_port_intr_info) +
		      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					    pcie_intr_info);
	else
		fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					    t5_pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}

/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u32 v = 0, perr;
	u32 err;

	static const struct intr_info sge_intr_info[] = {
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ F_ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ F_ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 |
		  F_ERR_PCIE_ERROR2 | F_ERR_PCIE_ERROR3,
		  "SGE PCIe error for a DBP thread", -1, 0 },
		{ 0 }
	};

	/* Conditions that exist only on T4/T5. */
	static struct intr_info t4t5_sge_intr_info[] = {
		{ F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
		{ F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ 0 }
	};

	/*
	 * For now, treat below interrupts as fatal so that we disable SGE and
	 * get better debug */
	static struct intr_info t6_sge_intr_info[] = {
		{ F_FATAL_WRE_LEN,
		  "SGE Actual WRE packet is less than advertized length",
		  -1, 1 },
		{ 0 }
	};

	/* Any parity error in CAUSE1/2 (and CAUSE5 on T5+) is fatal. */
	perr = t4_read_reg(adapter, A_SGE_INT_CAUSE1);
	if (perr) {
		v |= perr;
		CH_ALERT(adapter, "SGE Cause1 Parity Error %#x\n", perr);
	}
	perr = t4_read_reg(adapter, A_SGE_INT_CAUSE2);
	if (perr) {
		v |= perr;
		CH_ALERT(adapter, "SGE Cause2 Parity Error %#x\n", perr);
	}
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) >= CHELSIO_T5) {
		perr = t4_read_reg(adapter, A_SGE_INT_CAUSE5);
		if (perr) {
			v |= perr;
			CH_ALERT(adapter, "SGE Cause5 Parity Error %#x\n", perr);
		}
	}

	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t4t5_sge_intr_info);
	else
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t6_sge_intr_info);

	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
	if (err & F_ERROR_QID_VALID) {
		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
		if (err & F_UNCAPTURED_ERROR)
			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
		/* write-to-clear the QID-valid and uncaptured flags */
		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
			     F_UNCAPTURED_ERROR);
	}

	if (v != 0)
		t4_fatal_err(adapter);
}

#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ F_TIMER0INT, "CIM TIMER0 interrupt", -1, 1 },
		{ 0 }
	};
	static const struct intr_info cim_upintr_info[] = {
		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ F_SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
		{ F_SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
		{ F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
		{ F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
		{ F_SGLRDPLINT, "CIM single read from PL space", -1, 1 },
		{ F_SGLWRPLINT, "CIM single write to PL space", -1, 1 },
		{ F_BLKRDPLINT, "CIM block read from PL space", -1, 1 },
		{ F_BLKWRPLINT, "CIM block write to PL space", -1, 1 },
		{ F_REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
		{ F_RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
		{ F_TIMEOUTINT, "CIM PIF timeout", -1, 1 },
		{ F_TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};
	u32 val, fw_err;
	int fat;

	fw_err = t4_read_reg(adapter, A_PCIE_FW);
	if (fw_err & F_PCIE_FW_ERR)
		t4_report_fw_error(adapter);

	/* When the Firmware detects an internal error which normally wouldn't
	 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order
	 * to make sure the Host sees the Firmware Crash.  So if we have a
	 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0
	 * interrupt.
	 */
	val = t4_read_reg(adapter, A_CIM_HOST_INT_CAUSE);
	if (val & F_TIMER0INT)
		if (!(fw_err & F_PCIE_FW_ERR) ||
		    (G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH))
			t4_write_reg(adapter, A_CIM_HOST_INT_CAUSE,
				     F_TIMER0INT);

	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ 0xffffff0, "PMTX framing error", -1, 1 },
		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
		  1 },
		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ 0x3ffff0, "PMRX framing error", -1, 1 },
		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
		  1 },
		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
		t4_fatal_err(adapter);
}

/*
 * LE interrupt handler.
 */
static void le_intr_handler(struct adapter *adap)
{
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
	/* T4/T5 layout of LE_DB_INT_CAUSE. */
	static const struct intr_info le_intr_info[] = {
		{ F_LIPMISS, "LE LIP miss", -1, 0 },
		{ F_LIP0, "LE 0 LIP error", -1, 0 },
		{ F_PARITYERR, "LE parity error", -1, 1 },
		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	/* T6 moved/renamed several LE cause bits. */
	static struct intr_info t6_le_intr_info[] = {
		/* log an error for HASHTBLMEMCRCERR and clear the bit */
		{ F_T6_HASHTBLMEMCRCERR, "LE hash table mem crc error", -1, 0 },
		{ F_T6_LIPMISS, "LE LIP miss", -1, 0 },
		{ F_T6_LIP0, "LE 0 LIP error", -1, 0 },
		{ F_TCAMINTPERR, "LE parity error", -1, 1 },
		{ F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
				  (chip_ver <= CHELSIO_T5) ?
				  le_intr_info : t6_le_intr_info))
		t4_fatal_err(adap);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	/* Same as mps_tx_intr_info but without the F_BUBBLE entry. */
	static const struct intr_info t6_mps_tx_intr_info[] = {
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		/* MPS Tx Bubble is normal for T6 */
		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
		  1 },
		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
				    is_t6(adapter->params.chip)
				    ? t6_mps_tx_intr_info
				    : mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
	t4_read_reg(adapter, A_MPS_INT_CAUSE);	/* flush */
	if (fat)
		t4_fatal_err(adapter);
}

#define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
		      F_ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	/* indexed by MEM_EDC0/MEM_EDC1/MEM_MC/MEM_MC1 */
	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };

	unsigned int addr, cnt_addr, v;

	/* Pick the cause/ECC-status register pair for the memory @idx. */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
	} else if (idx == MEM_MC) {
		if (is_t4(adapter->params.chip)) {
			addr = A_MC_INT_CAUSE;
			cnt_addr = A_MC_ECC_STATUS;
		} else {
			addr = A_MC_P_INT_CAUSE;
			cnt_addr = A_MC_P_ECC_STATUS;
		}
	} else {
		addr = MC_REG(A_MC_P_INT_CAUSE, 1);
		cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & F_PERR_INT_CAUSE)
		CH_ALERT(adapter, "%s FIFO parity error\n",
			 name[idx]);
	if (v & F_ECC_CE_INT_CAUSE) {
		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));

		if (idx <= MEM_EDC1)
			t4_edc_err_read(adapter, idx);

		/* reset the correctable-error counter */
		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
		CH_WARN_RATELIMIT(adapter,
				  "%u %s correctable ECC data error%s\n",
				  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & F_ECC_UE_INT_CAUSE)
		CH_ALERT(adapter,
			 "%s uncorrectable ECC data error\n", name[idx]);

	t4_write_reg(adapter, addr, v);
	/* correctable ECC errors alone are not fatal */
	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}

/*
 * MA interrupt handler.
 */
static void ma_intr_handler(struct adapter *adapter)
{
	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);

	if (status & F_MEM_PERR_INT_CAUSE) {
		CH_ALERT(adapter,
			 "MA parity error, parity status %#x\n",
			 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
		if (is_t5(adapter->params.chip))
			CH_ALERT(adapter,
				 "MA parity error, parity status %#x\n",
				 t4_read_reg(adapter,
					     A_MA_PARITY_ERROR_STATUS2));
	}
	if (status & F_MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
		CH_ALERT(adapter, "MA address wrap-around error by "
			 "client %u to address %#x\n",
			 G_MEM_WRAP_CLIENT_NUM(v),
			 G_MEM_WRAP_ADDRESS(v) << 4);
	}
	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
	/* any MA interrupt is treated as fatal */
	t4_fatal_err(adapter);
}

/*
 * SMB interrupt handler.
 */
static void smb_intr_handler(struct adapter *adap)
{
	static const struct intr_info smb_intr_info[] = {
		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
		t4_fatal_err(adap);
}

/*
 * NC-SI interrupt handler.
 */
static void ncsi_intr_handler(struct adapter *adap)
{
	static const struct intr_info ncsi_intr_info[] = {
		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
		t4_fatal_err(adap);
}

/*
 * XGMAC interrupt handler.
5196 */ 5197 static void xgmac_intr_handler(struct adapter *adap, int port) 5198 { 5199 u32 v, int_cause_reg; 5200 5201 if (is_t4(adap->params.chip)) 5202 int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE); 5203 else 5204 int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE); 5205 5206 v = t4_read_reg(adap, int_cause_reg); 5207 5208 v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR); 5209 if (!v) 5210 return; 5211 5212 if (v & F_TXFIFO_PRTY_ERR) 5213 CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", 5214 port); 5215 if (v & F_RXFIFO_PRTY_ERR) 5216 CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", 5217 port); 5218 t4_write_reg(adap, int_cause_reg, v); 5219 t4_fatal_err(adap); 5220 } 5221 5222 /* 5223 * PL Parity Error interrupt handler. 5224 */ 5225 static void pl_perr_intr_handler(struct adapter *adap) 5226 { 5227 static const struct intr_info pl_perr_info[] = { 5228 { F_UART, "UART Parity Error", -1, }, 5229 { F_ULP_TX, "ULP TX Parity Error", -1 }, 5230 { F_SGE, "SGE Parity Error", -1 }, 5231 { F_HMA, "HMA Parity Error", -1 }, 5232 { F_CPL_SWITCH, "CPL Switch Parity Error", -1 }, 5233 { F_ULP_RX, "ULP RX Parity Error", -1 }, 5234 { F_PM_RX, "PM RX Parity Error", -1 }, 5235 { F_PM_TX, "PM TX Parity Error", -1 }, 5236 { F_MA, "MA Parity Error", -1 }, 5237 { F_TP, "TP Parity Error", -1 }, 5238 { F_LE, "LE Parity Error", -1 }, 5239 { F_EDC1, "EDC1 Parity Error", -1 }, 5240 { F_EDC0, "EDC0 Parity Error", -1 }, 5241 { F_MC, "MC Parity Error", -1 }, 5242 { F_PCIE, "PCIE Parity Error", -1 }, 5243 { F_PMU, "PMU Parity Error", -1 }, 5244 { F_XGMAC_KR1, "XGMAC_KR1 Parity Error", -1 }, 5245 { F_XGMAC_KR0, "XGMAC_KR0 Parity Error", -1 }, 5246 { F_XGMAC1, "XGMAC1 Parity Error", -1 }, 5247 { F_XGMAC0, "XGMAC0 Parity Error", -1 }, 5248 { F_SMB, "SMB Parity Error", -1 }, 5249 { F_SF, "SF Parity Error", -1 }, 5250 { F_PL, "PL Parity Error", -1 }, 5251 { F_NCSI, "NCSI Parity Error", -1 }, 5252 { F_MPS, "MPS Parity Error", -1 }, 5253 { F_MI, "MI Parity Error", -1 }, 5254 { F_DBG, 
"DBG Parity Error", -1 }, 5255 { F_I2CM, "I2CM Parity Error", -1 }, 5256 { F_CIM, "CIM Parity Error", -1 }, 5257 }; 5258 5259 t4_handle_intr_status(adap, A_PL_PERR_CAUSE, pl_perr_info); 5260 /* pl_intr_handler() will do the t4_fatal_err(adap) */ 5261 } 5262 5263 /* 5264 * PL interrupt handler. 5265 */ 5266 static void pl_intr_handler(struct adapter *adap) 5267 { 5268 static const struct intr_info pl_intr_info[] = { 5269 { F_FATALPERR, "Fatal parity error", -1, 1, 5270 pl_perr_intr_handler }, 5271 { F_PERRVFID, "PL VFID_MAP parity error", -1, 1 }, 5272 { 0 } 5273 }; 5274 5275 static struct intr_info t5_pl_intr_info[] = { 5276 { F_FATALPERR, "Fatal parity error", -1, 1, 5277 pl_perr_intr_handler }, 5278 { 0 } 5279 }; 5280 5281 if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, 5282 is_t4(adap->params.chip) ? 5283 pl_intr_info : t5_pl_intr_info)) 5284 t4_fatal_err(adap); 5285 } 5286 5287 #define PF_INTR_MASK (F_PFSW | F_PFCIM) 5288 5289 /** 5290 * t4_slow_intr_handler - control path interrupt handler 5291 * @adapter: the adapter 5292 * 5293 * T4 interrupt handler for non-data global interrupt events, e.g., errors. 5294 * The designation 'slow' is because it involves register reads, while 5295 * data interrupts typically don't involve any MMIOs. 5296 */ 5297 int t4_slow_intr_handler(struct adapter *adapter) 5298 { 5299 /* There are rare cases where a PL_INT_CAUSE bit may end up getting 5300 * set when the corresponding PL_INT_ENABLE bit isn't set. It's 5301 * easiest just to mask that case here. 
5302 */ 5303 u32 raw_cause = t4_read_reg(adapter, A_PL_INT_CAUSE); 5304 u32 enable = t4_read_reg(adapter, A_PL_INT_ENABLE); 5305 u32 cause = raw_cause & enable; 5306 5307 if (!(cause & GLBL_INTR_MASK)) 5308 return 0; 5309 5310 /* Disable all the interrupt(bits) in PL_INT_ENABLE */ 5311 t4_write_reg(adapter, A_PL_INT_ENABLE, 0); 5312 (void)t4_read_reg(adapter, A_PL_INT_ENABLE); /* flush */ 5313 5314 if (cause & F_CIM) 5315 cim_intr_handler(adapter); 5316 if (cause & F_MPS) 5317 mps_intr_handler(adapter); 5318 if (cause & F_NCSI) 5319 ncsi_intr_handler(adapter); 5320 if (cause & F_PL) 5321 pl_intr_handler(adapter); 5322 if (cause & F_SMB) 5323 smb_intr_handler(adapter); 5324 if (cause & F_MAC0) 5325 xgmac_intr_handler(adapter, 0); 5326 if (cause & F_MAC1) 5327 xgmac_intr_handler(adapter, 1); 5328 if (cause & F_MAC2) 5329 xgmac_intr_handler(adapter, 2); 5330 if (cause & F_MAC3) 5331 xgmac_intr_handler(adapter, 3); 5332 if (cause & F_PCIE) 5333 pcie_intr_handler(adapter); 5334 if (cause & F_MC0) 5335 mem_intr_handler(adapter, MEM_MC); 5336 if (is_t5(adapter->params.chip) && (cause & F_MC1)) 5337 mem_intr_handler(adapter, MEM_MC1); 5338 if (cause & F_EDC0) 5339 mem_intr_handler(adapter, MEM_EDC0); 5340 if (cause & F_EDC1) 5341 mem_intr_handler(adapter, MEM_EDC1); 5342 if (cause & F_LE) 5343 le_intr_handler(adapter); 5344 if (cause & F_TP) 5345 tp_intr_handler(adapter); 5346 if (cause & F_MA) 5347 ma_intr_handler(adapter); 5348 if (cause & F_PM_TX) 5349 pmtx_intr_handler(adapter); 5350 if (cause & F_PM_RX) 5351 pmrx_intr_handler(adapter); 5352 if (cause & F_ULP_RX) 5353 ulprx_intr_handler(adapter); 5354 if (cause & F_CPL_SWITCH) 5355 cplsw_intr_handler(adapter); 5356 if (cause & F_SGE) 5357 sge_intr_handler(adapter); 5358 if (cause & F_ULP_TX) 5359 ulptx_intr_handler(adapter); 5360 5361 /* Clear the interrupts just processed for which we are the master. 
*/ 5362 t4_write_reg(adapter, A_PL_INT_CAUSE, raw_cause & GLBL_INTR_MASK); 5363 5364 /* re-enable the interrupts (bits that were disabled 5365 * earlier in PL_INT_ENABLE) 5366 */ 5367 t4_write_reg(adapter, A_PL_INT_ENABLE, enable); 5368 (void)t4_read_reg(adapter, A_PL_INT_ENABLE); /* flush */ 5369 return 1; 5370 } 5371 5372 /** 5373 * t4_intr_enable - enable interrupts 5374 * @adapter: the adapter whose interrupts should be enabled 5375 * 5376 * Enable PF-specific interrupts for the calling function and the top-level 5377 * interrupt concentrator for global interrupts. Interrupts are already 5378 * enabled at each module, here we just enable the roots of the interrupt 5379 * hierarchies. 5380 * 5381 * Note: this function should be called only when the driver manages 5382 * non PF-specific interrupts from the various HW modules. Only one PCI 5383 * function at a time should be doing this. 5384 */ 5385 void t4_intr_enable(struct adapter *adapter) 5386 { 5387 u32 val = 0; 5388 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI); 5389 u32 pf = (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 5390 ? 
	    G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami));

	/* SGE error-interrupt set differs by chip generation. */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
	else
		val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
		     F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
		     F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	/* Route this PF's interrupts by setting its bit in the PL map. */
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
}

/**
 * t4_intr_disable - disable interrupts
 * @adapter: the adapter whose interrupts should be disabled
 *
 * Disable interrupts.  We only disable the top-level interrupt
 * concentrators.  The caller must be a PCI function managing global
 * interrupts.
 */
void t4_intr_disable(struct adapter *adapter)
{
	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	/* The WHOAMI PF-number field moved between T5 and T6. */
	u32 pf = (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5
		  ? G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami));

	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
}

/*
 * Returns the size of the chip's RSS lookup table; T6 shrank the table
 * relative to T4/T5.
 */
unsigned int t4_chip_rss_size(struct adapter *adap)
{
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		return RSS_NENTRIES;
	else
		return T6_RSS_NENTRIES;
}

/**
 * t4_config_rss_range - configure a portion of the RSS mapping table
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: virtual interface whose RSS subtable is to be written
 * @start: start entry in the table to write
 * @n: how many table entries to write
 * @rspq: values for the "response queue" (Ingress Queue) lookup table
 * @nrspq: number of values in @rspq
 *
 * Programs the selected part of the VI's RSS mapping table with the
 * provided values.  If @nrspq < @n the supplied values are used repeatedly
 * until the full table range is populated.
 *
 * The caller must ensure the values in @rspq are in the range allowed for
 * @viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;	/* one past last entry */
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/* Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/* Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/* "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/* While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/* Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq; /* wrap around @rspq */
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/* Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * t4_config_glbl_rss - configure the global RSS mode
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @mode: global RSS mode
 * @flags: mode-specific flags
 *
 * Sets the global RSS mode.
 */
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags)
{
	struct fw_rss_glb_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	/* The command payload layout depends on the selected mode; only
	 * MANUAL and BASICVIRTUAL are supported here.
	 */
	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
		c.u.manual.mode_pkd =
			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
		c.u.basicvirtual.mode_keymode =
			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
	} else
		return -EINVAL;
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_config_vi_rss - configure per VI RSS settings
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: the VI id
 * @flags: RSS flags
 * @defq: id of the default RSS queue for the VI.
 * @skeyidx: RSS secret key table index for non-global mode
 * @skey: RSS vf_scramble key for VI.
 *
 * Configures VI-specific RSS properties.
 */
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
		     unsigned int flags, unsigned int defq, unsigned int skeyidx,
		     unsigned int skey)
{
	struct fw_rss_vi_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
	c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
					V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
	c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);

	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/*
 * Read an RSS table row.  Writes the row index (with the upper bits set to
 * request a read) and then polls until the hardware marks the row valid,
 * returning the row contents in *val.
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}

/**
 * t4_read_rss - read the contents of the RSS mapping table
 * @adapter: the adapter
 * @map: holds the contents of the RSS mapping table
 *
 * Reads the contents of the RSS hash->queue mapping table.
 */
int t4_read_rss(struct adapter *adapter, u16 *map)
{
	u32 val;
	int i, ret, nentries;

	/* Each 32-bit lookup table row yields two queue entries. */
	nentries = t4_chip_rss_size(adapter);
	for (i = 0; i < nentries / 2; ++i) {
		ret = rd_rss_row(adapter, i, &val);
		if (ret)
			return ret;
		*map++ = G_LKPTBLQUEUE0(val);
		*map++ = G_LKPTBLQUEUE1(val);
	}
	return 0;
}

/**
 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
 * @adap: the adapter
 * @cmd: TP fw ldst address space type
 * @vals: where the indirect register values are stored/written
 * @nregs: how many indirect registers to read/write
 * @start_index: index of first indirect register to read/write
 * @rw: Read (1) or Write (0)
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Access TP indirect registers through LDST.  One firmware mailbox
 * command is issued per register.
 **/
static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
			    unsigned int nregs, unsigned int start_index,
			    unsigned int rw, bool sleep_ok)
{
	int ret = 0;
	unsigned int i;
	struct fw_ldst_cmd c;

	for (i = 0; i < nregs; i++) {
		memset(&c, 0, sizeof(c));
		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
						F_FW_CMD_REQUEST |
						(rw ? F_FW_CMD_READ :
						      F_FW_CMD_WRITE) |
						V_FW_LDST_CMD_ADDRSPACE(cmd));
		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));

		c.u.addrval.addr = cpu_to_be32(start_index + i);
		/* On a read the value field is ignored by firmware. */
		c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
				      sleep_ok);
		if (ret)
			return ret;

		if (rw)
			vals[i] = be32_to_cpu(c.u.addrval.val);
	}
	return 0;
}

/**
 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
 * @adap: the adapter
 * @reg_addr: Address Register
 * @reg_data: Data register
 * @buff: where the indirect register values are stored/written
 * @nregs: how many indirect registers to read/write
 * @start_index: index of first indirect register to read/write
 * @rw: READ(1) or WRITE(0)
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Read/Write TP indirect registers through LDST if possible.
 * Else, use backdoor access
 **/
static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
			      u32 *buff, u32 nregs, u32 start_index, int rw,
			      bool sleep_ok)
{
	int rc = -EINVAL;
	int cmd;

	/* Map the indirect address register to the LDST address space;
	 * unknown registers can only go through the backdoor path.
	 */
	switch (reg_addr) {
	case A_TP_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_PIO;
		break;
	case A_TP_TM_PIO_ADDR:
		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
		break;
	case A_TP_MIB_INDEX:
		cmd = FW_LDST_ADDRSPC_TP_MIB;
		break;
	default:
		goto indirect_access;
	}

	if (t4_use_ldst(adap))
		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
				      sleep_ok);

indirect_access:

	/* LDST unavailable or failed: fall back to direct register access. */
	if (rc) {
		if (rw)
			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
					 start_index);
		else
			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
					  start_index);
	}
}

/**
 * t4_tp_pio_read - Read TP PIO registers
 * @adap: the adapter
 * @buff: where the indirect register values are written
 * @nregs: how many indirect registers to read
 * @start_index: index of first indirect register to read
 * @sleep_ok: if true we may sleep
 *	    while awaiting command completion
 *
 * Read TP PIO Registers
 **/
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		    u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}

/**
 * t4_tp_pio_write - Write TP PIO registers
 * @adap: the adapter
 * @buff: where the indirect register values are stored
 * @nregs: how many indirect registers to write
 * @start_index: index of first indirect register to write
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Write TP PIO Registers
 **/
void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
		     u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
			  start_index, 0, sleep_ok);
}

/**
 * t4_tp_tm_pio_read - Read TP TM PIO registers
 * @adap: the adapter
 * @buff: where the indirect register values are written
 * @nregs: how many indirect registers to read
 * @start_index: index of first indirect register to read
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Read TP TM PIO Registers
 **/
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		       u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
			  nregs, start_index, 1, sleep_ok);
}

/**
 * t4_tp_mib_read - Read TP MIB registers
 * @adap: the adapter
 * @buff: where the indirect register values are written
 * @nregs: how many indirect registers to read
 * @start_index: index of first indirect register to read
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Read TP MIB Registers
 **/
void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
		    bool sleep_ok)
{
	t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
			  start_index, 1, sleep_ok);
}

/**
 * t4_read_rss_key - read the global RSS key
 * @adap: the adapter
 * @key: 10-entry array holding the 320-bit RSS key
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Reads the global 320-bit RSS key.
 */
void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
{
	t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
}

/**
 * t4_write_rss_key - program one of the RSS keys
 * @adap: the adapter
 * @key: 10-entry array holding the 320-bit RSS key
 * @idx: which RSS key to write
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Writes one of the RSS keys with the given 320-bit value.  If @idx is
 * 0..15 the corresponding entry in the RSS key table is written,
 * otherwise the global RSS key is written.
 */
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
		      bool sleep_ok)
{
	u8 rss_key_addr_cnt = 16;
	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);

	/* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
	 * allows access to key addresses 16-63 by using KeyWrAddrX
	 * as index[5:4](upper 2) into key table
	 */
	if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
		rss_key_addr_cnt = 32;

	t4_tp_pio_write(adap, (void *)key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);

	/* Latch the key into the selected table slot, if @idx selects one. */
	if (idx >= 0 && idx < rss_key_addr_cnt) {
		if (rss_key_addr_cnt > 16)
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt | V_KEYWRADDRX(idx >> 4) |
				     V_T6_VFWRADDR(idx) | F_KEYWREN);
		else
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     vrt | V_KEYWRADDR(idx) | F_KEYWREN);
	}
}

/**
 * t4_read_rss_pf_config - read PF RSS Configuration Table
 * @adapter: the adapter
 * @index: the entry in the PF RSS table to read
 * @valp: where to store the returned value
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Reads the PF RSS Configuration Table at the specified index and returns
 * the value found there.
 */
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
			   u32 *valp, bool sleep_ok)
{
	t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
}

/**
 * t4_write_rss_pf_config - write PF RSS Configuration Table
 * @adapter: the adapter
 * @index: the entry in the PF RSS table to write
 * @val: the value to store
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Writes the PF RSS Configuration Table at the specified index with the
 * specified value.
 */
void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
			    u32 val, bool sleep_ok)
{
	t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
			sleep_ok);
}

/**
 * t4_read_rss_vf_config - read VF RSS Configuration Table
 * @adapter: the adapter
 * @index: the entry in the VF RSS table to read
 * @vfl: where to store the returned VFL
 * @vfh: where to store the returned VFH
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Reads the VF RSS Configuration Table at the specified index and returns
 * the (VFL, VFH) values found there.
 */
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
			   u32 *vfl, u32 *vfh, bool sleep_ok)
{
	u32 vrt, mask, data;

	/* The VF write-address field moved/grew between T5 and T6. */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
		mask = V_VFWRADDR(M_VFWRADDR);
		data = V_VFWRADDR(index);
	} else {
		mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
		data = V_T6_VFWRADDR(index);
	}
	/*
	 * Request that the index'th VF Table values be read into VFL/VFH.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
	vrt |= data | F_VFRDEN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);

	/*
	 * Grab the VFL/VFH values ...
	 */
	t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
	t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
}

/**
 * t4_read_rss_pf_map - read PF RSS Map
 * @adapter: the adapter
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Reads the PF RSS Map register and returns its value.
5907 */ 5908 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok) 5909 { 5910 u32 pfmap; 5911 5912 t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok); 5913 5914 return pfmap; 5915 } 5916 5917 /** 5918 * t4_read_rss_pf_mask - read PF RSS Mask 5919 * @adapter: the adapter 5920 * @sleep_ok: if true we may sleep while awaiting command completion 5921 * 5922 * Reads the PF RSS Mask register and returns its value. 5923 */ 5924 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok) 5925 { 5926 u32 pfmask; 5927 5928 t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok); 5929 5930 return pfmask; 5931 } 5932 5933 /** 5934 * t4_tp_get_tcp_stats - read TP's TCP MIB counters 5935 * @adap: the adapter 5936 * @v4: holds the TCP/IP counter values 5937 * @v6: holds the TCP/IPv6 counter values 5938 * @sleep_ok: if true we may sleep while awaiting command completion 5939 * 5940 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. 5941 * Either @v4 or @v6 may be %NULL to skip the corresponding stats. 
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6, bool sleep_ok)
{
	/* Buffer spans the whole contiguous OUT_RST..RXT_SEG_LO MIB range. */
	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];

#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_OUT_RST, sleep_ok);
		v4->tcp_out_rsts = STAT(OUT_RST);
		v4->tcp_in_segs = STAT64(IN_SEG);
		v4->tcp_out_segs = STAT64(OUT_SEG);
		v4->tcp_retrans_segs = STAT64(RXT_SEG);
	}
	if (v6) {
		/* NOTE(review): the same OUT_RST-relative STAT offsets are
		 * reused here with a V6OUT_RST base, which assumes the v6 MIB
		 * block mirrors the v4 layout — looks correct, but confirm
		 * against the register definitions.
		 */
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
		v6->tcp_out_rsts = STAT(OUT_RST);
		v6->tcp_in_segs = STAT64(IN_SEG);
		v6->tcp_out_segs = STAT64(OUT_SEG);
		v6->tcp_retrans_segs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}

/**
 * t4_tp_get_err_stats - read TP's error MIB counters
 * @adap: the adapter
 * @st: holds the counter values
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Returns the values of TP's error counters.
 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
			 bool sleep_ok)
{
	/* One counter per channel for each error class. */
	int nchan = adap->params.arch.nchan;

	t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
		       A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
		       A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
		       sleep_ok);

	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
		       A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);

	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
		       A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);

	/* ofld_no_neigh and the following field are read as a pair. */
	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
		       sleep_ok);
}

/**
 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
 * @adap: the adapter
 * @st: holds the counter values
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Returns the values of TP's CPL counters.
 */
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
			 bool sleep_ok)
{
	int nchan = adap->params.arch.nchan;

	t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);

	t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
}

/**
 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
 * @adap: the adapter
 * @st: holds the counter values
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Returns the values of TP's RDMA counters.
 */
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
			  bool sleep_ok)
{
	/* rqe_dfr_pkt and the adjacent field are read as a pair. */
	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
		       sleep_ok);
}

/**
 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
 * @adap: the adapter
 * @idx: the port index
 * @st: holds the counter values
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Returns the values of TP's FCoE counters for the selected port.
 */
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
		       struct tp_fcoe_stats *st, bool sleep_ok)
{
	u32 val[2];

	t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
		       sleep_ok);

	t4_tp_mib_read(adap, &st->frames_drop, 1,
		       A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);

	/* The byte counter is a HI/LO register pair per port. */
	t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
		       sleep_ok);

	st->octets_ddp = ((u64)val[0] << 32) | val[1];
}

/**
 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
 * @adap: the adapter
 * @st: holds the counter values
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Returns the values of TP's counters for non-TCP directly-placed packets.
 */
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
		      bool sleep_ok)
{
	u32 val[4];

	/* val = { frames, drops, octets_hi, octets_lo } */
	t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);

	st->frames = val[0];
	st->drops = val[1];
	st->octets = ((u64)val[2] << 32) | val[3];
}

/**
 * t4_read_mtu_tbl - returns the values in the HW path MTU table
 * @adap: the adapter
 * @mtus: where to store the MTU values
 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 * Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		/* Index 0xff selects read-back of entry i. */
		t4_write_reg(adap, A_TP_MTU_TABLE,
			     V_MTUINDEX(0xffU) | V_MTUVALUE(i));
		v = t4_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = G_MTUVALUE(v);
		if (mtu_log)
			mtu_log[i] = G_MTUWIDTH(v);
	}
}

/**
 * t4_read_cong_tbl - reads the congestion control table
 * @adap: the adapter
 * @incr: where to store the alpha values
 *
 * Reads the additive increments programmed into the HW congestion
 * control table.
 */
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			/* Row index 0xffff selects read-back of (mtu, w). */
			t4_write_reg(adap, A_TP_CCTRL_TABLE,
				     V_ROWINDEX(0xffffU) | (mtu << 5) | w);
			/* Only the low 13 bits hold the additive increment. */
			incr[mtu][w] = (u16)t4_read_reg(adap,
							A_TP_CCTRL_TABLE) & 0x1fff;
		}
}

/**
 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 * @adap: the adapter
 * @addr: the indirect TP register address
 * @mask: specifies the field within the register to modify
 * @val: new value for the field
 *
 * Sets a field of an indirect TP register to the given value.
 */
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	/* Read-modify-write through the TP PIO address/data pair. */
	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
	t4_write_reg(adap, A_TP_PIO_DATA, val);
}

/**
 * init_cong_ctrl - initialize congestion control parameters
 * @a: the alpha values for congestion control
 * @b: the beta values for congestion control
 *
 * Initialize the congestion control parameters.
6159 */ 6160 static void init_cong_ctrl(unsigned short *a, unsigned short *b) 6161 { 6162 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; 6163 a[9] = 2; 6164 a[10] = 3; 6165 a[11] = 4; 6166 a[12] = 5; 6167 a[13] = 6; 6168 a[14] = 7; 6169 a[15] = 8; 6170 a[16] = 9; 6171 a[17] = 10; 6172 a[18] = 14; 6173 a[19] = 17; 6174 a[20] = 21; 6175 a[21] = 25; 6176 a[22] = 30; 6177 a[23] = 35; 6178 a[24] = 45; 6179 a[25] = 60; 6180 a[26] = 80; 6181 a[27] = 100; 6182 a[28] = 200; 6183 a[29] = 300; 6184 a[30] = 400; 6185 a[31] = 500; 6186 6187 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; 6188 b[9] = b[10] = 1; 6189 b[11] = b[12] = 2; 6190 b[13] = b[14] = b[15] = b[16] = 3; 6191 b[17] = b[18] = b[19] = b[20] = b[21] = 4; 6192 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; 6193 b[28] = b[29] = 6; 6194 b[30] = b[31] = 7; 6195 } 6196 6197 /* The minimum additive increment value for the congestion control table */ 6198 #define CC_MIN_INCR 2U 6199 6200 /** 6201 * t4_load_mtus - write the MTU and congestion control HW tables 6202 * @adap: the adapter 6203 * @mtus: the values for the MTU table 6204 * @alpha: the values for the congestion control alpha parameter 6205 * @beta: the values for the congestion control beta parameter 6206 * 6207 * Write the HW MTU table with the supplied MTUs and the high-speed 6208 * congestion control table with the supplied alpha, beta, and MTUs. 6209 * We write the two tables together because the additive increments 6210 * depend on the MTUs. 
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* Average packet counts per congestion control window. */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		/* Round the width down if the next-lower bit isn't set. */
		if (!(mtu & ((1 << log2) >> 2)))
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* Additive increment scales with payload (mtu minus
			 * 40-byte header) and is clamped to CC_MIN_INCR.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}

/*
 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
 * clocks.  The formula is
 *
 *	bytes/s = bytes256 * 256 * ClkFreq / 4096
 *
 * which is equivalent to
 *
 *	bytes/s = 62.5 * bytes256 * ClkFreq_ms
 */
static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
{
	/* NOTE(review): the multiply below appears to be done in the operand
	 * types before widening to u64 — confirm cclk/bytes256 ranges cannot
	 * overflow 32 bits.
	 */
	u64 v = bytes256 * adap->params.vpd.cclk;

	/* v * 62.5, kept in integer arithmetic. */
	return v * 62 + v / 2;
}

/**
 * t4_get_chan_txrate - get the current per channel Tx rates
 * @adap: the adapter
 * @nic_rate: rates for NIC traffic
 * @ofld_rate: rates for offloaded traffic
 *
 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
 * for each channel.
 */
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
{
	u32 v;

	v = t4_read_reg(adap, A_TP_TX_TRATE);
	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
	/* Channels 2/3 exist only on 4-channel parts. */
	if (adap->params.arch.nchan == NCHAN) {
		nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
		nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
	}

	v = t4_read_reg(adap, A_TP_TX_ORATE);
	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
	if (adap->params.arch.nchan == NCHAN) {
		ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
		ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
	}
}

/**
 * t4_set_trace_filter - configure one of the tracing filters
 * @adap: the adapter
 * @tp: the desired trace filter parameters
 * @idx: which filter to configure
 * @enable: whether to enable or disable the filter
 *
 * Configures one of the tracing filters available in HW.  If @enable is
 * %0 @tp is not examined and may be %NULL.  The user is responsible to
 * set the single/multiple trace mode by writing to A_MPS_TRC_CFG register
 * by using "cxgbtool iface reg reg_addr=val" command.  See t4_sniffer/
 * docs/readme.txt for a complete description of how to setup tracing on
 * T4.
 */
int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
			int enable)
{
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg, cfg;

	if (!enable) {
		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
		return 0;
	}

	/*
	 * TODO - After T4 data book is updated, specify the exact
	 * section below.
	 *
	 * See T4 data book - MPS section for a complete description
	 * of the below if..else handling of A_MPS_TRC_CFG register
	 * value.
	 */
	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
	if (cfg & F_TRCMULTIFILTER) {
		/*
		 * If multiple tracers are enabled, then maximum
		 * capture size is 2.5KB (FIFO size of a single channel)
		 * minus 2 flits for CPL_TRACE_PKT header.
		 */
		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
			return -EINVAL;
	} else {
		/*
		 * If multiple tracers are disabled, to avoid deadlocks
		 * maximum packet capture size of 9600 bytes is recommended.
		 * Also in this mode, only trace0 can be enabled and running.
		 */
		if (tp->snap_len > 9600 || idx)
			return -EINVAL;
	}

	/* Validate every field against its register-field width. */
	if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
	    tp->min_len > M_TFMINPKTSIZE)
		return -EINVAL;

	/* stop the tracer we'll be changing */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);

	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;

	/* The hardware stores don't-care bits; invert the caller's mask. */
	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		t4_write_reg(adap, data_reg, tp->data[i]);
		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
	}
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
		     V_TFCAPTUREMAX(tp->snap_len) |
		     V_TFMINPKTSIZE(tp->min_len));
	/* Enable bit and port/invert fields moved between T4 and T5+. */
	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
		     (is_t4(adap->params.chip) ?
		      V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert) :
		      V_T5_TFPORT(tp->port) | F_T5_TFEN |
		      V_T5_TFINVERTMATCH(tp->invert)));

	return 0;
}

/**
 * t4_get_trace_filter - query one of the tracing filters
 * @adap: the adapter
 * @tp: the current trace filter parameters
 * @idx: which trace filter to query
 * @enabled: non-zero if the filter is enabled
 *
 * Returns the current settings of one of the HW tracing filters.
 */
void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
			 int *enabled)
{
	u32 ctla, ctlb;
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg;

	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);

	/* Field layout in CTL_A differs between T4 and T5+. */
	if (is_t4(adap->params.chip)) {
		*enabled = !!(ctla & F_TFEN);
		tp->port = G_TFPORT(ctla);
		tp->invert = !!(ctla & F_TFINVERTMATCH);
	} else {
		*enabled = !!(ctla & F_T5_TFEN);
		tp->port = G_T5_TFPORT(ctla);
		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
	}
	tp->snap_len = G_TFCAPTUREMAX(ctlb);
	tp->min_len = G_TFMINPKTSIZE(ctlb);
	tp->skip_ofst = G_TFOFFSET(ctla);
	tp->skip_len = G_TFLENGTH(ctla);

	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;

	/* Hardware stores don't-care bits; invert back to a match mask. */
	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
	}
}

/**
 * t4_read_tcb - read a hardware TCP Control Block structure
 * @adap: the adapter
 * @win: PCI-E Memory Window to use
 * @tid: the TCB ID
 * @tcb: the buffer to return the TCB in
 *
 * Reads the indicated hardware TCP Control Block and returns it in
 * the supplied buffer.
Returns 0 on success. 6425 */ 6426 int t4_read_tcb(struct adapter *adap, int win, int tid, u32 tcb[TCB_SIZE/4]) 6427 { 6428 u32 tcb_base = t4_read_reg(adap, A_TP_CMM_TCB_BASE); 6429 u32 tcb_addr = tcb_base + tid * TCB_SIZE; 6430 __be32 raw_tcb[TCB_SIZE/4]; 6431 int ret, word; 6432 6433 ret = t4_memory_rw_addr(adap, win, 6434 tcb_addr, sizeof raw_tcb, raw_tcb, 6435 T4_MEMORY_READ); 6436 if (ret) 6437 return ret; 6438 6439 for (word = 0; word < 32; word++) 6440 tcb[word] = be32_to_cpu(raw_tcb[word]); 6441 return 0; 6442 } 6443 6444 /** 6445 * t4_pmtx_get_stats - returns the HW stats from PMTX 6446 * @adap: the adapter 6447 * @cnt: where to store the count statistics 6448 * @cycles: where to store the cycle statistics 6449 * 6450 * Returns performance statistics from PMTX. 6451 */ 6452 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) 6453 { 6454 int i; 6455 u32 data[2]; 6456 6457 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) { 6458 t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1); 6459 cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT); 6460 if (is_t4(adap->params.chip)) { 6461 cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB); 6462 } else { 6463 t4_read_indirect(adap, A_PM_TX_DBG_CTRL, 6464 A_PM_TX_DBG_DATA, data, 2, 6465 A_PM_TX_DBG_STAT_MSB); 6466 cycles[i] = (((u64)data[0] << 32) | data[1]); 6467 } 6468 } 6469 } 6470 6471 /** 6472 * t4_pmrx_get_stats - returns the HW stats from PMRX 6473 * @adap: the adapter 6474 * @cnt: where to store the count statistics 6475 * @cycles: where to store the cycle statistics 6476 * 6477 * Returns performance statistics from PMRX. 
6478 */ 6479 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) 6480 { 6481 int i; 6482 u32 data[2]; 6483 6484 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) { 6485 t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1); 6486 cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT); 6487 if (is_t4(adap->params.chip)) { 6488 cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB); 6489 } else { 6490 t4_read_indirect(adap, A_PM_RX_DBG_CTRL, 6491 A_PM_RX_DBG_DATA, data, 2, 6492 A_PM_RX_DBG_STAT_MSB); 6493 cycles[i] = (((u64)data[0] << 32) | data[1]); 6494 } 6495 } 6496 } 6497 6498 /** 6499 * compute_mps_bg_map - compute the MPS Buffer Group Map for a Port 6500 * @adapter: the adapter 6501 * @pidx: the port index 6502 * 6503 * Compuytes and returns a bitmap indicating which MPS buffer groups are 6504 * associated with the given Port. Bit i is set if buffer group i is 6505 * used by the Port. 6506 */ 6507 static inline unsigned int compute_mps_bg_map(struct adapter *adapter, 6508 int pidx) 6509 { 6510 unsigned int chip_version, nports; 6511 6512 chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip); 6513 nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL)); 6514 6515 switch (chip_version) { 6516 case CHELSIO_T4: 6517 case CHELSIO_T5: 6518 switch (nports) { 6519 case 1: return 0xf; 6520 case 2: return 3 << (2 * pidx); 6521 case 4: return 1 << pidx; 6522 } 6523 break; 6524 6525 case CHELSIO_T6: 6526 switch (nports) { 6527 case 2: return 1 << (2 * pidx); 6528 } 6529 break; 6530 } 6531 6532 CH_ERR(adapter, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n", 6533 chip_version, nports); 6534 6535 return 0; 6536 } 6537 6538 /** 6539 * t4_get_mps_bg_map - return the buffer groups associated with a port 6540 * @adapter: the adapter 6541 * @pidx: the port index 6542 * 6543 * Returns a bitmap indicating which MPS buffer groups are associated 6544 * with the given Port. Bit i is set if buffer group i is used by the 6545 * Port. 
6546 */ 6547 unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx) 6548 { 6549 u8 *mps_bg_map; 6550 unsigned int nports; 6551 6552 nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL)); 6553 if (pidx >= nports) { 6554 CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n", pidx, nports); 6555 return 0; 6556 } 6557 6558 /* If we've already retrieved/computed this, just return the result. 6559 */ 6560 mps_bg_map = adapter->params.mps_bg_map; 6561 if (mps_bg_map[pidx]) 6562 return mps_bg_map[pidx]; 6563 6564 /* Newer Firmware can tell us what the MPS Buffer Group Map is. 6565 * If we're talking to such Firmware, let it tell us. If the new 6566 * API isn't supported, revert back to old hardcoded way. The value 6567 * obtained from Firmware is encoded in below format: 6568 * 6569 * val = (( MPSBGMAP[Port 3] << 24 ) | 6570 * ( MPSBGMAP[Port 2] << 16 ) | 6571 * ( MPSBGMAP[Port 1] << 8 ) | 6572 * ( MPSBGMAP[Port 0] << 0 )) 6573 */ 6574 if (adapter->flags & FW_OK) { 6575 u32 param, val; 6576 int ret; 6577 6578 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 6579 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MPSBGMAP)); 6580 ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf, 6581 0, 1, ¶m, &val); 6582 if (!ret) { 6583 int p; 6584 6585 /* Store the BG Map for all of the Ports in order to 6586 * avoid more calls to the Firmware in the future. 6587 */ 6588 for (p = 0; p < MAX_NPORTS; p++, val >>= 8) 6589 mps_bg_map[p] = val & 0xff; 6590 6591 return mps_bg_map[pidx]; 6592 } 6593 } 6594 6595 /* Either we're not talking to the Firmware or we're dealing with 6596 * older Firmware which doesn't support the new API to get the MPS 6597 * Buffer Group Map. Fall back to computing it ourselves. 
6598 */ 6599 mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx); 6600 return mps_bg_map[pidx]; 6601 } 6602 6603 /** 6604 * t4_get_tp_e2c_map - return the E2C channel map associated with a port 6605 * @adapter: the adapter 6606 * @pidx: the port index 6607 */ 6608 unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx) 6609 { 6610 unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL)); 6611 u32 param, val = 0; 6612 int ret; 6613 6614 if (pidx >= nports) { 6615 CH_WARN(adapter, "TP E2C Channel Port Index %d >= Nports %d\n", pidx, nports); 6616 return 0; 6617 } 6618 6619 /* FW version >= 1.16.44.0 can determine E2C channel map using 6620 * FW_PARAMS_PARAM_DEV_TPCHMAP API. 6621 */ 6622 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 6623 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPCHMAP)); 6624 ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf, 6625 0, 1, ¶m, &val); 6626 if (!ret) 6627 return (val >> (8*pidx)) & 0xff; 6628 6629 return 0; 6630 } 6631 6632 /** 6633 * t4_get_tp_ch_map - return TP ingress channels associated with a port 6634 * @adapter: the adapter 6635 * @pidx: the port index 6636 * 6637 * Returns a bitmap indicating which TP Ingress Channels are associated with 6638 * a given Port. Bit i is set if TP Ingress Channel i is used by the Port. 6639 */ 6640 unsigned int t4_get_tp_ch_map(struct adapter *adapter, int pidx) 6641 { 6642 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip); 6643 unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter, A_MPS_CMN_CTL)); 6644 6645 if (pidx >= nports) { 6646 CH_WARN(adapter, "TP Port Index %d >= Nports %d\n", pidx, nports); 6647 return 0; 6648 } 6649 6650 switch (chip_version) { 6651 case CHELSIO_T4: 6652 case CHELSIO_T5: 6653 /* 6654 * Note that this happens to be the same values as the MPS 6655 * Buffer Group Map for these Chips. But we replicate the code 6656 * here because they're really separate concepts. 
6657 */ 6658 switch (nports) { 6659 case 1: return 0xf; 6660 case 2: return 3 << (2 * pidx); 6661 case 4: return 1 << pidx; 6662 } 6663 break; 6664 6665 case CHELSIO_T6: 6666 switch (nports) { 6667 case 1: return 1 << pidx; 6668 case 2: return 1 << pidx; 6669 } 6670 break; 6671 } 6672 6673 CH_ERR(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n", 6674 chip_version, nports); 6675 return 0; 6676 } 6677 6678 /** 6679 * t4_get_port_type_description - return Port Type string description 6680 * @port_type: firmware Port Type enumeration 6681 */ 6682 const char *t4_get_port_type_description(enum fw_port_type port_type) 6683 { 6684 static const char *const port_type_description[] = { 6685 "Fiber_XFI", 6686 "Fiber_XAUI", 6687 "BT_SGMII", 6688 "BT_XFI", 6689 "BT_XAUI", 6690 "KX4", 6691 "CX4", 6692 "KX", 6693 "KR", 6694 "SFP", 6695 "BP_AP", 6696 "BP4_AP", 6697 "QSFP_10G", 6698 "QSA", 6699 "QSFP", 6700 "BP40_BA", 6701 "KR4_100G", 6702 "CR4_QSFP", 6703 "CR_QSFP", 6704 "CR2_QSFP", 6705 "SFP28", 6706 "KR_SFP28", 6707 "KR_XLAUI", 6708 }; 6709 6710 if (port_type < ARRAY_SIZE(port_type_description)) 6711 return port_type_description[port_type]; 6712 return "UNKNOWN"; 6713 } 6714 6715 /** 6716 * t4_get_port_stats_offset - collect port stats relative to a previous 6717 * snapshot 6718 * @adap: The adapter 6719 * @idx: The port 6720 * @stats: Current stats to fill 6721 * @offset: Previous stats snapshot 6722 */ 6723 void t4_get_port_stats_offset(struct adapter *adap, int idx, 6724 struct port_stats *stats, 6725 struct port_stats *offset) 6726 { 6727 u64 *s, *o; 6728 int i; 6729 6730 t4_get_port_stats(adap, idx, stats); 6731 for (i = 0, s = (u64 *)stats, o = (u64 *)offset ; 6732 i < (sizeof(struct port_stats)/sizeof(u64)) ; 6733 i++, s++, o++) 6734 *s -= *o; 6735 } 6736 6737 /** 6738 * t4_get_port_stats - collect port statistics 6739 * @adap: the adapter 6740 * @idx: the port index 6741 * @p: the stats structure to fill 6742 * 6743 * Collect statistics related to the given port 
from HW. 6744 */ 6745 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) 6746 { 6747 u32 bgmap = t4_get_mps_bg_map(adap, idx); 6748 u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL); 6749 6750 #define GET_STAT(name) \ 6751 t4_read_reg64(adap, \ 6752 (is_t4(adap->params.chip) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \ 6753 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))) 6754 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L) 6755 6756 p->tx_octets = GET_STAT(TX_PORT_BYTES); 6757 p->tx_frames = GET_STAT(TX_PORT_FRAMES); 6758 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); 6759 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); 6760 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); 6761 p->tx_error_frames = GET_STAT(TX_PORT_ERROR); 6762 p->tx_frames_64 = GET_STAT(TX_PORT_64B); 6763 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); 6764 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); 6765 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); 6766 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); 6767 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); 6768 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); 6769 p->tx_drop = GET_STAT(TX_PORT_DROP); 6770 p->tx_pause = GET_STAT(TX_PORT_PAUSE); 6771 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); 6772 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); 6773 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); 6774 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); 6775 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); 6776 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); 6777 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); 6778 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); 6779 6780 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) { 6781 if (stat_ctl & F_COUNTPAUSESTATTX) 6782 p->tx_frames_64 -= p->tx_pause; 6783 if (stat_ctl & F_COUNTPAUSEMCTX) 6784 p->tx_mcast_frames -= p->tx_pause; 6785 } 6786 6787 p->rx_octets = GET_STAT(RX_PORT_BYTES); 6788 p->rx_frames = GET_STAT(RX_PORT_FRAMES); 6789 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); 6790 p->rx_mcast_frames = 
GET_STAT(RX_PORT_MCAST); 6791 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); 6792 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); 6793 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); 6794 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); 6795 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); 6796 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); 6797 p->rx_runt = GET_STAT(RX_PORT_LESS_64B); 6798 p->rx_frames_64 = GET_STAT(RX_PORT_64B); 6799 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); 6800 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); 6801 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); 6802 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); 6803 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); 6804 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); 6805 p->rx_pause = GET_STAT(RX_PORT_PAUSE); 6806 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); 6807 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); 6808 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); 6809 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); 6810 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); 6811 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); 6812 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); 6813 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); 6814 6815 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) { 6816 if (stat_ctl & F_COUNTPAUSESTATRX) 6817 p->rx_frames_64 -= p->rx_pause; 6818 if (stat_ctl & F_COUNTPAUSEMCRX) 6819 p->rx_mcast_frames -= p->rx_pause; 6820 } 6821 6822 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; 6823 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; 6824 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; 6825 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; 6826 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; 6827 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; 6828 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; 6829 p->rx_trunc3 = (bgmap & 8) ? 
GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; 6830 6831 #undef GET_STAT 6832 #undef GET_STAT_COM 6833 } 6834 6835 /** 6836 * t4_get_lb_stats - collect loopback port statistics 6837 * @adap: the adapter 6838 * @idx: the loopback port index 6839 * @p: the stats structure to fill 6840 * 6841 * Return HW statistics for the given loopback port. 6842 */ 6843 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p) 6844 { 6845 u32 bgmap = t4_get_mps_bg_map(adap, idx); 6846 6847 #define GET_STAT(name) \ 6848 t4_read_reg64(adap, \ 6849 (is_t4(adap->params.chip) ? \ 6850 PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \ 6851 T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))) 6852 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L) 6853 6854 p->octets = GET_STAT(BYTES); 6855 p->frames = GET_STAT(FRAMES); 6856 p->bcast_frames = GET_STAT(BCAST); 6857 p->mcast_frames = GET_STAT(MCAST); 6858 p->ucast_frames = GET_STAT(UCAST); 6859 p->error_frames = GET_STAT(ERROR); 6860 6861 p->frames_64 = GET_STAT(64B); 6862 p->frames_65_127 = GET_STAT(65B_127B); 6863 p->frames_128_255 = GET_STAT(128B_255B); 6864 p->frames_256_511 = GET_STAT(256B_511B); 6865 p->frames_512_1023 = GET_STAT(512B_1023B); 6866 p->frames_1024_1518 = GET_STAT(1024B_1518B); 6867 p->frames_1519_max = GET_STAT(1519B_MAX); 6868 p->drop = GET_STAT(DROP_FRAMES); 6869 6870 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0; 6871 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0; 6872 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0; 6873 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0; 6874 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0; 6875 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0; 6876 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0; 6877 p->trunc3 = (bgmap & 8) ? 
GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0; 6878 6879 #undef GET_STAT 6880 #undef GET_STAT_COM 6881 } 6882 6883 /* t4_mk_filtdelwr - create a delete filter WR 6884 * @ftid: the filter ID 6885 * @wr: the filter work request to populate 6886 * @rqtype: the filter Request Type: 0 => IPv4, 1 => IPv6 6887 * @qid: ingress queue to receive the delete notification 6888 * 6889 * Creates a filter work request to delete the supplied filter. If @qid 6890 * is negative the delete notification is suppressed. 6891 */ 6892 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, 6893 int rqtype, int qid) 6894 { 6895 memset(wr, 0, sizeof(*wr)); 6896 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR)); 6897 wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16)); 6898 wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) | 6899 V_FW_FILTER_WR_RQTYPE(rqtype) | 6900 V_FW_FILTER_WR_NOREPLY(qid < 0)); 6901 wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER); 6902 if (qid >= 0) 6903 wr->rx_chan_rx_rpl_iq = 6904 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid)); 6905 } 6906 6907 #define INIT_CMD(var, cmd, rd_wr) do { \ 6908 (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \ 6909 F_FW_CMD_REQUEST | \ 6910 F_FW_CMD_##rd_wr); \ 6911 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \ 6912 } while (0) 6913 6914 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, 6915 u32 addr, u32 val) 6916 { 6917 u32 ldst_addrspace; 6918 struct fw_ldst_cmd c; 6919 6920 memset(&c, 0, sizeof(c)); 6921 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE); 6922 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 6923 F_FW_CMD_REQUEST | 6924 F_FW_CMD_WRITE | 6925 ldst_addrspace); 6926 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); 6927 c.u.addrval.addr = cpu_to_be32(addr); 6928 c.u.addrval.val = cpu_to_be32(val); 6929 6930 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 6931 } 6932 6933 /** 6934 * t4_mdio_rd - read a PHY register through MDIO 6935 * 
@adap: the adapter 6936 * @mbox: mailbox to use for the FW command 6937 * @phy_addr: the PHY address 6938 * @mmd: the PHY MMD to access (0 for clause 22 PHYs) 6939 * @reg: the register to read 6940 * @valp: where to store the value 6941 * 6942 * Issues a FW command through the given mailbox to read a PHY register. 6943 */ 6944 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 6945 unsigned int mmd, unsigned int reg, unsigned int *valp) 6946 { 6947 int ret; 6948 u32 ldst_addrspace; 6949 struct fw_ldst_cmd c; 6950 6951 memset(&c, 0, sizeof(c)); 6952 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO); 6953 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 6954 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6955 ldst_addrspace); 6956 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); 6957 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) | 6958 V_FW_LDST_CMD_MMD(mmd)); 6959 c.u.mdio.raddr = cpu_to_be16(reg); 6960 6961 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 6962 if (ret == 0) 6963 *valp = be16_to_cpu(c.u.mdio.rval); 6964 return ret; 6965 } 6966 6967 /** 6968 * t4_mdio_wr - write a PHY register through MDIO 6969 * @adap: the adapter 6970 * @mbox: mailbox to use for the FW command 6971 * @phy_addr: the PHY address 6972 * @mmd: the PHY MMD to access (0 for clause 22 PHYs) 6973 * @reg: the register to write 6974 * @valp: value to write 6975 * 6976 * Issues a FW command through the given mailbox to write a PHY register. 
6977 */ 6978 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 6979 unsigned int mmd, unsigned int reg, unsigned int val) 6980 { 6981 u32 ldst_addrspace; 6982 struct fw_ldst_cmd c; 6983 6984 memset(&c, 0, sizeof(c)); 6985 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO); 6986 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 6987 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 6988 ldst_addrspace); 6989 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); 6990 c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) | 6991 V_FW_LDST_CMD_MMD(mmd)); 6992 c.u.mdio.raddr = cpu_to_be16(reg); 6993 c.u.mdio.rval = cpu_to_be16(val); 6994 6995 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 6996 } 6997 6998 /** 6999 * 7000 * t4_sge_decode_idma_state - decode the idma state 7001 * @adap: the adapter 7002 * @state: the state idma is stuck in 7003 */ 7004 void t4_sge_decode_idma_state(struct adapter *adapter, int state) 7005 { 7006 static const char * const t4_decode[] = { 7007 "IDMA_IDLE", 7008 "IDMA_PUSH_MORE_CPL_FIFO", 7009 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO", 7010 "Not used", 7011 "IDMA_PHYSADDR_SEND_PCIEHDR", 7012 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST", 7013 "IDMA_PHYSADDR_SEND_PAYLOAD", 7014 "IDMA_SEND_FIFO_TO_IMSG", 7015 "IDMA_FL_REQ_DATA_FL_PREP", 7016 "IDMA_FL_REQ_DATA_FL", 7017 "IDMA_FL_DROP", 7018 "IDMA_FL_H_REQ_HEADER_FL", 7019 "IDMA_FL_H_SEND_PCIEHDR", 7020 "IDMA_FL_H_PUSH_CPL_FIFO", 7021 "IDMA_FL_H_SEND_CPL", 7022 "IDMA_FL_H_SEND_IP_HDR_FIRST", 7023 "IDMA_FL_H_SEND_IP_HDR", 7024 "IDMA_FL_H_REQ_NEXT_HEADER_FL", 7025 "IDMA_FL_H_SEND_NEXT_PCIEHDR", 7026 "IDMA_FL_H_SEND_IP_HDR_PADDING", 7027 "IDMA_FL_D_SEND_PCIEHDR", 7028 "IDMA_FL_D_SEND_CPL_AND_IP_HDR", 7029 "IDMA_FL_D_REQ_NEXT_DATA_FL", 7030 "IDMA_FL_SEND_PCIEHDR", 7031 "IDMA_FL_PUSH_CPL_FIFO", 7032 "IDMA_FL_SEND_CPL", 7033 "IDMA_FL_SEND_PAYLOAD_FIRST", 7034 "IDMA_FL_SEND_PAYLOAD", 7035 "IDMA_FL_REQ_NEXT_DATA_FL", 7036 "IDMA_FL_SEND_NEXT_PCIEHDR", 7037 "IDMA_FL_SEND_PADDING", 
7038 "IDMA_FL_SEND_COMPLETION_TO_IMSG", 7039 "IDMA_FL_SEND_FIFO_TO_IMSG", 7040 "IDMA_FL_REQ_DATAFL_DONE", 7041 "IDMA_FL_REQ_HEADERFL_DONE", 7042 }; 7043 static const char * const t5_decode[] = { 7044 "IDMA_IDLE", 7045 "IDMA_ALMOST_IDLE", 7046 "IDMA_PUSH_MORE_CPL_FIFO", 7047 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO", 7048 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR", 7049 "IDMA_PHYSADDR_SEND_PCIEHDR", 7050 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST", 7051 "IDMA_PHYSADDR_SEND_PAYLOAD", 7052 "IDMA_SEND_FIFO_TO_IMSG", 7053 "IDMA_FL_REQ_DATA_FL", 7054 "IDMA_FL_DROP", 7055 "IDMA_FL_DROP_SEND_INC", 7056 "IDMA_FL_H_REQ_HEADER_FL", 7057 "IDMA_FL_H_SEND_PCIEHDR", 7058 "IDMA_FL_H_PUSH_CPL_FIFO", 7059 "IDMA_FL_H_SEND_CPL", 7060 "IDMA_FL_H_SEND_IP_HDR_FIRST", 7061 "IDMA_FL_H_SEND_IP_HDR", 7062 "IDMA_FL_H_REQ_NEXT_HEADER_FL", 7063 "IDMA_FL_H_SEND_NEXT_PCIEHDR", 7064 "IDMA_FL_H_SEND_IP_HDR_PADDING", 7065 "IDMA_FL_D_SEND_PCIEHDR", 7066 "IDMA_FL_D_SEND_CPL_AND_IP_HDR", 7067 "IDMA_FL_D_REQ_NEXT_DATA_FL", 7068 "IDMA_FL_SEND_PCIEHDR", 7069 "IDMA_FL_PUSH_CPL_FIFO", 7070 "IDMA_FL_SEND_CPL", 7071 "IDMA_FL_SEND_PAYLOAD_FIRST", 7072 "IDMA_FL_SEND_PAYLOAD", 7073 "IDMA_FL_REQ_NEXT_DATA_FL", 7074 "IDMA_FL_SEND_NEXT_PCIEHDR", 7075 "IDMA_FL_SEND_PADDING", 7076 "IDMA_FL_SEND_COMPLETION_TO_IMSG", 7077 }; 7078 static const char * const t6_decode[] = { 7079 "IDMA_IDLE", 7080 "IDMA_PUSH_MORE_CPL_FIFO", 7081 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO", 7082 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR", 7083 "IDMA_PHYSADDR_SEND_PCIEHDR", 7084 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST", 7085 "IDMA_PHYSADDR_SEND_PAYLOAD", 7086 "IDMA_FL_REQ_DATA_FL", 7087 "IDMA_FL_DROP", 7088 "IDMA_FL_DROP_SEND_INC", 7089 "IDMA_FL_H_REQ_HEADER_FL", 7090 "IDMA_FL_H_SEND_PCIEHDR", 7091 "IDMA_FL_H_PUSH_CPL_FIFO", 7092 "IDMA_FL_H_SEND_CPL", 7093 "IDMA_FL_H_SEND_IP_HDR_FIRST", 7094 "IDMA_FL_H_SEND_IP_HDR", 7095 "IDMA_FL_H_REQ_NEXT_HEADER_FL", 7096 "IDMA_FL_H_SEND_NEXT_PCIEHDR", 7097 "IDMA_FL_H_SEND_IP_HDR_PADDING", 7098 "IDMA_FL_D_SEND_PCIEHDR", 7099 "IDMA_FL_D_SEND_CPL_AND_IP_HDR", 
7100 "IDMA_FL_D_REQ_NEXT_DATA_FL", 7101 "IDMA_FL_SEND_PCIEHDR", 7102 "IDMA_FL_PUSH_CPL_FIFO", 7103 "IDMA_FL_SEND_CPL", 7104 "IDMA_FL_SEND_PAYLOAD_FIRST", 7105 "IDMA_FL_SEND_PAYLOAD", 7106 "IDMA_FL_REQ_NEXT_DATA_FL", 7107 "IDMA_FL_SEND_NEXT_PCIEHDR", 7108 "IDMA_FL_SEND_PADDING", 7109 "IDMA_FL_SEND_COMPLETION_TO_IMSG", 7110 }; 7111 static const u32 sge_regs[] = { 7112 A_SGE_DEBUG_DATA_LOW_INDEX_2, 7113 A_SGE_DEBUG_DATA_LOW_INDEX_3, 7114 A_SGE_DEBUG_DATA_HIGH_INDEX_10, 7115 }; 7116 const char **sge_idma_decode; 7117 int sge_idma_decode_nstates; 7118 int i; 7119 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip); 7120 7121 /* Select the right set of decode strings to dump depending on the 7122 * adapter chip type. 7123 */ 7124 switch (chip_version) { 7125 case CHELSIO_T4: 7126 sge_idma_decode = (const char **)t4_decode; 7127 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode); 7128 break; 7129 7130 case CHELSIO_T5: 7131 sge_idma_decode = (const char **)t5_decode; 7132 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode); 7133 break; 7134 7135 case CHELSIO_T6: 7136 sge_idma_decode = (const char **)t6_decode; 7137 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode); 7138 break; 7139 7140 default: 7141 CH_ERR(adapter, "Unsupported chip version %d\n", chip_version); 7142 return; 7143 } 7144 7145 if (state < sge_idma_decode_nstates) 7146 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]); 7147 else 7148 CH_WARN(adapter, "idma state %d unknown\n", state); 7149 7150 for (i = 0; i < ARRAY_SIZE(sge_regs); i++) 7151 CH_WARN(adapter, "SGE register %#x value %#x\n", 7152 sge_regs[i], t4_read_reg(adapter, sge_regs[i])); 7153 } 7154 7155 /** 7156 * t4_sge_ctxt_flush - flush the SGE context cache 7157 * @adap: the adapter 7158 * @mbox: mailbox to use for the FW command 7159 * 7160 * Issues a FW command through the given mailbox to flush the 7161 * SGE context cache. 
7162 */ 7163 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type) 7164 { 7165 int ret; 7166 u32 ldst_addrspace; 7167 struct fw_ldst_cmd c; 7168 7169 memset(&c, 0, sizeof(c)); 7170 ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(ctxt_type == CTXT_EGRESS ? 7171 FW_LDST_ADDRSPC_SGE_EGRC : 7172 FW_LDST_ADDRSPC_SGE_INGC); 7173 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 7174 F_FW_CMD_REQUEST | F_FW_CMD_READ | 7175 ldst_addrspace); 7176 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); 7177 c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH); 7178 7179 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 7180 return ret; 7181 } 7182 7183 /** 7184 * t4_read_sge_dbqtimers - reag SGE Doorbell Queue Timer values 7185 * @adap - the adapter 7186 * @ndbqtimers: size of the provided SGE Doorbell Queue Timer table 7187 * @dbqtimers: SGE Doorbell Queue Timer table 7188 * 7189 * Reads the SGE Doorbell Queue Timer values into the provided table. 7190 * Returns 0 on success (Firmware and Hardware support this feature), 7191 * an error on failure. 
7192 */ 7193 int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers, 7194 u16 *dbqtimers) 7195 { 7196 int ret, dbqtimerix; 7197 7198 ret = 0; 7199 dbqtimerix = 0; 7200 while (dbqtimerix < ndbqtimers) { 7201 int nparams, param; 7202 u32 params[7], vals[7]; 7203 7204 nparams = ndbqtimers - dbqtimerix; 7205 if (nparams > ARRAY_SIZE(params)) 7206 nparams = ARRAY_SIZE(params); 7207 7208 for (param = 0; param < nparams; param++) 7209 params[param] = 7210 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 7211 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DBQ_TIMER) | 7212 V_FW_PARAMS_PARAM_Y(dbqtimerix + param)); 7213 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 7214 nparams, params, vals); 7215 if (ret) 7216 break; 7217 7218 for (param = 0; param < nparams; param++) 7219 dbqtimers[dbqtimerix++] = vals[param]; 7220 } 7221 return ret; 7222 } 7223 7224 /** 7225 * t4_fw_hello - establish communication with FW 7226 * @adap: the adapter 7227 * @mbox: mailbox to use for the FW command 7228 * @evt_mbox: mailbox to receive async FW events 7229 * @master: specifies the caller's willingness to be the device master 7230 * @state: returns the current device state (if non-NULL) 7231 * 7232 * Issues a command to establish communication with FW. Returns either 7233 * an error (negative integer) or the mailbox of the Master PF. 7234 */ 7235 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, 7236 enum dev_master master, enum dev_state *state) 7237 { 7238 int ret; 7239 struct fw_hello_cmd c; 7240 u32 v; 7241 unsigned int master_mbox; 7242 int retries = FW_CMD_HELLO_RETRIES; 7243 7244 retry: 7245 memset(&c, 0, sizeof(c)); 7246 INIT_CMD(c, HELLO, WRITE); 7247 c.err_to_clearinit = cpu_to_be32( 7248 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | 7249 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | 7250 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? 
7251 mbox : M_FW_HELLO_CMD_MBMASTER) | 7252 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) | 7253 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) | 7254 F_FW_HELLO_CMD_CLEARINIT); 7255 7256 /* 7257 * Issue the HELLO command to the firmware. If it's not successful 7258 * but indicates that we got a "busy" or "timeout" condition, retry 7259 * the HELLO until we exhaust our retry limit. If we do exceed our 7260 * retry limit, check to see if the firmware left us any error 7261 * information and report that if so ... 7262 */ 7263 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 7264 if (ret != FW_SUCCESS) { 7265 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) 7266 goto retry; 7267 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR) 7268 t4_report_fw_error(adap); 7269 return ret; 7270 } 7271 7272 v = be32_to_cpu(c.err_to_clearinit); 7273 master_mbox = G_FW_HELLO_CMD_MBMASTER(v); 7274 if (state) { 7275 if (v & F_FW_HELLO_CMD_ERR) 7276 *state = DEV_STATE_ERR; 7277 else if (v & F_FW_HELLO_CMD_INIT) 7278 *state = DEV_STATE_INIT; 7279 else 7280 *state = DEV_STATE_UNINIT; 7281 } 7282 7283 /* 7284 * If we're not the Master PF then we need to wait around for the 7285 * Master PF Driver to finish setting up the adapter. 7286 * 7287 * Note that we also do this wait if we're a non-Master-capable PF and 7288 * there is no current Master PF; a Master PF may show up momentarily 7289 * and we wouldn't want to fail pointlessly. (This can happen when an 7290 * OS loads lots of different drivers rapidly at the same time). In 7291 * this case, the Master PF returned by the firmware will be 7292 * M_PCIE_FW_MASTER so the test below will work ... 7293 */ 7294 if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 && 7295 master_mbox != mbox) { 7296 int waiting = FW_CMD_HELLO_TIMEOUT; 7297 7298 /* 7299 * Wait for the firmware to either indicate an error or 7300 * initialized state. If we see either of these we bail out 7301 * and report the issue to the caller. 
If we exhaust the 7302 * "hello timeout" and we haven't exhausted our retries, try 7303 * again. Otherwise bail with a timeout error. 7304 */ 7305 for (;;) { 7306 u32 pcie_fw; 7307 7308 msleep(50); 7309 waiting -= 50; 7310 7311 /* 7312 * If neither Error nor Initialialized are indicated 7313 * by the firmware keep waiting till we exaust our 7314 * timeout ... and then retry if we haven't exhausted 7315 * our retries ... 7316 */ 7317 pcie_fw = t4_read_reg(adap, A_PCIE_FW); 7318 if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) { 7319 if (waiting <= 0) { 7320 if (retries-- > 0) 7321 goto retry; 7322 7323 return -ETIMEDOUT; 7324 } 7325 continue; 7326 } 7327 7328 /* 7329 * We either have an Error or Initialized condition 7330 * report errors preferentially. 7331 */ 7332 if (state) { 7333 if (pcie_fw & F_PCIE_FW_ERR) 7334 *state = DEV_STATE_ERR; 7335 else if (pcie_fw & F_PCIE_FW_INIT) 7336 *state = DEV_STATE_INIT; 7337 } 7338 7339 /* 7340 * If we arrived before a Master PF was selected and 7341 * there's not a valid Master PF, grab its identity 7342 * for our caller. 7343 */ 7344 if (master_mbox == M_PCIE_FW_MASTER && 7345 (pcie_fw & F_PCIE_FW_MASTER_VLD)) 7346 master_mbox = G_PCIE_FW_MASTER(pcie_fw); 7347 break; 7348 } 7349 } 7350 7351 return master_mbox; 7352 } 7353 7354 /** 7355 * t4_fw_bye - end communication with FW 7356 * @adap: the adapter 7357 * @mbox: mailbox to use for the FW command 7358 * 7359 * Issues a command to terminate communication with FW. 7360 */ 7361 int t4_fw_bye(struct adapter *adap, unsigned int mbox) 7362 { 7363 struct fw_bye_cmd c; 7364 7365 memset(&c, 0, sizeof(c)); 7366 INIT_CMD(c, BYE, WRITE); 7367 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7368 } 7369 7370 /** 7371 * t4_fw_reset - issue a reset to FW 7372 * @adap: the adapter 7373 * @mbox: mailbox to use for the FW command 7374 * @reset: specifies the type of reset to perform 7375 * 7376 * Issues a reset command of the specified type to FW. 
7377 */ 7378 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) 7379 { 7380 struct fw_reset_cmd c; 7381 7382 memset(&c, 0, sizeof(c)); 7383 INIT_CMD(c, RESET, WRITE); 7384 c.val = cpu_to_be32(reset); 7385 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7386 } 7387 7388 /** 7389 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET 7390 * @adap: the adapter 7391 * @mbox: mailbox to use for the FW RESET command (if desired) 7392 * @force: force uP into RESET even if FW RESET command fails 7393 * 7394 * Issues a RESET command to firmware (if desired) with a HALT indication 7395 * and then puts the microprocessor into RESET state. The RESET command 7396 * will only be issued if a legitimate mailbox is provided (mbox <= 7397 * M_PCIE_FW_MASTER). 7398 * 7399 * This is generally used in order for the host to safely manipulate the 7400 * adapter without fear of conflicting with whatever the firmware might 7401 * be doing. The only way out of this state is to RESTART the firmware 7402 * ... 7403 */ 7404 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) 7405 { 7406 int ret = 0; 7407 7408 /* 7409 * If a legitimate mailbox is provided, issue a RESET command 7410 * with a HALT indication. 7411 */ 7412 if (mbox <= M_PCIE_FW_MASTER) { 7413 struct fw_reset_cmd c; 7414 7415 memset(&c, 0, sizeof(c)); 7416 INIT_CMD(c, RESET, WRITE); 7417 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE); 7418 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT); 7419 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7420 } 7421 7422 /* 7423 * Normally we won't complete the operation if the firmware RESET 7424 * command fails but if our caller insists we'll go ahead and put the 7425 * uP into RESET. This can be useful if the firmware is hung or even 7426 * missing ... We'll have to take the risk of putting the uP into 7427 * RESET without the cooperation of firmware in that case. 
7428 * 7429 * We also force the firmware's HALT flag to be on in case we bypassed 7430 * the firmware RESET command above or we're dealing with old firmware 7431 * which doesn't have the HALT capability. This will serve as a flag 7432 * for the incoming firmware to know that it's coming out of a HALT 7433 * rather than a RESET ... if it's new enough to understand that ... 7434 */ 7435 if (ret == 0 || force) { 7436 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST); 7437 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 7438 F_PCIE_FW_HALT); 7439 } 7440 7441 /* 7442 * And we always return the result of the firmware RESET command 7443 * even when we force the uP into RESET ... 7444 */ 7445 return ret; 7446 } 7447 7448 /** 7449 * t4_fw_restart - restart the firmware by taking the uP out of RESET 7450 * @adap: the adapter 7451 * @reset: if we want to do a RESET to restart things 7452 * 7453 * Restart firmware previously halted by t4_fw_halt(). On successful 7454 * return the previous PF Master remains as the new PF Master and there 7455 * is no need to issue a new HELLO command, etc. 7456 * 7457 * We do this in two ways: 7458 * 7459 * 1. If we're dealing with newer firmware we'll simply want to take 7460 * the chip's microprocessor out of RESET. This will cause the 7461 * firmware to start up from its start vector. And then we'll loop 7462 * until the firmware indicates it's started again (PCIE_FW.HALT 7463 * reset to 0) or we timeout. 7464 * 7465 * 2. If we're dealing with older firmware then we'll need to RESET 7466 * the chip since older firmware won't recognize the PCIE_FW.HALT 7467 * flag and automatically RESET itself on startup. 7468 */ 7469 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) 7470 { 7471 if (reset) { 7472 /* 7473 * Since we're directing the RESET instead of the firmware 7474 * doing it automatically, we need to clear the PCIE_FW.HALT 7475 * bit. 
		 */
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= M_PCIE_FW_MASTER) {
			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
			msleep(100);
			if (t4_fw_reset(adap, mbox,
			    F_PIORST | F_PIORSTMODE) == 0)
				return 0;
		}

		/* The hammer: a global chip reset via PL_RST. */
		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
		msleep(2000);
	} else {
		int ms;

		/* Take the uP out of RESET and poll for PCIE_FW.HALT clear. */
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
				return FW_SUCCESS;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}

/**
 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @fw_data: the firmware image to write
 * @size: image size
 * @force: force upgrade even if firmware doesn't cooperate
 *
 * Perform all of the steps necessary for upgrading an adapter's
 * firmware image.  Normally this requires the cooperation of the
 * existing firmware in order to halt all existing activities
 * but if an invalid mailbox token is passed in we skip that step
 * (though we'll still put the adapter microprocessor into RESET in
 * that case).
 *
 * On successful return the new firmware will have been loaded and
 * the adapter will have been fully RESET losing all previous setup
 * state.  On unsuccessful return the adapter may be completely hosed ...
 * positive errno indicates that the adapter is ~probably~ intact, a
 * negative errno indicates that things are looking bad ...
 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	/* A bootstrap image is only flashed, never halted/restarted around. */
	unsigned int bootstrap =
	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
	int reset, ret;

	if (!t4_fw_matches_chip(adap, fw_hdr))
		return -EINVAL;

	/*
	 * Disable the FW_OK flag so that mbox commands with FW_OK flag
	 * checks won't be sent while we are flashing firmware.
	 */
	adap->flags &= ~FW_OK;

	if (!bootstrap) {
		ret = t4_fw_halt(adap, mbox, force);
		if (ret < 0 && !force)
			goto out;
	}

	ret = t4_load_fw(adap, fw_data, size, bootstrap);
	if (ret < 0 || bootstrap)
		goto out;

	/*
	 * If there was a Firmware Configuration File stored in FLASH,
	 * there's a good chance that it won't be compatible with the new
	 * Firmware.  In order to prevent difficult to diagnose adapter
	 * initialization issues, we clear out the Firmware Configuration File
	 * portion of the FLASH.  The user will need to re-FLASH a new
	 * Firmware Configuration File which is compatible with the new
	 * Firmware if that's desired.
	 */
	(void) t4_load_cfg(adap, NULL, 0);

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	ret = t4_fw_restart(adap, mbox, reset);

	/*
	 * Grab potentially new Firmware Device Log parameters so we can see
	 * how healthy the new Firmware is.  It's okay to contact the new
	 * Firmware for these parameters even though, as far as it's
	 * concerned, we've never said "HELLO" to it ...
	 */
	(void) t4_init_devlog_params(adap, 1);

out:
	adap->flags |= FW_OK;
	return ret;
}

/**
 * t4_fl_pkt_align - return the fl packet alignment
 * @adap: the adapter
 * @is_packed: True when the driver uses packed FLM mode
 *
 * T4 has a single field to specify the packing and padding boundary.
 * T5 onwards has separate fields for this and hence the alignment for
 * next packet offset is maximum of these two.
 */
int t4_fl_pkt_align(struct adapter *adap, bool is_packed)
{
	u32 sge_control, sge_control2;
	unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;

	sge_control = t4_read_reg(adap, A_SGE_CONTROL);

	/*
	 * T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately.  The actual Ingress Packet Data alignment boundary
	 * within Packed Buffer Mode is the maximum of these two
	 * specifications.  (Note that it makes no real practical sense to
	 * have the Padding Boundary be larger than the Packing Boundary but
	 * you could set the chip up that way and, in fact, legacy T4 code
	 * would end up doing this because it would initialize the Padding
	 * Boundary and leave the Packing Boundary initialized to 0 (16
	 * bytes).)
	 * Padding Boundary values in T6 start from 8B,
	 * whereas it is 32B for T4 and T5.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		ingpad_shift = X_INGPADBOUNDARY_SHIFT;
	else
		ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;

	ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);

	fl_align = ingpadboundary;
	if (!is_t4(adap->params.chip) && is_packed) {
		/*
		 * T5 has a weird interpretation of one of the PCIe Packing
		 * Boundary values.  No idea why ...
		 */
		sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
		ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
		if (ingpackboundary == X_INGPACKBOUNDARY_16B)
			ingpackboundary = 16;
		else
			ingpackboundary = 1 << (ingpackboundary +
			    X_INGPACKBOUNDARY_SHIFT);

		fl_align = max(ingpadboundary, ingpackboundary);
	}
	return fl_align;
}

/**
 * t4_fixup_host_params_compat - fix up host-dependent parameters
 * @adap: the adapter
 * @page_size: the host's Base Page Size
 * @cache_line_size: the host's Cache Line Size
 * @chip_compat: maintain compatibility with designated chip
 *
 * Various registers in the chip contain values which are dependent on the
 * host's Base Page and Cache Line Sizes.  This function will fix all of
 * those registers with the appropriate values as passed in ...
 *
 * @chip_compat is used to limit the set of changes that are made
 * to be compatible with the indicated chip release.  This is used by
 * drivers to maintain compatibility with chip register settings when
 * the drivers haven't [yet] been updated with new chip support.
 */
int t4_fixup_host_params_compat(struct adapter *adap,
				unsigned int page_size,
				unsigned int cache_line_size,
				enum chip_type chip_compat)
{
	unsigned int page_shift = fls(page_size) - 1;
	unsigned int sge_hps = page_shift - 10;
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = fls(fl_align) - 1;

	/* Program the host page size for all eight PFs. */
	t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
	    V_HOSTPAGESIZEPF0(sge_hps) |
	    V_HOSTPAGESIZEPF1(sge_hps) |
	    V_HOSTPAGESIZEPF2(sge_hps) |
	    V_HOSTPAGESIZEPF3(sge_hps) |
	    V_HOSTPAGESIZEPF4(sge_hps) |
	    V_HOSTPAGESIZEPF5(sge_hps) |
	    V_HOSTPAGESIZEPF6(sge_hps) |
	    V_HOSTPAGESIZEPF7(sge_hps));

	if (is_t4(adap->params.chip) || is_t4(chip_compat)) {
		t4_set_reg_field(adap, A_SGE_CONTROL,
		    V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
		    F_EGRSTATUSPAGESIZE,
		    V_INGPADBOUNDARY(fl_align_log -
			X_INGPADBOUNDARY_SHIFT) |
		    V_EGRSTATUSPAGESIZE(stat_len != 64));
	} else {
		unsigned int pack_align;
		unsigned int ingpad, ingpack;
		unsigned int pcie_cap;

		/*
		 * T5 introduced the separation of the Free List Padding and
		 * Packing Boundaries.  Thus, we can select a smaller Padding
		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
		 * Bandwidth, and use a Packing Boundary which is large enough
		 * to avoid false sharing between CPUs, etc.
		 *
		 * For the PCI Link, the smaller the Padding Boundary the
		 * better.  For the Memory Controller, a smaller Padding
		 * Boundary is better until we cross under the Memory Line
		 * Size (the minimum unit of transfer to/from Memory).  If we
		 * have a Padding Boundary which is smaller than the Memory
		 * Line Size, that'll involve a Read-Modify-Write cycle on the
		 * Memory Controller which is never good.
		 */

		/*
		 * We want the Packing Boundary to be based on the Cache Line
		 * Size in order to help avoid False Sharing performance
		 * issues between CPUs, etc.  We also want the Packing
		 * Boundary to incorporate the PCI-E Maximum Payload Size.  We
		 * get best performance when the Packing Boundary is a
		 * multiple of the Maximum Payload Size.
		 */
		pack_align = fl_align;
		pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
		if (pcie_cap) {
			unsigned int mps, mps_log;
			u16 devctl;

			/*
			 * The PCIe Device Control Maximum Payload Size field
			 * [bits 7:5] encodes sizes as powers of 2 starting at
			 * 128 bytes.
			 */
			t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
			    &devctl);
			mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
			mps = 1 << mps_log;
			if (mps > pack_align)
				pack_align = mps;
		}

		/*
		 * N.B. T5/T6 have a crazy special interpretation of the "0"
		 * value for the Packing Boundary.  This corresponds to 16
		 * bytes instead of the expected 32 bytes.  So if we want 32
		 * bytes, the best we can really do is 64 bytes ...
		 */
		if (pack_align <= 16) {
			ingpack = X_INGPACKBOUNDARY_16B;
			fl_align = 16;
		} else if (pack_align == 32) {
			ingpack = X_INGPACKBOUNDARY_64B;
			fl_align = 64;
		} else {
			unsigned int pack_align_log = fls(pack_align) - 1;
			ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
			fl_align = pack_align;
		}

		/*
		 * Use the smallest Ingress Padding which isn't smaller than
		 * the Memory Controller Read/Write Size.  We'll take that as
		 * being 8 bytes since we don't know of any system with a
		 * wider Memory Controller Bus Width.
		 */
		if (is_t5(adap->params.chip))
			ingpad = X_INGPADBOUNDARY_32B;
		else
			ingpad = X_T6_INGPADBOUNDARY_8B;

		t4_set_reg_field(adap, A_SGE_CONTROL,
		    V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
		    F_EGRSTATUSPAGESIZE,
		    V_INGPADBOUNDARY(ingpad) |
		    V_EGRSTATUSPAGESIZE(stat_len != 64));
		t4_set_reg_field(adap, A_SGE_CONTROL2,
		    V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
		    V_INGPACKBOUNDARY(ingpack));
	}
	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * This is something of a crock since we're using fixed indices into
	 * the array which are also known by the sge.c code and the T4
	 * Firmware Configuration File.  We need to come up with a much better
	 * approach to managing this array.  For now, the first four entries
	 * are:
	 *
	 *   0: Host Page Size
	 *   1: 64KB
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
	    (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align-1)
	    & ~(fl_align-1));
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
	    (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align-1)
	    & ~(fl_align-1));

	t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));

	return 0;
}

/**
 * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
 * @adap: the adapter
 * @page_size: the host's Base Page Size
 * @cache_line_size: the host's Cache Line Size
 *
 * Various registers in T4 contain values which are dependent on the
 * host's Base Page and Cache Line Sizes.  This function will fix all of
 * those registers with the appropriate values as passed in ...
 *
 * This routine makes changes which are compatible with T4 chips.
 */
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size)
{
	/* Thin wrapper: apply host fixups limited to T4-compatible changes. */
	return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
	    T4_LAST_REV);
}

/**
 * t4_fw_initialize - ask FW to initialize the device
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *
 * Issues a command to FW to partially initialize the device.  This
 * performs initialization that generally doesn't depend on user input.
 */
int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_query_params_rw - query FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 * @rw: Write and read flag
 * @sleep_ok: if true, we may sleep awaiting mbox cmd completion
 *
 * Reads the value of FW or device parameters.  Up to 7 parameters can be
 * queried at once; returns -EINVAL for more than 7.
 */
int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val, int rw, bool sleep_ok)
{
	int i, ret;
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ |
	    V_FW_PARAMS_CMD_PFN(pf) |
	    V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	/*
	 * Pack (mnemonic, value) pairs; the value slot is only filled in
	 * when @rw asks for a write-and-readback.
	 */
	for (i = 0; i < nparams; i++) {
		*p++ = cpu_to_be32(*params++);
		if (rw)
			*p = cpu_to_be32(*(val + i));
		p++;
	}

	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);

	/*
	 * We always copy back the results, even if there's an error.  We'll
	 * get an error if any of the parameters was unknown to the Firmware,
	 * but there will be results for the others ...  (Older Firmware
	 * stopped at the first unknown parameter; newer Firmware processes
	 * them all and flags the unknown parameters with a return value of
	 * ~0UL.)
	 */
	for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
		*val++ = be32_to_cpu(*p);

	return ret;
}

/* Sleeping variant of t4_query_params_rw() for read-only queries. */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
	    true);
}

/* Non-sleeping ("ns") variant of t4_query_params_rw() for read-only queries. */
int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
	    false);
}

/**
 * t4_set_params_timeout - sets FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 * @timeout: the timeout time
 *
 * Sets the value of FW or device parameters.  Up to 7 parameters can be
 * specified at once.
 */
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
			  unsigned int pf, unsigned int vf,
			  unsigned int nparams, const u32 *params,
			  const u32 *val, int timeout)
{
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
	    V_FW_PARAMS_CMD_PFN(pf) |
	    V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	/* Pack (mnemonic, value) pairs into the command payload. */
	while (nparams--) {
		*p++ = cpu_to_be32(*params++);
		*p++ = cpu_to_be32(*val++);
	}

	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
}

/**
 * t4_set_params - sets FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 *
 * Sets the value of FW or device parameters.  Up to 7 parameters can be
 * specified at once.
 */
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val)
{
	/* Same as t4_set_params_timeout() with the default mbox timeout. */
	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
	    FW_CMD_MAX_TIMEOUT);
}

/**
 * t4_cfg_pfvf - configure PF/VF resource limits
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF being configured
 * @vf: the VF being configured
 * @txq: the max number of egress queues
 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
 * @rxqi: the max number of interrupt-capable ingress queues
 * @rxq: the max number of interruptless ingress queues
 * @tc: the PCI traffic class
 * @vi: the max number of virtual interfaces
 * @cmask: the channel access rights mask for the PF/VF
 * @pmask: the port access rights mask for the PF/VF
 * @nexact: the maximum number of exact MPS filters
 * @rcaps: read capabilities
 * @wxcaps: write/execute capabilities
 *
 * Configures resource limits and capabilities for a physical or virtual
 * function.
 */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
	    V_FW_PFVF_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
	    V_FW_PFVF_CMD_NIQ(rxq));
	c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
	    V_FW_PFVF_CMD_PMASK(pmask) |
	    V_FW_PFVF_CMD_NEQ(txq));
	c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
	    V_FW_PFVF_CMD_NVI(vi) |
	    V_FW_PFVF_CMD_NEXACTF(nexact));
	c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
	    V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
	    V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_alloc_vi_func - allocate a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @nmac: number of MAC addresses needed (1 to 5)
 * @mac: the MAC addresses of the VI
 * @rss_size: size of RSS table slice associated with this VI
 * @vivld: where to store the FW VI-valid flag (may be NULL)
 * @vin: where to store the FW VI number (may be NULL)
 * @portfunc: which Port Application Function MAC Address is desired
 * @idstype: Intrusion Detection Type
 *
 * Allocates a virtual interface for the given physical port.  If @mac is
 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
 * If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
 * @mac should be large enough to hold @nmac Ethernet addresses, they are
 * stored consecutively so the space needed is @nmac * 6 bytes.
 * Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
		     unsigned int port, unsigned int pf, unsigned int vf,
		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
		     u8 *vivld, u8 *vin,
		     unsigned int portfunc, unsigned int idstype)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
	    V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
	    V_FW_VI_CMD_FUNC(portfunc));
	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
	c.nmac = nmac - 1;	/* FW encodes "number of extra MACs" */
	if (!rss_size)
		c.norss_rsssize = F_FW_VI_CMD_NORSS;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	/*
	 * Copy back the primary MAC plus up to four additional MACs,
	 * 6 bytes each, packed consecutively into @mac.
	 */
	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* FALLTHRU */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* FALLTHRU */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* FALLTHRU */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));

	if (vivld)
		*vivld = G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16));

	if (vin)
		*vin = G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16));

	return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
}

/**
 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @nmac: number of MAC addresses needed (1 to 5)
 * @mac: the MAC addresses of the VI
 * @rss_size: size of RSS table slice associated with this VI
 *
 * Backwards-compatible convenience routine to allocate a Virtual
 * Interface with an Ethernet Port Application Function and Intrusion
 * Detection System disabled.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size, u8 *vivld, u8 *vin)
{
	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
	    vivld, vin, FW_VI_FUNC_ETH, 0);
}


/**
 * t4_free_vi - free a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @viid: virtual interface identifier
 *
 * Free a previously allocated virtual interface.
 */
int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int viid)
{
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
	    F_FW_CMD_REQUEST |
	    F_FW_CMD_EXEC |
	    V_FW_VI_CMD_PFN(pf) |
	    V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
}

/**
 * t4_set_rxmode - set Rx properties of a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @mtu: the new MTU or -1
 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sets Rx properties of a virtual interface.
 */
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok)
{
	struct fw_vi_rxmode_cmd c;

	/* Convert each "-1 == no change" input to the FW mask value. */
	if (mtu < 0)
		mtu = M_FW_VI_RXMODE_CMD_MTU;
	if (promisc < 0)
		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
	if (all_multi < 0)
		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
	if (bcast < 0)
		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
	if (vlanex < 0)
		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
	    V_FW_VI_RXMODE_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.mtu_to_vlanexen =
	    cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
		V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
		V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
		V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
		V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

/**
 * t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
 * @adap: the adapter
 * @viid: the VI id
 * @addr: the MAC address
 * @mask: the mask
 * @vni: the VNI id for the tunnel protocol
 * @vni_mask: mask for the VNI id
 * @dip_hit: to enable DIP match for the MPS entry
 * @lookup_type: MAC address for inner (1) or outer (0) header
 * @sleep_ok: call is allowed to sleep
 *
 * Allocates an MPS entry with specified MAC address and VNI value.
 *
 * Returns a negative error number or the allocated index for this mac.
 */
int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
			    const u8 *addr, const u8 *mask, unsigned int vni,
			    unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
			    bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_vni *p = c.u.exact_vni;
	int ret = 0;
	u32 val;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
	    V_FW_VI_MAC_CMD_VIID(viid));
	val = V_FW_CMD_LEN16(1) |
	    V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC_VNI);
	c.freemacs_to_len16 = cpu_to_be32(val);
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
	    V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));
	memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));

	p->lookup_type_to_vni = cpu_to_be32(V_FW_VI_MAC_CMD_VNI(vni) |
	    V_FW_VI_MAC_CMD_DIP_HIT(dip_hit) |
	    V_FW_VI_MAC_CMD_LOOKUP_TYPE(lookup_type));
	p->vni_mask_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_VNI_MASK(vni_mask));

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	/* On success FW echoes back the index it allocated for this entry. */
	if (ret == 0)
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
	return ret;
}

/**
 * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
 * @adap: the adapter
 * @viid: the VI id
 * @addr: the MAC address
 * @mask: the mask
 * @idx: index at which to add this entry
 * @lookup_type: MAC address for inner (1) or outer (0) header
 * @port_id: the port index
 * @sleep_ok: call is allowed to sleep
 *
 * Adds the mac entry at the specified index using raw mac interface.
 *
 * Returns a negative error number or the allocated index for this mac.
 */
int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
			  const u8 *addr, const u8 *mask, unsigned int idx,
			  u8 lookup_type, u8 port_id, bool sleep_ok)
{
	int ret = 0;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_raw *p = &c.u.raw;
	u32 val;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
	    V_FW_VI_MAC_CMD_VIID(viid));
	val = V_FW_CMD_LEN16(1) |
	    V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
	c.freemacs_to_len16 = cpu_to_be32(val);

	/* The caller-requested TCAM index for this raw entry. */
	p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));

	/* Lookup Type.  Outer header: 0, Inner header: 1 */
	p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
	    V_DATAPORTNUM(port_id));
	/* Lookup mask and port mask */
	p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
	    V_DATAPORTNUM(M_DATAPORTNUM));

	/* Copy the address and the mask */
	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret == 0) {
		/* FW must place the entry at the requested index. */
		ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
		if (ret != idx)
			ret = -ENOMEM;
	}

	return ret;
}

/**
 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @free: if true any existing filters for this VI id are first removed
 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
 * @addr: the MAC address(es)
 * @idx: where to store the index of each allocated filter
 * @hash: pointer to hash address filter bitmap
 * @sleep_ok: call is allowed to sleep
 *
 * Allocates an exact-match filter for each of the
 * supplied addresses and
 * sets it to the corresponding address.  If @idx is not %NULL it should
 * have at least @naddr entries, each of which will be set to the index of
 * the filter allocated for the corresponding MAC address.  If a filter
 * could not be allocated for an address its index is set to 0xffff.
 * If @hash is not %NULL addresses that fail to allocate an exact filter
 * are hashed and update the hash filter bitmap pointed at by @hash.
 *
 * Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	unsigned int max_naddr = adap->params.arch.mps_tcam_size;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	/* Submit the addresses in batches of up to ARRAY_SIZE(c.u.exact). */
	for (offset = 0; offset < naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
		    ? rem
		    : ARRAY_SIZE(c.u.exact));
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
		    u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
		    F_FW_CMD_REQUEST |
		    F_FW_CMD_WRITE |
		    V_FW_CMD_EXEC(free) |
		    V_FW_VI_MAC_CMD_VIID(viid));
		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
		    V_FW_CMD_LEN16(len16));

		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx =
			    cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		/*
		 * It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = G_FW_VI_MAC_CMD_IDX(
			    be16_to_cpu(p->valid_to_idx));

			if (idx)
				idx[offset+i] = (index >= max_naddr
				    ? 0xffff
				    : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		/* Only the first batch may free pre-existing filters. */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/* Partial success (-FW_ENOMEM) still reports filters allocated. */
	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}

/**
 * t4_free_encap_mac_filt - frees MPS entry at given index
 * @adap: the adapter
 * @viid: the VI id
 * @idx: index of MPS entry to be freed
 * @sleep_ok: call is allowed to sleep
 *
 * Frees the MPS entry at supplied index.
 *
 * Returns a negative error number or zero on success.
 */
int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
			   int idx, bool sleep_ok)
{
	struct fw_vi_mac_exact *p;
	struct fw_vi_mac_cmd c;
	u8 addr[] = {0, 0, 0, 0, 0, 0};	/* write an all-zero MAC back */
	int ret = 0;
	u32 exact;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
	    F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE |
	    V_FW_CMD_EXEC(0) |
	    V_FW_VI_MAC_CMD_VIID(viid));
	exact = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_EXACTMAC);
	c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) |
	    exact |
	    V_FW_CMD_LEN16(1));
	p = c.u.exact;
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
	    V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	return ret;
}
/** 8431 * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam 8432 * @adap: the adapter 8433 * @viid: the VI id 8434 * @addr: the MAC address 8435 * @mask: the mask 8436 * @idx: index of the entry in mps tcam 8437 * @lookup_type: MAC address for inner (1) or outer (0) header 8438 * @port_id: the port index 8439 * @sleep_ok: call is allowed to sleep 8440 * 8441 * Removes the mac entry at the specified index using raw mac interface. 8442 * 8443 * Returns a negative error number on failure. 8444 */ 8445 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid, 8446 const u8 *addr, const u8 *mask, unsigned int idx, 8447 u8 lookup_type, u8 port_id, bool sleep_ok) 8448 { 8449 struct fw_vi_mac_cmd c; 8450 struct fw_vi_mac_raw *p = &c.u.raw; 8451 u32 raw; 8452 8453 memset(&c, 0, sizeof(c)); 8454 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | 8455 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 8456 V_FW_CMD_EXEC(0) | 8457 V_FW_VI_MAC_CMD_VIID(viid)); 8458 raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW); 8459 c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) | 8460 raw | 8461 V_FW_CMD_LEN16(1)); 8462 8463 p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx) | 8464 FW_VI_MAC_ID_BASED_FREE); 8465 8466 /* Lookup Type. 
Outer header: 0, Inner header: 1 */ 8467 p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) | 8468 V_DATAPORTNUM(port_id)); 8469 /* Lookup mask and port mask */ 8470 p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) | 8471 V_DATAPORTNUM(M_DATAPORTNUM)); 8472 8473 /* Copy the address and the mask */ 8474 memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN); 8475 memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN); 8476 8477 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); 8478 } 8479 8480 /** 8481 * t4_free_mac_filt - frees exact-match filters of given MAC addresses 8482 * @adap: the adapter 8483 * @mbox: mailbox to use for the FW command 8484 * @viid: the VI id 8485 * @naddr: the number of MAC addresses to allocate filters for (up to 7) 8486 * @addr: the MAC address(es) 8487 * @sleep_ok: call is allowed to sleep 8488 * 8489 * Frees the exact-match filter for each of the supplied addresses 8490 * 8491 * Returns a negative error number or the number of filters freed. 8492 */ 8493 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox, 8494 unsigned int viid, unsigned int naddr, 8495 const u8 **addr, bool sleep_ok) 8496 { 8497 int offset, ret = 0; 8498 struct fw_vi_mac_cmd c; 8499 unsigned int nfilters = 0; 8500 unsigned int max_naddr = is_t4(adap->params.chip) ? 8501 NUM_MPS_CLS_SRAM_L_INSTANCES : 8502 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 8503 unsigned int rem = naddr; 8504 8505 if (naddr > max_naddr) 8506 return -EINVAL; 8507 8508 for (offset = 0; offset < (int)naddr ; /**/) { 8509 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) 8510 ? 
rem 8511 : ARRAY_SIZE(c.u.exact)); 8512 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, 8513 u.exact[fw_naddr]), 16); 8514 struct fw_vi_mac_exact *p; 8515 int i; 8516 8517 memset(&c, 0, sizeof(c)); 8518 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | 8519 F_FW_CMD_REQUEST | 8520 F_FW_CMD_WRITE | 8521 V_FW_CMD_EXEC(0) | 8522 V_FW_VI_MAC_CMD_VIID(viid)); 8523 c.freemacs_to_len16 = 8524 cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0) | 8525 V_FW_CMD_LEN16(len16)); 8526 8527 for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) { 8528 p->valid_to_idx = cpu_to_be16( 8529 F_FW_VI_MAC_CMD_VALID | 8530 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE)); 8531 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); 8532 } 8533 8534 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); 8535 if (ret) 8536 break; 8537 8538 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { 8539 u16 index = G_FW_VI_MAC_CMD_IDX( 8540 be16_to_cpu(p->valid_to_idx)); 8541 8542 if (index < max_naddr) 8543 nfilters++; 8544 } 8545 8546 offset += fw_naddr; 8547 rem -= fw_naddr; 8548 } 8549 8550 if (ret == 0) 8551 ret = nfilters; 8552 return ret; 8553 } 8554 8555 /** 8556 * t4_change_mac - modifies the exact-match filter for a MAC address 8557 * @adap: the adapter 8558 * @mbox: mailbox to use for the FW command 8559 * @viid: the VI id 8560 * @idx: index of existing filter for old value of MAC address, or -1 8561 * @addr: the new MAC address value 8562 * @persist: whether a new MAC allocation should be persistent 8563 * @add_smt: if true also add the address to the HW SMT 8564 * 8565 * Modifies an exact-match filter and sets it to the new MAC address if 8566 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the 8567 * latter case the address is added persistently if @persist is %true. 
8568 * 8569 * Note that in general it is not possible to modify the value of a given 8570 * filter so the generic way to modify an address filter is to free the one 8571 * being used by the old address value and allocate a new filter for the 8572 * new address value. 8573 * 8574 * Returns a negative error number or the index of the filter with the new 8575 * MAC value. Note that this index may differ from @idx. 8576 */ 8577 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, 8578 int idx, const u8 *addr, bool persist, u8 *smt_idx) 8579 { 8580 /* This will add this mac address to the destination TCAM region */ 8581 return t4_add_mac(adap, mbox, viid, idx, addr, persist, smt_idx, 0); 8582 } 8583 8584 /** 8585 * t4_set_addr_hash - program the MAC inexact-match hash filter 8586 * @adap: the adapter 8587 * @mbox: mailbox to use for the FW command 8588 * @viid: the VI id 8589 * @ucast: whether the hash filter should also match unicast addresses 8590 * @vec: the value to be written to the hash filter 8591 * @sleep_ok: call is allowed to sleep 8592 * 8593 * Sets the 64-bit inexact-match hash filter for a virtual interface. 
8594 */ 8595 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, 8596 bool ucast, u64 vec, bool sleep_ok) 8597 { 8598 struct fw_vi_mac_cmd c; 8599 u32 val; 8600 8601 memset(&c, 0, sizeof(c)); 8602 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | 8603 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 8604 V_FW_VI_ENABLE_CMD_VIID(viid)); 8605 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) | 8606 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1); 8607 c.freemacs_to_len16 = cpu_to_be32(val); 8608 c.u.hash.hashvec = cpu_to_be64(vec); 8609 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 8610 } 8611 8612 /** 8613 * t4_enable_vi_params - enable/disable a virtual interface 8614 * @adap: the adapter 8615 * @mbox: mailbox to use for the FW command 8616 * @viid: the VI id 8617 * @rx_en: 1=enable Rx, 0=disable Rx 8618 * @tx_en: 1=enable Tx, 0=disable Tx 8619 * @dcb_en: 1=enable delivery of Data Center Bridging messages. 8620 * 8621 * Enables/disables a virtual interface. Note that setting DCB Enable 8622 * only makes sense when enabling a Virtual Interface ... 
8623 */ 8624 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, 8625 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en) 8626 { 8627 struct fw_vi_enable_cmd c; 8628 8629 memset(&c, 0, sizeof(c)); 8630 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | 8631 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 8632 V_FW_VI_ENABLE_CMD_VIID(viid)); 8633 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) | 8634 V_FW_VI_ENABLE_CMD_EEN(tx_en) | 8635 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) | 8636 FW_LEN16(c)); 8637 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); 8638 } 8639 8640 /** 8641 * t4_enable_vi - enable/disable a virtual interface 8642 * @adap: the adapter 8643 * @mbox: mailbox to use for the FW command 8644 * @viid: the VI id 8645 * @rx_en: 1=enable Rx, 0=disable Rx 8646 * @tx_en: 1=enable Tx, 0=disable Tx 8647 * 8648 * Enables/disables a virtual interface. Note that setting DCB Enable 8649 * only makes sense when enabling a Virtual Interface ... 8650 */ 8651 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, 8652 bool rx_en, bool tx_en) 8653 { 8654 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0); 8655 } 8656 8657 /** 8658 * t4_enable_pi_params - enable/disable a Port's Virtual Interface 8659 * @adap: the adapter 8660 * @mbox: mailbox to use for the FW command 8661 * @pi: the Port Information structure 8662 * @rx_en: 1=enable Rx, 0=disable Rx 8663 * @tx_en: 1=enable Tx, 0=disable Tx 8664 * @dcb_en: 1=enable delivery of Data Center Bridging messages. 8665 * 8666 * Enables/disables a Port's Virtual Interface. Note that setting DCB 8667 * Enable only makes sense when enabling a Virtual Interface ... 8668 * If the Virtual Interface enable/disable operation is successful, 8669 * we notify the OS-specific code of a potential Link Status change 8670 * via the OS Contract API t4_os_link_changed(). 
8671 */ 8672 int t4_enable_pi_params(struct adapter *adap, unsigned int mbox, 8673 struct port_info *pi, 8674 bool rx_en, bool tx_en, bool dcb_en) 8675 { 8676 int ret = t4_enable_vi_params(adap, mbox, pi->viid, 8677 rx_en, tx_en, dcb_en); 8678 if (ret) 8679 return ret; 8680 t4_os_link_changed(adap, pi->port_id, 8681 rx_en && tx_en && pi->link_cfg.link_ok); 8682 return 0; 8683 } 8684 8685 /** 8686 * t4_identify_port - identify a VI's port by blinking its LED 8687 * @adap: the adapter 8688 * @mbox: mailbox to use for the FW command 8689 * @viid: the VI id 8690 * @nblinks: how many times to blink LED at 2.5 Hz 8691 * 8692 * Identifies a VI's port by blinking its LED. 8693 */ 8694 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, 8695 unsigned int nblinks) 8696 { 8697 struct fw_vi_enable_cmd c; 8698 8699 memset(&c, 0, sizeof(c)); 8700 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | 8701 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 8702 V_FW_VI_ENABLE_CMD_VIID(viid)); 8703 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); 8704 c.blinkdur = cpu_to_be16(nblinks); 8705 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 8706 } 8707 8708 /** 8709 * t4_iq_stop - stop an ingress queue and its FLs 8710 * @adap: the adapter 8711 * @mbox: mailbox to use for the FW command 8712 * @pf: the PF owning the queues 8713 * @vf: the VF owning the queues 8714 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) 8715 * @iqid: ingress queue id 8716 * @fl0id: FL0 queue id or 0xffff if no attached FL0 8717 * @fl1id: FL1 queue id or 0xffff if no attached FL1 8718 * 8719 * Stops an ingress queue and its associated FLs, if any. This causes 8720 * any current or future data/messages destined for these queues to be 8721 * tossed. 
8722 */ 8723 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf, 8724 unsigned int vf, unsigned int iqtype, unsigned int iqid, 8725 unsigned int fl0id, unsigned int fl1id) 8726 { 8727 struct fw_iq_cmd c; 8728 8729 memset(&c, 0, sizeof(c)); 8730 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 8731 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | 8732 V_FW_IQ_CMD_VFN(vf)); 8733 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c)); 8734 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype)); 8735 c.iqid = cpu_to_be16(iqid); 8736 c.fl0id = cpu_to_be16(fl0id); 8737 c.fl1id = cpu_to_be16(fl1id); 8738 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 8739 } 8740 8741 /** 8742 * t4_iq_free - free an ingress queue and its FLs 8743 * @adap: the adapter 8744 * @mbox: mailbox to use for the FW command 8745 * @pf: the PF owning the queues 8746 * @vf: the VF owning the queues 8747 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) 8748 * @iqid: ingress queue id 8749 * @fl0id: FL0 queue id or 0xffff if no attached FL0 8750 * @fl1id: FL1 queue id or 0xffff if no attached FL1 8751 * 8752 * Frees an ingress queue and its associated FLs, if any. 
8753 */ 8754 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 8755 unsigned int vf, unsigned int iqtype, unsigned int iqid, 8756 unsigned int fl0id, unsigned int fl1id) 8757 { 8758 struct fw_iq_cmd c; 8759 8760 memset(&c, 0, sizeof(c)); 8761 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 8762 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | 8763 V_FW_IQ_CMD_VFN(vf)); 8764 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c)); 8765 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype)); 8766 c.iqid = cpu_to_be16(iqid); 8767 c.fl0id = cpu_to_be16(fl0id); 8768 c.fl1id = cpu_to_be16(fl1id); 8769 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 8770 } 8771 8772 /** 8773 * t4_eth_eq_free - free an Ethernet egress queue 8774 * @adap: the adapter 8775 * @mbox: mailbox to use for the FW command 8776 * @pf: the PF owning the queue 8777 * @vf: the VF owning the queue 8778 * @eqid: egress queue id 8779 * 8780 * Frees an Ethernet egress queue. 8781 */ 8782 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 8783 unsigned int vf, unsigned int eqid) 8784 { 8785 struct fw_eq_eth_cmd c; 8786 8787 memset(&c, 0, sizeof(c)); 8788 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | 8789 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 8790 V_FW_EQ_ETH_CMD_PFN(pf) | 8791 V_FW_EQ_ETH_CMD_VFN(vf)); 8792 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); 8793 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid)); 8794 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 8795 } 8796 8797 /** 8798 * t4_ctrl_eq_free - free a control egress queue 8799 * @adap: the adapter 8800 * @mbox: mailbox to use for the FW command 8801 * @pf: the PF owning the queue 8802 * @vf: the VF owning the queue 8803 * @eqid: egress queue id 8804 * 8805 * Frees a control egress queue. 
8806 */ 8807 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 8808 unsigned int vf, unsigned int eqid) 8809 { 8810 struct fw_eq_ctrl_cmd c; 8811 8812 memset(&c, 0, sizeof(c)); 8813 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | 8814 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 8815 V_FW_EQ_CTRL_CMD_PFN(pf) | 8816 V_FW_EQ_CTRL_CMD_VFN(vf)); 8817 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c)); 8818 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid)); 8819 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 8820 } 8821 8822 /** 8823 * t4_ofld_eq_free - free an offload egress queue 8824 * @adap: the adapter 8825 * @mbox: mailbox to use for the FW command 8826 * @pf: the PF owning the queue 8827 * @vf: the VF owning the queue 8828 * @eqid: egress queue id 8829 * 8830 * Frees a control egress queue. 8831 */ 8832 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 8833 unsigned int vf, unsigned int eqid) 8834 { 8835 struct fw_eq_ofld_cmd c; 8836 8837 memset(&c, 0, sizeof(c)); 8838 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | 8839 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 8840 V_FW_EQ_OFLD_CMD_PFN(pf) | 8841 V_FW_EQ_OFLD_CMD_VFN(vf)); 8842 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c)); 8843 c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid)); 8844 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 8845 } 8846 8847 /** 8848 * Return the highest speed set in the port capabilities, in Mb/s. 
8849 */ 8850 unsigned int t4_link_fwcap_to_speed(fw_port_cap32_t caps) 8851 { 8852 #define TEST_SPEED_RETURN(__caps_speed, __speed) \ 8853 do { \ 8854 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \ 8855 return __speed; \ 8856 } while (0) 8857 8858 TEST_SPEED_RETURN(400G, 400000); 8859 TEST_SPEED_RETURN(200G, 200000); 8860 TEST_SPEED_RETURN(100G, 100000); 8861 TEST_SPEED_RETURN(50G, 50000); 8862 TEST_SPEED_RETURN(40G, 40000); 8863 TEST_SPEED_RETURN(25G, 25000); 8864 TEST_SPEED_RETURN(10G, 10000); 8865 TEST_SPEED_RETURN(1G, 1000); 8866 TEST_SPEED_RETURN(100M, 100); 8867 8868 #undef TEST_SPEED_RETURN 8869 8870 return 0; 8871 } 8872 8873 /** 8874 * t4_link_fwcap_to_fwspeed - return highest speed in Port Capabilities 8875 * @acaps: advertised Port Capabilities 8876 * 8877 * Get the highest speed for the port from the advertised Port 8878 * Capabilities. It will be either the highest speed from the list of 8879 * speeds or whatever user has set using ethtool. 8880 */ 8881 static fw_port_cap32_t t4_link_fwcap_to_fwspeed(fw_port_cap32_t acaps) 8882 { 8883 #define TEST_SPEED_RETURN(__caps_speed) \ 8884 do { \ 8885 if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \ 8886 return FW_PORT_CAP32_SPEED_##__caps_speed; \ 8887 } while (0) 8888 8889 TEST_SPEED_RETURN(400G); 8890 TEST_SPEED_RETURN(200G); 8891 TEST_SPEED_RETURN(100G); 8892 TEST_SPEED_RETURN(50G); 8893 TEST_SPEED_RETURN(40G); 8894 TEST_SPEED_RETURN(25G); 8895 TEST_SPEED_RETURN(10G); 8896 TEST_SPEED_RETURN(1G); 8897 TEST_SPEED_RETURN(100M); 8898 8899 #undef TEST_SPEED_RETURN 8900 8901 return 0; 8902 } 8903 8904 /** 8905 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits 8906 * @caps16: a 16-bit Port Capabilities value 8907 * 8908 * Returns the equivalent 32-bit Port Capabilities value. 
8909 */ 8910 static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) 8911 { 8912 fw_port_cap32_t caps32 = 0; 8913 8914 #define CAP16_TO_CAP32(__cap) \ 8915 do { \ 8916 if (caps16 & FW_PORT_CAP_##__cap) \ 8917 caps32 |= FW_PORT_CAP32_##__cap; \ 8918 } while (0) 8919 8920 CAP16_TO_CAP32(SPEED_100M); 8921 CAP16_TO_CAP32(SPEED_1G); 8922 CAP16_TO_CAP32(SPEED_25G); 8923 CAP16_TO_CAP32(SPEED_10G); 8924 CAP16_TO_CAP32(SPEED_40G); 8925 CAP16_TO_CAP32(SPEED_100G); 8926 CAP16_TO_CAP32(FC_RX); 8927 CAP16_TO_CAP32(FC_TX); 8928 CAP16_TO_CAP32(ANEG); 8929 CAP16_TO_CAP32(FORCE_PAUSE); 8930 CAP16_TO_CAP32(MDIAUTO); 8931 CAP16_TO_CAP32(MDISTRAIGHT); 8932 CAP16_TO_CAP32(FEC_RS); 8933 CAP16_TO_CAP32(FEC_BASER_RS); 8934 CAP16_TO_CAP32(802_3_PAUSE); 8935 CAP16_TO_CAP32(802_3_ASM_DIR); 8936 8937 #undef CAP16_TO_CAP32 8938 8939 return caps32; 8940 } 8941 8942 /** 8943 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits 8944 * @caps32: a 32-bit Port Capabilities value 8945 * 8946 * Returns the equivalent 16-bit Port Capabilities value. Note that 8947 * not all 32-bit Port Capabilities can be represented in the 16-bit 8948 * Port Capabilities and some fields/values may not make it. 
8949 */ 8950 static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32) 8951 { 8952 fw_port_cap16_t caps16 = 0; 8953 8954 #define CAP32_TO_CAP16(__cap) \ 8955 do { \ 8956 if (caps32 & FW_PORT_CAP32_##__cap) \ 8957 caps16 |= FW_PORT_CAP_##__cap; \ 8958 } while (0) 8959 8960 CAP32_TO_CAP16(SPEED_100M); 8961 CAP32_TO_CAP16(SPEED_1G); 8962 CAP32_TO_CAP16(SPEED_10G); 8963 CAP32_TO_CAP16(SPEED_25G); 8964 CAP32_TO_CAP16(SPEED_40G); 8965 CAP32_TO_CAP16(SPEED_100G); 8966 CAP32_TO_CAP16(FC_RX); 8967 CAP32_TO_CAP16(FC_TX); 8968 CAP32_TO_CAP16(802_3_PAUSE); 8969 CAP32_TO_CAP16(802_3_ASM_DIR); 8970 CAP32_TO_CAP16(ANEG); 8971 CAP32_TO_CAP16(FORCE_PAUSE); 8972 CAP32_TO_CAP16(MDIAUTO); 8973 CAP32_TO_CAP16(MDISTRAIGHT); 8974 CAP32_TO_CAP16(FEC_RS); 8975 CAP32_TO_CAP16(FEC_BASER_RS); 8976 8977 #undef CAP32_TO_CAP16 8978 8979 return caps16; 8980 } 8981 8982 int t4_link_set_autoneg(struct port_info *pi, u8 autoneg, 8983 fw_port_cap32_t *new_caps) 8984 { 8985 struct link_config *lc = &pi->link_cfg; 8986 fw_port_cap32_t caps = *new_caps; 8987 8988 if (autoneg) { 8989 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) 8990 return -ENOTSUP; 8991 8992 caps |= FW_PORT_CAP32_ANEG; 8993 } else { 8994 caps &= ~FW_PORT_CAP32_ANEG; 8995 } 8996 8997 caps &= ~V_FW_PORT_CAP32_MDI(M_FW_PORT_CAP32_MDI); 8998 if (lc->pcaps & FW_PORT_CAP32_MDIAUTO) 8999 caps |= FW_PORT_CAP32_MDIAUTO; 9000 9001 *new_caps = caps; 9002 return 0; 9003 } 9004 9005 int t4_link_set_pause(struct port_info *pi, cc_pause_t pause, 9006 fw_port_cap32_t *new_caps) 9007 { 9008 struct link_config *lc = &pi->link_cfg; 9009 fw_port_cap32_t caps = *new_caps; 9010 9011 caps &= ~V_FW_PORT_CAP32_FC(M_FW_PORT_CAP32_FC); 9012 caps &= ~V_FW_PORT_CAP32_802_3(M_FW_PORT_CAP32_802_3); 9013 9014 if ((pause & PAUSE_TX) && (pause & PAUSE_RX)) { 9015 caps |= FW_PORT_CAP32_FC_TX | FW_PORT_CAP32_FC_RX; 9016 if (lc->pcaps & FW_PORT_CAP32_802_3_PAUSE) 9017 caps |= FW_PORT_CAP32_802_3_PAUSE; 9018 } else if (pause & PAUSE_TX) { 9019 caps |= FW_PORT_CAP32_FC_TX; 
9020 if (lc->pcaps & FW_PORT_CAP32_802_3_ASM_DIR) 9021 caps |= FW_PORT_CAP32_802_3_ASM_DIR; 9022 } else if (pause & PAUSE_RX) { 9023 caps |= FW_PORT_CAP32_FC_RX; 9024 if (lc->pcaps & FW_PORT_CAP32_802_3_PAUSE) 9025 caps |= FW_PORT_CAP32_802_3_PAUSE; 9026 if (lc->pcaps & FW_PORT_CAP32_802_3_ASM_DIR) 9027 caps |= FW_PORT_CAP32_802_3_ASM_DIR; 9028 } 9029 9030 if (!(pause & PAUSE_AUTONEG)) 9031 caps |= FW_PORT_CAP32_FORCE_PAUSE; 9032 9033 *new_caps = caps; 9034 return 0; 9035 } 9036 9037 #define T4_LINK_FEC_MASK V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC) 9038 9039 static fw_port_cap32_t t4_link_supported_speed_to_fec(u32 speed) 9040 { 9041 fw_port_cap32_t caps = 0; 9042 9043 switch (speed) { 9044 case 100000: 9045 caps |= FW_PORT_CAP32_FEC_RS; 9046 break; 9047 case 50000: 9048 caps |= FW_PORT_CAP32_FEC_BASER_RS; 9049 break; 9050 case 25000: 9051 caps |= FW_PORT_CAP32_FEC_RS | 9052 FW_PORT_CAP32_FEC_BASER_RS; 9053 break; 9054 default: 9055 break; 9056 } 9057 9058 caps |= FW_PORT_CAP32_FEC_NO_FEC; 9059 return caps; 9060 } 9061 9062 static void t4_link_update_fec(struct port_info *pi, u32 max_speed, 9063 cc_fec_t fec, fw_port_cap32_t *new_caps) 9064 { 9065 fw_port_cap32_t caps = *new_caps; 9066 9067 caps &= ~T4_LINK_FEC_MASK; 9068 if (fec & FEC_RS) { 9069 switch (max_speed) { 9070 case 100000: 9071 case 25000: 9072 caps |= FW_PORT_CAP32_FEC_RS; 9073 break; 9074 default: 9075 CH_ERR(pi->adapter, 9076 "Ignoring unsupported RS FEC for speed %u\n", 9077 max_speed); 9078 break; 9079 } 9080 } 9081 9082 if (fec & FEC_BASER_RS) { 9083 switch (max_speed) { 9084 case 50000: 9085 case 25000: 9086 caps |= FW_PORT_CAP32_FEC_BASER_RS; 9087 break; 9088 default: 9089 CH_ERR(pi->adapter, 9090 "Ignoring unsupported BASER FEC for speed %u\n", 9091 max_speed); 9092 break; 9093 } 9094 } 9095 9096 if (fec & FEC_NONE) 9097 caps |= FW_PORT_CAP32_FEC_NO_FEC; 9098 9099 if (!(caps & T4_LINK_FEC_MASK)) { 9100 /* No explicit encoding is requested. 9101 * So, default back to AUTO. 
9102 */ 9103 caps |= t4_link_supported_speed_to_fec(max_speed); 9104 caps &= ~FW_PORT_CAP32_FORCE_FEC; 9105 } 9106 9107 if (fec & FEC_FORCE) 9108 caps |= FW_PORT_CAP32_FORCE_FEC; 9109 9110 *new_caps = caps; 9111 } 9112 9113 int t4_link_set_fec(struct port_info *pi, cc_fec_t fec, 9114 fw_port_cap32_t *new_caps) 9115 { 9116 struct link_config *lc = &pi->link_cfg; 9117 u32 max_speed; 9118 9119 if (!(lc->pcaps & T4_LINK_FEC_MASK)) 9120 return -ENOTSUP; 9121 9122 max_speed = t4_link_fwcap_to_speed(lc->link_caps); 9123 /* Link might be down. In that case consider the max 9124 * speed advertised 9125 */ 9126 if (!max_speed) 9127 max_speed = t4_link_fwcap_to_speed(lc->acaps); 9128 9129 t4_link_update_fec(pi, max_speed, fec, new_caps); 9130 return 0; 9131 } 9132 9133 #define T4_LINK_SPEED_MASK V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED) 9134 9135 int t4_link_set_speed(struct port_info *pi, fw_port_cap32_t speed, u8 en, 9136 fw_port_cap32_t *new_caps) 9137 { 9138 fw_port_cap32_t tcaps, caps = *new_caps; 9139 struct link_config *lc = &pi->link_cfg; 9140 9141 if (((lc->pcaps & T4_LINK_SPEED_MASK) & speed) != speed) 9142 return -ENOTSUP; 9143 9144 if (en) 9145 caps |= speed; 9146 else 9147 caps &= ~speed; 9148 9149 /* If no speeds are left, then pick the next highest speed. 
*/ 9150 if (!(caps & T4_LINK_SPEED_MASK)) { 9151 tcaps = CAP32_SPEED(lc->pcaps); 9152 tcaps &= ~speed; 9153 tcaps &= (speed - 1); 9154 if (tcaps == 0) 9155 return -EINVAL; 9156 9157 caps |= t4_link_fwcap_to_fwspeed(tcaps); 9158 } 9159 9160 *new_caps = caps; 9161 return 0; 9162 } 9163 9164 static void t4_link_sanitize_speed_caps(struct link_config *lc, 9165 fw_port_cap32_t *new_caps) 9166 { 9167 fw_port_cap32_t tcaps, caps = *new_caps; 9168 9169 /* Sanitize Speeds when AN is disabled */ 9170 if (!(caps & FW_PORT_CAP32_ANEG)) { 9171 tcaps = CAP32_SPEED(caps); 9172 caps &= ~T4_LINK_SPEED_MASK; 9173 caps |= t4_link_fwcap_to_fwspeed(tcaps); 9174 } 9175 9176 *new_caps = caps; 9177 } 9178 9179 static void t4_link_sanitize_fec_caps(struct link_config *lc, 9180 fw_port_cap32_t *new_caps) 9181 { 9182 fw_port_cap32_t tcaps, caps = *new_caps; 9183 u32 max_speed; 9184 9185 /* Sanitize FECs when supported */ 9186 if (CAP32_FEC(lc->pcaps)) { 9187 max_speed = t4_link_fwcap_to_speed(caps); 9188 tcaps = t4_link_supported_speed_to_fec(max_speed); 9189 if (caps & FW_PORT_CAP32_FORCE_FEC) { 9190 /* If the current chosen FEC params are 9191 * completely invalid, then disable FEC. 9192 * Else, pick only the FECs requested 9193 * by user or the defaults supported by 9194 * the speed. 9195 */ 9196 if (!(tcaps & CAP32_FEC(caps))) 9197 tcaps = FW_PORT_CAP32_FEC_NO_FEC; 9198 else 9199 tcaps &= CAP32_FEC(caps); 9200 } 9201 } else { 9202 /* Always force NO_FEC when FECs are not supported */ 9203 tcaps = FW_PORT_CAP32_FEC_NO_FEC; 9204 } 9205 9206 if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC) { 9207 tcaps |= FW_PORT_CAP32_FORCE_FEC; 9208 } else { 9209 /* Older firmware doesn't allow driver to send request 9210 * to try multiple FECs for FEC_AUTO case. So, clear 9211 * the FEC caps for FEC_AUTO case because the older 9212 * firmware will try all supported FECs on its own. 
9213 */ 9214 caps &= ~FW_PORT_CAP32_FORCE_FEC; 9215 if (tcaps & (tcaps - 1)) 9216 tcaps = 0; 9217 } 9218 9219 caps &= ~T4_LINK_FEC_MASK; 9220 caps |= tcaps; 9221 9222 *new_caps = caps; 9223 } 9224 9225 static void t4_link_sanitize_caps(struct link_config *lc, 9226 fw_port_cap32_t *new_caps) 9227 { 9228 fw_port_cap32_t caps = *new_caps; 9229 9230 t4_link_sanitize_speed_caps(lc, &caps); 9231 t4_link_sanitize_fec_caps(lc, &caps); 9232 9233 /* Remove all unsupported caps */ 9234 if ((lc->pcaps | caps) != lc->pcaps) 9235 caps &= lc->pcaps; 9236 9237 *new_caps = caps; 9238 } 9239 9240 /** 9241 * t4_link_l1cfg_core - apply link configuration to MAC/PHY 9242 * @adapter: the adapter 9243 * @mbox: the Firmware Mailbox to use 9244 * @port: the Port ID 9245 * @lc: the Port's Link Configuration 9246 * @rcap: new link configuration 9247 * @sleep_ok: if true we may sleep while awaiting command completion 9248 * @timeout: time to wait for command to finish before timing out 9249 * (negative implies @sleep_ok=false) 9250 * 9251 * Set up a port's MAC and PHY according to a desired link configuration. 9252 * - If the PHY can auto-negotiate first decide what to advertise, then 9253 * enable/disable auto-negotiation as desired, and reset. 9254 * - If the PHY does not auto-negotiate just reset it. 9255 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, 9256 * otherwise do it later based on the outcome of auto-negotiation. 
9257 */ 9258 int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, 9259 unsigned int port, struct link_config *lc, 9260 fw_port_cap32_t rcap, bool sleep_ok, int timeout) 9261 { 9262 unsigned int fw_caps = adapter->params.fw_caps_support; 9263 struct fw_port_cmd cmd; 9264 int ret; 9265 9266 t4_link_sanitize_caps(lc, &rcap); 9267 9268 memset(&cmd, 0, sizeof(cmd)); 9269 cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | 9270 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 9271 V_FW_PORT_CMD_PORTID(port)); 9272 cmd.action_to_len16 = 9273 cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 9274 ? FW_PORT_ACTION_L1_CFG 9275 : FW_PORT_ACTION_L1_CFG32) | 9276 FW_LEN16(cmd)); 9277 if (fw_caps == FW_CAPS16) 9278 cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap)); 9279 else 9280 cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap); 9281 ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL, 9282 sleep_ok, timeout); 9283 9284 /* Unfortunately, even if the Requested Port Capabilities "fit" within 9285 * the Physical Port Capabilities, some combinations of features may 9286 * still not be legal. For example, 40Gb/s and Reed-Solomon Forward 9287 * Error Correction. So if the Firmware rejects the L1 Configure 9288 * request, flag that here. 9289 */ 9290 if (ret) { 9291 CH_ERR(adapter, 9292 "Requested Port Capabilities 0x%x rejected, error %d\n", 9293 rcap, -ret); 9294 return ret; 9295 } 9296 9297 return 0; 9298 } 9299 9300 /** 9301 * t4_restart_aneg - restart autonegotiation 9302 * @adap: the adapter 9303 * @mbox: mbox to use for the FW command 9304 * @port: the port id 9305 * 9306 * Restarts autonegotiation for the selected port. 
9307 */ 9308 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) 9309 { 9310 unsigned int fw_caps = adap->params.fw_caps_support; 9311 struct fw_port_cmd c; 9312 9313 memset(&c, 0, sizeof(c)); 9314 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | 9315 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 9316 V_FW_PORT_CMD_PORTID(port)); 9317 c.action_to_len16 = 9318 cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 9319 ? FW_PORT_ACTION_L1_CFG 9320 : FW_PORT_ACTION_L1_CFG32) | 9321 FW_LEN16(c)); 9322 if (fw_caps == FW_CAPS16) 9323 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG); 9324 else 9325 c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG); 9326 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 9327 } 9328 9329 /** 9330 * t4_init_link_config - initialize a link's SW state 9331 * @pi: the port info 9332 * @pcaps: link Port Capabilities 9333 * @acaps: link current Advertised Port Capabilities 9334 * 9335 * Initializes the SW state maintained for each link, including the link's 9336 * capabilities and default speed/flow-control/autonegotiation settings. 9337 */ 9338 static void t4_init_link_config(struct port_info *pi, fw_port_cap32_t pcaps, 9339 fw_port_cap32_t acaps) 9340 { 9341 u32 max_speed = t4_link_fwcap_to_speed(acaps); 9342 struct link_config *lc = &pi->link_cfg; 9343 fw_port_cap32_t new_caps = acaps; 9344 9345 /* If initializing for the first time or if port module changed, 9346 * then overwrite the saved link params with the new port module 9347 * caps. 9348 */ 9349 if (lc->admin_caps == 0 || lc->pcaps != pcaps) { 9350 t4_link_update_fec(pi, max_speed, FEC_AUTO, &new_caps); 9351 lc->admin_caps = new_caps; 9352 } 9353 9354 lc->pcaps = pcaps; 9355 lc->acaps = acaps; 9356 lc->lpacaps = 0; 9357 lc->link_caps = 0; 9358 } 9359 9360 /** 9361 * t4_link_down_rc_str - return a string for a Link Down Reason Code 9362 * @link_down_rc: Link Down Reason Code 9363 * 9364 * Returns a string representation of the Link Down Reason Code. 
9365 */ 9366 const char *t4_link_down_rc_str(unsigned char link_down_rc) 9367 { 9368 static const char * const reason[] = { 9369 "Link Down", 9370 "Remote Fault", 9371 "Auto-negotiation Failure", 9372 "Reserved", 9373 "Insufficient Airflow", 9374 "Unable To Determine Reason", 9375 "No RX Signal Detected", 9376 "Reserved", 9377 }; 9378 9379 if (link_down_rc >= ARRAY_SIZE(reason)) 9380 return "Bad Reason Code"; 9381 9382 return reason[link_down_rc]; 9383 } 9384 9385 /** 9386 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities 9387 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value 9388 * 9389 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new 9390 * 32-bit Port Capabilities value. 9391 */ 9392 static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus) 9393 { 9394 fw_port_cap32_t linkattr = 0; 9395 9396 /* 9397 * Unfortunately the format of the Link Status in the old 9398 * 16-bit Port Information message isn't the same as the 9399 * 16-bit Port Capabilities bitfield used everywhere else ... 
9400 */ 9401 if (lstatus & F_FW_PORT_CMD_RXPAUSE) 9402 linkattr |= FW_PORT_CAP32_FC_RX; 9403 if (lstatus & F_FW_PORT_CMD_TXPAUSE) 9404 linkattr |= FW_PORT_CAP32_FC_TX; 9405 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) 9406 linkattr |= FW_PORT_CAP32_SPEED_100M; 9407 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) 9408 linkattr |= FW_PORT_CAP32_SPEED_1G; 9409 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) 9410 linkattr |= FW_PORT_CAP32_SPEED_10G; 9411 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G)) 9412 linkattr |= FW_PORT_CAP32_SPEED_25G; 9413 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G)) 9414 linkattr |= FW_PORT_CAP32_SPEED_40G; 9415 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G)) 9416 linkattr |= FW_PORT_CAP32_SPEED_100G; 9417 9418 return linkattr; 9419 } 9420 9421 /** 9422 * t4_handle_get_port_info - process a FW reply message 9423 * @pi: the port info 9424 * @rpl: start of the FW message 9425 * 9426 * Processes a GET_PORT_INFO FW reply message. 9427 */ 9428 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) 9429 { 9430 const struct fw_port_cmd *cmd = (const void *)rpl; 9431 int action = G_FW_PORT_CMD_ACTION(be32_to_cpu(cmd->action_to_len16)); 9432 struct adapter *adapter = pi->adapter; 9433 struct link_config *lc = &pi->link_cfg; 9434 int link_ok, linkdnrc; 9435 enum fw_port_type port_type; 9436 enum fw_port_module_type mod_type; 9437 fw_port_cap32_t pcaps, acaps, lpacaps, linkattr; 9438 9439 /* 9440 * Extract the various fields from the Port Information message. 
9441 */ 9442 switch (action) { 9443 case FW_PORT_ACTION_GET_PORT_INFO: { 9444 u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype); 9445 9446 link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS) != 0; 9447 linkdnrc = G_FW_PORT_CMD_LINKDNRC(lstatus); 9448 port_type = G_FW_PORT_CMD_PTYPE(lstatus); 9449 mod_type = G_FW_PORT_CMD_MODTYPE(lstatus); 9450 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap)); 9451 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap)); 9452 lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap)); 9453 linkattr = lstatus_to_fwcap(lstatus); 9454 break; 9455 } 9456 9457 case FW_PORT_ACTION_GET_PORT_INFO32: { 9458 u32 lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32); 9459 9460 link_ok = (lstatus32 & F_FW_PORT_CMD_LSTATUS32) != 0; 9461 linkdnrc = G_FW_PORT_CMD_LINKDNRC32(lstatus32); 9462 port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32); 9463 mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus32); 9464 pcaps = be32_to_cpu(cmd->u.info32.pcaps32); 9465 acaps = be32_to_cpu(cmd->u.info32.acaps32); 9466 lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32); 9467 linkattr = be32_to_cpu(cmd->u.info32.linkattr32); 9468 break; 9469 } 9470 9471 default: 9472 CH_ERR(adapter, "Handle Port Information: Bad Command/Action %#x\n", 9473 be32_to_cpu(cmd->action_to_len16)); 9474 return; 9475 } 9476 9477 /* 9478 * Reset state for communicating new Transceiver Module status and 9479 * whether the OS-dependent layer wants us to redo the current 9480 * "sticky" L1 Configure Link Parameters. 9481 */ 9482 lc->new_module = false; 9483 lc->redo_l1cfg = false; 9484 9485 if (mod_type != pi->mod_type) { 9486 /* 9487 * Some versions of the early T6 Firmware "cheated" when 9488 * handling different Transceiver Modules by changing the 9489 * underlaying Port Type reported to the Host Drivers. As 9490 * such we need to capture whatever Port Type the Firmware 9491 * sends us and record it in case it's different from what we 9492 * were told earlier. 
Unfortunately, since Firmware is 9493 * forever, we'll need to keep this code here forever, but in 9494 * later T6 Firmware it should just be an assignment of the 9495 * same value already recorded. 9496 */ 9497 pi->port_type = port_type; 9498 9499 /* 9500 * Record new Module Type information. 9501 */ 9502 pi->mod_type = mod_type; 9503 9504 /* 9505 * Let the OS-dependent layer know if we have a new 9506 * Transceiver Module inserted. 9507 */ 9508 lc->new_module = t4_is_inserted_mod_type(mod_type); 9509 9510 if (lc->new_module) 9511 t4_init_link_config(pi, pcaps, acaps); 9512 t4_os_portmod_changed(adapter, pi->port_id); 9513 } 9514 9515 if (link_ok != lc->link_ok || acaps != lc->acaps || 9516 lpacaps != lc->lpacaps || linkattr != lc->link_caps) { 9517 /* something changed */ 9518 if (!link_ok && lc->link_ok) { 9519 lc->link_down_rc = linkdnrc; 9520 CH_WARN_RATELIMIT(adapter, 9521 "Port %d link down, reason: %s\n", 9522 pi->tx_chan, t4_link_down_rc_str(linkdnrc)); 9523 } 9524 9525 lc->link_ok = link_ok; 9526 lc->acaps = acaps; 9527 lc->lpacaps = lpacaps; 9528 lc->link_caps = linkattr; 9529 9530 t4_os_link_changed(adapter, pi->port_id, link_ok); 9531 } 9532 9533 /* 9534 * If we have a new Transceiver Module and the OS-dependent code has 9535 * told us that it wants us to redo whatever "sticky" L1 Configuration 9536 * Link Parameters are set, do that now. 9537 */ 9538 if (lc->new_module && lc->redo_l1cfg) { 9539 int ret; 9540 9541 /* 9542 * Save the current L1 Configuration and restore it if an 9543 * error occurs. We probably should fix the l1_cfg*() 9544 * routines not to change the link_config when an error 9545 * occurs ... 
9546 */ 9547 ret = t4_link_l1cfg_ns(adapter, adapter->mbox, pi->lport, lc, 9548 lc->admin_caps); 9549 if (ret) { 9550 CH_WARN(adapter, 9551 "Attempt to update new Transceiver Module settings failed\n"); 9552 } 9553 } 9554 lc->new_module = false; 9555 lc->redo_l1cfg = false; 9556 } 9557 9558 /** 9559 * t4_update_port_info - retrieve and update port information if changed 9560 * @pi: the port_info 9561 * 9562 * We issue a Get Port Information Command to the Firmware and, if 9563 * successful, we check to see if anything is different from what we 9564 * last recorded and update things accordingly. 9565 */ 9566 int t4_update_port_info(struct port_info *pi) 9567 { 9568 unsigned int fw_caps = pi->adapter->params.fw_caps_support; 9569 struct fw_port_cmd port_cmd; 9570 int ret; 9571 9572 memset(&port_cmd, 0, sizeof port_cmd); 9573 port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | 9574 F_FW_CMD_REQUEST | F_FW_CMD_READ | 9575 V_FW_PORT_CMD_PORTID(pi->lport)); 9576 port_cmd.action_to_len16 = cpu_to_be32( 9577 V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 9578 ? FW_PORT_ACTION_GET_PORT_INFO 9579 : FW_PORT_ACTION_GET_PORT_INFO32) | 9580 FW_LEN16(port_cmd)); 9581 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox, 9582 &port_cmd, sizeof(port_cmd), &port_cmd); 9583 if (ret) 9584 return ret; 9585 9586 t4_handle_get_port_info(pi, (__be64 *)&port_cmd); 9587 return 0; 9588 } 9589 9590 /** 9591 * t4_get_link_params - retrieve basic link parameters for given port 9592 * @pi: the port 9593 * @link_okp: value return pointer for link up/down 9594 * @speedp: value return pointer for speed (Mb/s) 9595 * @mtup: value return pointer for mtu 9596 * 9597 * Retrieves basic link parameters for a port: link up/down, speed (Mb/s), 9598 * and MTU for a specified port. A negative error is returned on 9599 * failure; 0 on success. 
9600 */ 9601 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp, 9602 unsigned int *speedp, unsigned int *mtup) 9603 { 9604 unsigned int fw_caps = pi->adapter->params.fw_caps_support; 9605 struct fw_port_cmd port_cmd; 9606 unsigned int action, link_ok, mtu; 9607 fw_port_cap32_t linkattr; 9608 int ret; 9609 9610 memset(&port_cmd, 0, sizeof port_cmd); 9611 port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | 9612 F_FW_CMD_REQUEST | F_FW_CMD_READ | 9613 V_FW_PORT_CMD_PORTID(pi->tx_chan)); 9614 action = (fw_caps == FW_CAPS16 9615 ? FW_PORT_ACTION_GET_PORT_INFO 9616 : FW_PORT_ACTION_GET_PORT_INFO32); 9617 port_cmd.action_to_len16 = cpu_to_be32( 9618 V_FW_PORT_CMD_ACTION(action) | 9619 FW_LEN16(port_cmd)); 9620 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox, 9621 &port_cmd, sizeof(port_cmd), &port_cmd); 9622 if (ret) 9623 return ret; 9624 9625 if (action == FW_PORT_ACTION_GET_PORT_INFO) { 9626 u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype); 9627 9628 link_ok = !!(lstatus & F_FW_PORT_CMD_LSTATUS); 9629 linkattr = lstatus_to_fwcap(lstatus); 9630 mtu = be16_to_cpu(port_cmd.u.info.mtu);; 9631 } else { 9632 u32 lstatus32 = be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32); 9633 9634 link_ok = !!(lstatus32 & F_FW_PORT_CMD_LSTATUS32); 9635 linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32); 9636 mtu = G_FW_PORT_CMD_MTU32( 9637 be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32)); 9638 } 9639 9640 *link_okp = link_ok; 9641 *speedp = t4_link_fwcap_to_speed(linkattr); 9642 *mtup = mtu; 9643 9644 return 0; 9645 } 9646 9647 /** 9648 * t4_handle_fw_rpl - process a FW reply message 9649 * @adap: the adapter 9650 * @rpl: start of the FW message 9651 * 9652 * Processes a FW message, such as link state change messages. 9653 */ 9654 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) 9655 { 9656 u8 opcode = *(const u8 *)rpl; 9657 9658 /* 9659 * This might be a port command ... this simplifies the following 9660 * conditionals ... 
We can get away with pre-dereferencing 9661 * action_to_len16 because it's in the first 16 bytes and all messages 9662 * will be at least that long. 9663 */ 9664 const struct fw_port_cmd *p = (const void *)rpl; 9665 unsigned int action = 9666 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16)); 9667 9668 if (opcode == FW_PORT_CMD && 9669 (action == FW_PORT_ACTION_GET_PORT_INFO || 9670 action == FW_PORT_ACTION_GET_PORT_INFO32)) { 9671 int i; 9672 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid)); 9673 struct port_info *pi = NULL; 9674 9675 for_each_port(adap, i) { 9676 pi = adap2pinfo(adap, i); 9677 if (pi->lport == chan) 9678 break; 9679 } 9680 9681 t4_handle_get_port_info(pi, rpl); 9682 } else { 9683 CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode); 9684 return -EINVAL; 9685 } 9686 return 0; 9687 } 9688 9689 /** 9690 * get_pci_mode - determine a card's PCI mode 9691 * @adapter: the adapter 9692 * @p: where to store the PCI settings 9693 * 9694 * Determines a card's PCI mode and associated parameters, such as speed 9695 * and width. 9696 */ 9697 static void get_pci_mode(struct adapter *adapter, 9698 struct pci_params *p) 9699 { 9700 u16 val; 9701 u32 pcie_cap; 9702 9703 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP); 9704 if (pcie_cap) { 9705 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val); 9706 p->speed = val & PCI_EXP_LNKSTA_CLS; 9707 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4; 9708 } 9709 } 9710 9711 /** 9712 * t4_wait_dev_ready - wait till to reads of registers work 9713 * 9714 * Right after the device is RESET is can take a small amount of time 9715 * for it to respond to register reads. Until then, all reads will 9716 * return either 0xff...ff or 0xee...ee. Return an error if reads 9717 * don't work within a reasonable time frame. 
9718 */ 9719 int t4_wait_dev_ready(struct adapter *adapter) 9720 { 9721 u32 whoami; 9722 9723 whoami = t4_read_reg(adapter, A_PL_WHOAMI); 9724 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS) 9725 return 0; 9726 9727 msleep(500); 9728 whoami = t4_read_reg(adapter, A_PL_WHOAMI); 9729 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS) 9730 return 0; 9731 9732 CH_ERR(adapter, "Device didn't become ready for access, " 9733 "whoami = %#x\n", whoami); 9734 return -EIO; 9735 } 9736 9737 struct flash_desc { 9738 u32 vendor_and_model_id; 9739 u32 size_mb; 9740 }; 9741 9742 int t4_get_flash_params(struct adapter *adapter) 9743 { 9744 /* 9745 * Table for non-standard supported Flash parts. Note, all Flash 9746 * parts must have 64KB sectors. 9747 */ 9748 static struct flash_desc supported_flash[] = { 9749 { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */ 9750 }; 9751 9752 int ret; 9753 u32 flashid = 0; 9754 unsigned int part, manufacturer; 9755 unsigned int density, size = 0; 9756 9757 9758 /* 9759 * Issue a Read ID Command to the Flash part. We decode supported 9760 * Flash parts and their sizes from this. There's a newer Query 9761 * Command which can retrieve detailed geometry information but many 9762 * Flash parts don't support it. 9763 */ 9764 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID); 9765 if (!ret) 9766 ret = sf1_read(adapter, 3, 0, 1, &flashid); 9767 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 9768 if (ret < 0) 9769 return ret; 9770 9771 /* 9772 * Check to see if it's one of our non-standard supported Flash parts. 9773 */ 9774 for (part = 0; part < ARRAY_SIZE(supported_flash); part++) 9775 if (supported_flash[part].vendor_and_model_id == flashid) { 9776 adapter->params.sf_size = 9777 supported_flash[part].size_mb; 9778 adapter->params.sf_nsec = 9779 adapter->params.sf_size / SF_SEC_SIZE; 9780 goto found; 9781 } 9782 9783 /* 9784 * Decode Flash part size. 
The code below looks repetative with 9785 * common encodings, but that's not guaranteed in the JEDEC 9786 * specification for the Read JADEC ID command. The only thing that 9787 * we're guaranteed by the JADEC specification is where the 9788 * Manufacturer ID is in the returned result. After that each 9789 * Manufacturer ~could~ encode things completely differently. 9790 * Note, all Flash parts must have 64KB sectors. 9791 */ 9792 manufacturer = flashid & 0xff; 9793 switch (manufacturer) { 9794 case 0x20: { /* Micron/Numonix */ 9795 /* 9796 * This Density -> Size decoding table is taken from Micron 9797 * Data Sheets. 9798 */ 9799 density = (flashid >> 16) & 0xff; 9800 switch (density) { 9801 case 0x14: size = 1 << 20; break; /* 1MB */ 9802 case 0x15: size = 1 << 21; break; /* 2MB */ 9803 case 0x16: size = 1 << 22; break; /* 4MB */ 9804 case 0x17: size = 1 << 23; break; /* 8MB */ 9805 case 0x18: size = 1 << 24; break; /* 16MB */ 9806 case 0x19: size = 1 << 25; break; /* 32MB */ 9807 case 0x20: size = 1 << 26; break; /* 64MB */ 9808 case 0x21: size = 1 << 27; break; /* 128MB */ 9809 case 0x22: size = 1 << 28; break; /* 256MB */ 9810 } 9811 break; 9812 } 9813 9814 case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */ 9815 /* 9816 * This Density -> Size decoding table is taken from ISSI 9817 * Data Sheets. 9818 */ 9819 density = (flashid >> 16) & 0xff; 9820 switch (density) { 9821 case 0x16: size = 1 << 25; break; /* 32MB */ 9822 case 0x17: size = 1 << 26; break; /* 64MB */ 9823 } 9824 break; 9825 } 9826 9827 case 0xc2: { /* Macronix */ 9828 /* 9829 * This Density -> Size decoding table is taken from Macronix 9830 * Data Sheets. 9831 */ 9832 density = (flashid >> 16) & 0xff; 9833 switch (density) { 9834 case 0x17: size = 1 << 23; break; /* 8MB */ 9835 case 0x18: size = 1 << 24; break; /* 16MB */ 9836 } 9837 break; 9838 } 9839 9840 case 0xef: { /* Winbond */ 9841 /* 9842 * This Density -> Size decoding table is taken from Winbond 9843 * Data Sheets. 
9844 */ 9845 density = (flashid >> 16) & 0xff; 9846 switch (density) { 9847 case 0x17: size = 1 << 23; break; /* 8MB */ 9848 case 0x18: size = 1 << 24; break; /* 16MB */ 9849 } 9850 break; 9851 } 9852 } 9853 9854 /* 9855 * If we didn't recognize the FLASH part, that's no real issue: the 9856 * Hardware/Software contract says that Hardware will _*ALWAYS*_ 9857 * use a FLASH part which is at least 4MB in size and has 64KB 9858 * sectors. The unrecognized FLASH part is likely to be much larger 9859 * than 4MB, but that's all we really need. 9860 */ 9861 if (size == 0) { 9862 CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid); 9863 size = 1 << 22; 9864 } 9865 9866 /* 9867 * Store decoded Flash size and fall through into vetting code. 9868 */ 9869 adapter->params.sf_size = size; 9870 adapter->params.sf_nsec = size / SF_SEC_SIZE; 9871 9872 found: 9873 /* 9874 * We should ~probably~ reject adapters with FLASHes which are too 9875 * small but we have some legacy FPGAs with small FLASHes that we'd 9876 * still like to use. So instead we emit a scary message ... 
9877 */ 9878 if (adapter->params.sf_size < FLASH_MIN_SIZE) 9879 CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n", 9880 flashid, adapter->params.sf_size, FLASH_MIN_SIZE); 9881 9882 return 0; 9883 } 9884 9885 static void set_pcie_completion_timeout(struct adapter *adapter, 9886 u8 range) 9887 { 9888 u16 val; 9889 u32 pcie_cap; 9890 9891 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP); 9892 if (pcie_cap) { 9893 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val); 9894 val &= 0xfff0; 9895 val |= range ; 9896 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val); 9897 } 9898 } 9899 9900 /** 9901 * t4_get_chip_type - Determine chip type from device ID 9902 * @adap: the adapter 9903 * @ver: adapter version 9904 */ 9905 enum chip_type t4_get_chip_type(struct adapter *adap, int ver) 9906 { 9907 enum chip_type chip = 0; 9908 u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV)); 9909 9910 /* Retrieve adapter's device ID */ 9911 switch (ver) { 9912 case CHELSIO_T4_FPGA: 9913 chip |= CHELSIO_CHIP_FPGA; 9914 /*FALLTHROUGH*/ 9915 case CHELSIO_T4: 9916 chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev); 9917 break; 9918 case CHELSIO_T5_FPGA: 9919 chip |= CHELSIO_CHIP_FPGA; 9920 /*FALLTHROUGH*/ 9921 case CHELSIO_T5: 9922 chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev); 9923 break; 9924 case CHELSIO_T6_FPGA: 9925 chip |= CHELSIO_CHIP_FPGA; 9926 /*FALLTHROUGH*/ 9927 case CHELSIO_T6: 9928 chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev); 9929 break; 9930 default: 9931 CH_ERR(adap, "Device %d is not supported\n", 9932 adap->params.pci.device_id); 9933 return -EINVAL; 9934 } 9935 9936 /* T4A1 chip is no longer supported */ 9937 if (chip == T4_A1) { 9938 CH_ALERT(adap, "T4 rev 1 chip is no longer supported\n"); 9939 return -EINVAL; 9940 } 9941 return chip; 9942 } 9943 9944 /** 9945 * t4_prep_pf - prepare SW and HW for PF operation 9946 * @adapter: the adapter 9947 * 9948 * Initialize adapter SW state for the various HW modules, set initial 9949 * values 
for some adapter tunables on each PF.
 */
int t4_prep_pf(struct adapter *adapter)
{
	int ret, ver;

	ret = t4_wait_dev_ready(adapter);
	if (ret < 0)
		return ret;

	get_pci_mode(adapter, &adapter->params.pci);

	/* Retrieve the adapter's device and vendor IDs so we can determine
	 * the chip generation below.
	 */
	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID,
			    &adapter->params.pci.device_id);
	t4_os_pci_read_cfg2(adapter, PCI_VENDOR_ID,
			    &adapter->params.pci.vendor_id);

	ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
	adapter->params.chip = t4_get_chip_type(adapter, ver);

	/* Fill in the chip-generation-specific architectural parameters. */
	if (is_t4(adapter->params.chip)) {
		adapter->params.arch.sge_fl_db = F_DBPRIO;
		adapter->params.arch.mps_tcam_size =
			NUM_MPS_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		/* Congestion map is for 4 channels so that
		 * MPS can have 4 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 2;
	} else if (is_t5(adapter->params.chip)) {
		adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
		adapter->params.arch.mps_tcam_size =
			NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		adapter->params.arch.cng_ch_bits_log = 2;
	} else if (is_t6(adapter->params.chip)) {
		adapter->params.arch.sge_fl_db = 0;
		adapter->params.arch.mps_tcam_size =
			NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 256;
		adapter->params.arch.nchan = 2;
		adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
		adapter->params.arch.vfcount = 256;
		/* Congestion map will be for 2 channels so that
		 * MPS can have 8 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 3;
	} else {
		CH_ERR(adapter, "Device %d is not supported\n",
		       adapter->params.pci.device_id);
		return -EINVAL;
	}

	adapter->params.pci.vpd_cap_addr =
		t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	/* FPGAs have twice the CIM Logic Analyzer space of ASICs. */
	if (is_fpga(adapter->params.chip))
		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
	else
		adapter->params.cim_la_size = CIMLA_SIZE;

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/* Default port and clock for debugging in case we can't reach FW. */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}

/**
 * t4_prep_master_pf - prepare SW for master PF operations
 * @adapter: the adapter
 *
 */
int t4_prep_master_pf(struct adapter *adapter)
{
	int ret;

	ret = t4_prep_pf(adapter);
	if (ret < 0)
		return ret;

	/* The master PF additionally needs the serial Flash geometry. */
	ret = t4_get_flash_params(adapter);
	if (ret < 0) {
		CH_ERR(adapter,
		       "Unable to retrieve Flash parameters ret = %d\n", -ret);
		return ret;
	}

	return 0;
}

/**
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 * @reset: if true perform a HW reset
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables.
10063 */ 10064 int t4_prep_adapter(struct adapter *adapter, bool reset) 10065 { 10066 return t4_prep_master_pf(adapter); 10067 } 10068 10069 /** 10070 * t4_shutdown_adapter - shut down adapter, host & wire 10071 * @adapter: the adapter 10072 * 10073 * Perform an emergency shutdown of the adapter and stop it from 10074 * continuing any further communication on the ports or DMA to the 10075 * host. This is typically used when the adapter and/or firmware 10076 * have crashed and we want to prevent any further accidental 10077 * communication with the rest of the world. This will also force 10078 * the port Link Status to go down -- if register writes work -- 10079 * which should help our peers figure out that we're down. 10080 */ 10081 int t4_shutdown_adapter(struct adapter *adapter) 10082 { 10083 int port; 10084 10085 t4_intr_disable(adapter); 10086 t4_write_reg(adapter, A_DBG_GPIO_EN, 0); 10087 for_each_port(adapter, port) { 10088 u32 a_port_cfg = is_t4(adapter->params.chip) ? 10089 PORT_REG(port, A_XGMAC_PORT_CFG) : 10090 T5_PORT_REG(port, A_MAC_PORT_CFG); 10091 10092 t4_write_reg(adapter, a_port_cfg, 10093 t4_read_reg(adapter, a_port_cfg) 10094 & ~V_SIGNAL_DET(1)); 10095 } 10096 t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0); 10097 10098 return 0; 10099 } 10100 10101 /** 10102 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information 10103 * @adapter: the adapter 10104 * @qid: the Queue ID 10105 * @qtype: the Ingress or Egress type for @qid 10106 * @user: true if this request is for a user mode queue 10107 * @pbar2_qoffset: BAR2 Queue Offset 10108 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues 10109 * 10110 * Returns the BAR2 SGE Queue Registers information associated with the 10111 * indicated Absolute Queue ID. These are passed back in return value 10112 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue 10113 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues. 
10114 * 10115 * This may return an error which indicates that BAR2 SGE Queue 10116 * registers aren't available. If an error is not returned, then the 10117 * following values are returned: 10118 * 10119 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers 10120 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid 10121 * 10122 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which 10123 * require the "Inferred Queue ID" ability may be used. E.g. the 10124 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0, 10125 * then these "Inferred Queue ID" register may not be used. 10126 */ 10127 int t4_bar2_sge_qregs(struct adapter *adapter, 10128 unsigned int qid, 10129 enum t4_bar2_qtype qtype, 10130 int user, 10131 u64 *pbar2_qoffset, 10132 unsigned int *pbar2_qid) 10133 { 10134 unsigned int page_shift, page_size, qpp_shift, qpp_mask; 10135 u64 bar2_page_offset, bar2_qoffset; 10136 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred; 10137 10138 /* T4 doesn't support BAR2 SGE Queue registers for kernel 10139 * mode queues. 10140 */ 10141 if (!user && is_t4(adapter->params.chip)) 10142 return -EINVAL; 10143 10144 /* Get our SGE Page Size parameters. 10145 */ 10146 page_shift = adapter->params.sge.hps + 10; 10147 page_size = 1 << page_shift; 10148 10149 /* Get the right Queues per Page parameters for our Queue. 10150 */ 10151 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS 10152 ? adapter->params.sge.eq_qpp 10153 : adapter->params.sge.iq_qpp); 10154 qpp_mask = (1 << qpp_shift) - 1; 10155 10156 /* Calculate the basics of the BAR2 SGE Queue register area: 10157 * o The BAR2 page the Queue registers will be in. 10158 * o The BAR2 Queue ID. 10159 * o The BAR2 Queue ID Offset into the BAR2 page. 
10160 */ 10161 bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift); 10162 bar2_qid = qid & qpp_mask; 10163 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; 10164 10165 /* If the BAR2 Queue ID Offset is less than the Page Size, then the 10166 * hardware will infer the Absolute Queue ID simply from the writes to 10167 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a 10168 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply 10169 * write to the first BAR2 SGE Queue Area within the BAR2 Page with 10170 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID 10171 * from the BAR2 Page and BAR2 Queue ID. 10172 * 10173 * One important censequence of this is that some BAR2 SGE registers 10174 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID 10175 * there. But other registers synthesize the SGE Queue ID purely 10176 * from the writes to the registers -- the Write Combined Doorbell 10177 * Buffer is a good example. These BAR2 SGE Registers are only 10178 * available for those BAR2 SGE Register areas where the SGE Absolute 10179 * Queue ID can be inferred from simple writes. 10180 */ 10181 bar2_qoffset = bar2_page_offset; 10182 bar2_qinferred = (bar2_qid_offset < page_size); 10183 if (bar2_qinferred) { 10184 bar2_qoffset += bar2_qid_offset; 10185 bar2_qid = 0; 10186 } 10187 10188 *pbar2_qoffset = bar2_qoffset; 10189 *pbar2_qid = bar2_qid; 10190 return 0; 10191 } 10192 10193 /** 10194 * t4_init_devlog_params - initialize adapter->params.devlog 10195 * @adap: the adapter 10196 * @fw_attach: whether we can talk to the firmware 10197 * 10198 * Initialize various fields of the adapter's Firmware Device Log 10199 * Parameters structure. 
10200 */ 10201 int t4_init_devlog_params(struct adapter *adap, int fw_attach) 10202 { 10203 struct devlog_params *dparams = &adap->params.devlog; 10204 u32 pf_dparams; 10205 unsigned int devlog_meminfo; 10206 struct fw_devlog_cmd devlog_cmd; 10207 int ret; 10208 10209 /* If we're dealing with newer firmware, the Device Log Paramerters 10210 * are stored in a designated register which allows us to access the 10211 * Device Log even if we can't talk to the firmware. 10212 */ 10213 pf_dparams = 10214 t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG)); 10215 if (pf_dparams) { 10216 unsigned int nentries, nentries128; 10217 10218 dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams); 10219 dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4; 10220 10221 nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams); 10222 nentries = (nentries128 + 1) * 128; 10223 dparams->size = nentries * sizeof(struct fw_devlog_e); 10224 10225 return 0; 10226 } 10227 10228 /* 10229 * For any failing returns ... 10230 */ 10231 memset(dparams, 0, sizeof *dparams); 10232 10233 /* 10234 * If we can't talk to the firmware, there's really nothing we can do 10235 * at this point. 10236 */ 10237 if (!fw_attach) 10238 return -ENXIO; 10239 10240 /* Otherwise, ask the firmware for it's Device Log Parameters. 
10241 */ 10242 memset(&devlog_cmd, 0, sizeof devlog_cmd); 10243 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) | 10244 F_FW_CMD_REQUEST | F_FW_CMD_READ); 10245 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd)); 10246 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd), 10247 &devlog_cmd); 10248 if (ret) 10249 return ret; 10250 10251 devlog_meminfo = 10252 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog); 10253 dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo); 10254 dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4; 10255 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog); 10256 10257 return 0; 10258 } 10259 10260 /** 10261 * t4_init_sge_params - initialize adap->params.sge 10262 * @adapter: the adapter 10263 * 10264 * Initialize various fields of the adapter's SGE Parameters structure. 10265 */ 10266 int t4_init_sge_params(struct adapter *adapter) 10267 { 10268 struct sge_params *sge_params = &adapter->params.sge; 10269 u32 hps, qpp; 10270 unsigned int s_hps, s_qpp; 10271 10272 /* Extract the SGE Page Size for our PF. 10273 */ 10274 hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE); 10275 s_hps = (S_HOSTPAGESIZEPF0 + 10276 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf); 10277 sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0); 10278 10279 /* Extract the SGE Egress and Ingess Queues Per Page for our PF. 
 */
	s_qpp = (S_QUEUESPERPAGEPF0 +
		 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
	qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
	sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
	qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
	sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);

	return 0;
}

/**
 *	t4_init_tp_params - initialize adap->params.tp
 *	@adap: the adapter
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
{
	u32 param, val, v;
	int chan, ret;

	/* Cache the TP timer and delayed-ACK timestamp resolutions. */
	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
	adap->params.tp.tre = G_TIMERRESOLUTION(v);
	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < NCHAN; chan++)
		adap->params.tp.tx_modq[chan] = chan;

	/* Cache the adapter's Compressed Filter Mode/Mask and global Ingress
	 * Configuration.
	 */

	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
		 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK));

	/* Read current value */
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
			      &param, &val);
	if (ret == 0) {
		CH_INFO(adap,
			"Current filter mode/mask 0x%x:0x%x\n",
			G_FW_PARAMS_PARAM_FILTER_MODE(val),
			G_FW_PARAMS_PARAM_FILTER_MASK(val));
		adap->params.tp.vlan_pri_map = G_FW_PARAMS_PARAM_FILTER_MODE(val);
		adap->params.tp.filter_mask = G_FW_PARAMS_PARAM_FILTER_MASK(val);
	} else {
		CH_WARN(adap,
			"Reading filter mode/mask not supported via fw api, "
			"falling back to older indirect-reg-read \n");

		/* Incase of older-fw (which doesn't expose the api
		 * FW_PARAM_DEV_FILTER_MODE_MASK) and newer-driver (which uses
		 * the fw api) combination, fall-back to older method of reading
		 * the filter mode from indirect-register
		 */
		t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
			       A_TP_VLAN_PRI_MAP, sleep_ok);

		/* With the older-fw and newer-driver combination we might run
		 * into an issue when user wants to use hash filter region but
		 * the filter_mask is zero, in this case filter_mask validation
		 * is tough. To avoid that we set the filter_mask same as filter
		 * mode, which will behave exactly as the older way of ignoring
		 * the filter mask validation.
		 */
		adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map;
	}

	t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
		       A_TP_INGRESS_CONFIG, sleep_ok);

	/* For T6, cache the adapter's compressed error vector
	 * and passing outer header info for encapsulated packets.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
		adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
	}

	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	adap->params.tp.tos_shift = t4_filter_field_shift(adap, F_TOS);
	adap->params.tp.protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
	adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
								F_ETHERTYPE);
	adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
							       F_MACMATCH);
	adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
							       F_MPSHITTYPE);
	adap->params.tp.frag_shift = t4_filter_field_shift(adap,
							   F_FRAGMENTATION);
	return 0;
}

/**
 *	t4_filter_field_shift - calculate filter field shift
 *	@adap: the adapter
 *	@filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
 *
 *	Return the shift position of a filter field within the Compressed
 *	Filter Tuple.  The filter field is specified via its selection bit
 *	within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
10392 */ 10393 int t4_filter_field_shift(const struct adapter *adap, int filter_sel) 10394 { 10395 unsigned int filter_mode = adap->params.tp.vlan_pri_map; 10396 unsigned int sel; 10397 int field_shift; 10398 10399 if ((filter_mode & filter_sel) == 0) 10400 return -1; 10401 10402 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) { 10403 switch (filter_mode & sel) { 10404 case F_FCOE: 10405 field_shift += W_FT_FCOE; 10406 break; 10407 case F_PORT: 10408 field_shift += W_FT_PORT; 10409 break; 10410 case F_VNIC_ID: 10411 field_shift += W_FT_VNIC_ID; 10412 break; 10413 case F_VLAN: 10414 field_shift += W_FT_VLAN; 10415 break; 10416 case F_TOS: 10417 field_shift += W_FT_TOS; 10418 break; 10419 case F_PROTOCOL: 10420 field_shift += W_FT_PROTOCOL; 10421 break; 10422 case F_ETHERTYPE: 10423 field_shift += W_FT_ETHERTYPE; 10424 break; 10425 case F_MACMATCH: 10426 field_shift += W_FT_MACMATCH; 10427 break; 10428 case F_MPSHITTYPE: 10429 field_shift += W_FT_MPSHITTYPE; 10430 break; 10431 case F_FRAGMENTATION: 10432 field_shift += W_FT_FRAGMENTATION; 10433 break; 10434 } 10435 } 10436 return field_shift; 10437 } 10438 10439 /** 10440 * t4_create_filter_info - return Compressed Filter Value/Mask tuple 10441 * @adapter: the adapter 10442 * @filter_value: Filter Value return value pointer 10443 * @filter_mask: Filter Mask return value pointer 10444 * @fcoe: FCoE filter selection 10445 * @port: physical port filter selection 10446 * @vnic: Virtual NIC ID filter selection 10447 * @vlan: VLAN ID filter selection 10448 * @vlan_pcp: VLAN Priority Code Point 10449 * @vlan_dei: VLAN Drop Eligibility Indicator 10450 * @tos: Type Of Server filter selection 10451 * @protocol: IP Protocol filter selection 10452 * @ethertype: Ethernet Type filter selection 10453 * @macmatch: MPS MAC Index filter selection 10454 * @matchtype: MPS Hit Type filter selection 10455 * @frag: IP Fragmentation filter selection 10456 * 10457 * Construct a Compressed Filter Value/Mask tuple based on a set of 
10458 * "filter selection" values. For each passed filter selection value 10459 * which is greater than or equal to 0, we put that value into the 10460 * constructed Filter Value and the appropriate mask into the Filter 10461 * Mask. If a filter selections is specified which is not currently 10462 * configured into the hardware, an error will be returned. Otherwise 10463 * the constructed FIlter Value/Mask tuple will be returned via the 10464 * specified return value pointers and success will be returned. 10465 * 10466 * All filter selection values and the returned Filter Value/Mask values 10467 * are in Host-Endian format. 10468 */ 10469 int t4_create_filter_info(const struct adapter *adapter, 10470 u64 *filter_value, u64 *filter_mask, 10471 int fcoe, int port, int vnic, 10472 int vlan, int vlan_pcp, int vlan_dei, 10473 int tos, int protocol, int ethertype, 10474 int macmatch, int matchtype, int frag) 10475 { 10476 const struct tp_params *tp = &adapter->params.tp; 10477 u64 v, m; 10478 10479 /* 10480 * If any selected filter field isn't enabled, return an error. 10481 */ 10482 #define BAD_FILTER(__field) \ 10483 ((__field) >= 0 && tp->__field##_shift < 0) 10484 if (BAD_FILTER(fcoe) || 10485 BAD_FILTER(port) || 10486 BAD_FILTER(vnic) || 10487 BAD_FILTER(vlan) || 10488 BAD_FILTER(tos) || 10489 BAD_FILTER(protocol) || 10490 BAD_FILTER(ethertype) || 10491 BAD_FILTER(macmatch) || 10492 BAD_FILTER(matchtype) || 10493 BAD_FILTER(frag)) 10494 return -EINVAL; 10495 #undef BAD_FILTER 10496 10497 /* 10498 * We have to have VLAN ID selected if we want to also select on 10499 * either the Priority Code Point or Drop Eligibility Indicator 10500 * fields. 10501 */ 10502 if ((vlan_pcp >= 0 || vlan_dei >= 0) && vlan < 0) 10503 return -EINVAL; 10504 10505 /* 10506 * Construct Filter Value and Mask. 
10507 */ 10508 v = m = 0; 10509 #define SET_FILTER_FIELD(__field, __width) \ 10510 do { \ 10511 if ((__field) >= 0) { \ 10512 const int shift = tp->__field##_shift; \ 10513 \ 10514 v |= (__field) << shift; \ 10515 m |= ((1ULL << (__width)) - 1) << shift; \ 10516 } \ 10517 } while (0) 10518 SET_FILTER_FIELD(fcoe, W_FT_FCOE); 10519 SET_FILTER_FIELD(port, W_FT_PORT); 10520 SET_FILTER_FIELD(tos, W_FT_TOS); 10521 SET_FILTER_FIELD(protocol, W_FT_PROTOCOL); 10522 SET_FILTER_FIELD(ethertype, W_FT_ETHERTYPE); 10523 SET_FILTER_FIELD(macmatch, W_FT_MACMATCH); 10524 SET_FILTER_FIELD(matchtype, W_FT_MPSHITTYPE); 10525 SET_FILTER_FIELD(frag, W_FT_FRAGMENTATION); 10526 #undef SET_FILTER_FIELD 10527 10528 /* 10529 * We handle VNIC ID and VLANs separately because they're slightly 10530 * different than the rest of the fields. Both require that a 10531 * corresponding "valid" bit be set in the Filter Value and Mask. 10532 * These bits are in the top bit of the field. Additionally, we can 10533 * select the Priority Code Point and Drop Eligibility Indicator 10534 * fields for VLANs as an option. Remember that the format of a VLAN 10535 * Tag is: 10536 * 10537 * bits: 3 1 12 10538 * +---+-+------------+ 10539 * |PCP|D| VLAN ID | 10540 * +---+-+------------+ 10541 */ 10542 if (vnic >= 0) { 10543 v |= ((1ULL << (W_FT_VNIC_ID-1)) | vnic) << tp->vnic_shift; 10544 m |= ((1ULL << W_FT_VNIC_ID) - 1) << tp->vnic_shift; 10545 } 10546 if (vlan >= 0) { 10547 v |= ((1ULL << (W_FT_VLAN-1)) | vlan) << tp->vlan_shift; 10548 m |= ((1ULL << (W_FT_VLAN-1)) | 0xfff) << tp->vlan_shift; 10549 10550 if (vlan_dei >= 0) { 10551 v |= vlan_dei << (tp->vlan_shift + 12); 10552 m |= 0x7 << (tp->vlan_shift + 12); 10553 } 10554 if (vlan_pcp >= 0) { 10555 v |= vlan_pcp << (tp->vlan_shift + 13); 10556 m |= 0x7 << (tp->vlan_shift + 13); 10557 } 10558 } 10559 10560 /* 10561 * Pass back computed Filter Value and Mask; return success. 
10562 */ 10563 *filter_value = v; 10564 *filter_mask = m; 10565 return 0; 10566 } 10567 10568 int t4_init_rss_mode(struct adapter *adap, int mbox) 10569 { 10570 int i, ret; 10571 struct fw_rss_vi_config_cmd rvc; 10572 10573 memset(&rvc, 0, sizeof(rvc)); 10574 10575 for_each_port(adap, i) { 10576 struct port_info *p = adap2pinfo(adap, i); 10577 rvc.op_to_viid = 10578 cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | 10579 F_FW_CMD_REQUEST | F_FW_CMD_READ | 10580 V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid)); 10581 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc)); 10582 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc); 10583 if (ret) 10584 return ret; 10585 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen); 10586 } 10587 return 0; 10588 } 10589 10590 static int t4_init_portmirror(struct port_info *pi, int mbox, 10591 int port, int pf, int vf) 10592 { 10593 struct adapter *adapter = pi->adapter; 10594 int ret; 10595 u8 vivld = 0, vin = 0; 10596 10597 ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, NULL, NULL, 10598 &vivld, &vin); 10599 if (ret < 0) 10600 return ret; 10601 10602 pi->viid_mirror = ret; 10603 10604 /* If fw supports returning the VIN as part of FW_VI_CMD, 10605 * save the returned values. 10606 */ 10607 if (adapter->params.viid_smt_extn_support) { 10608 pi->vivld_mirror = vivld; 10609 pi->vin_mirror = vin; 10610 } else { 10611 /* Retrieve the values from VIID */ 10612 pi->vivld_mirror = G_FW_VIID_VIVLD(pi->viid_mirror); 10613 pi->vin_mirror = G_FW_VIID_VIN(pi->viid_mirror); 10614 } 10615 10616 CH_INFO(pi->adapter, "Port %d Traffic Mirror PF = %u; VF = %u\n", 10617 port, pf, pi->vin_mirror); 10618 return 0; 10619 } 10620 10621 int t4_mirror_init(struct adapter *adap, int mbox, int pf, int vf, 10622 bool enable_ringbb) 10623 { 10624 int ret, i, j = 0; 10625 10626 for_each_port(adap, i) { 10627 struct port_info *pi = adap2pinfo(adap, i); 10628 10629 /* We want mirroring only on Port0 for ringbackbone 10630 * configuration. 
 */
		if (enable_ringbb && i)
			break;
		/* Skip physical port numbers absent from portvec. */
		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		ret = t4_init_portmirror(pi, mbox, j, pf, vf);
		if (ret)
			return ret;
		j++;
	}
	return 0;
}

/**
 *	t4_init_portinfo_viid - allocate a virtual interface and initialize
 *	port_info
 *	@pi: the port_info
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@mac: the MAC address of the VI
 *	@alloc_vi: Indicator to alloc VI
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC address of the VI as assigned by FW.
 *	@mac should be large enough to hold an Ethernet address.
 *	Returns < 0 on error.
 */
int t4_init_portinfo_viid(struct port_info *pi, int mbox,
			  int port, int pf, int vf, u8 mac[], bool alloc_vi)
{
	struct adapter *adapter = pi->adapter;
	unsigned int fw_caps = adapter->params.fw_caps_support;
	struct fw_port_cmd cmd;
	unsigned int rss_size;
	enum fw_port_type port_type;
	int mdio_addr;
	fw_port_cap32_t pcaps, acaps;
	int ret;

	/*
	 * If we haven't yet determined whether we're talking to Firmware
	 * which knows the new 32-bit Port Capabilities, it's time to find
	 * out now.  This will also tell new Firmware to send us Port Status
	 * Updates using the new 32-bit Port Capabilities version of the
	 * Port Information message.
	 */
	if (fw_caps == FW_CAPS_UNKNOWN) {
		u32 param, val;

		param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
		val = 1;
		ret = t4_set_params(adapter, mbox, pf, vf, 1, &param, &val);
		/* If the FW rejects the parameter it only speaks 16-bit
		 * capabilities; cache the result for subsequent calls.
		 */
		fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
		adapter->params.fw_caps_support = fw_caps;
	}

	/* Issue a GET_PORT_INFO (16- or 32-bit flavor) read for @port. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				       F_FW_CMD_REQUEST | F_FW_CMD_READ |
				       V_FW_PORT_CMD_PORTID(port));
	cmd.action_to_len16 = cpu_to_be32(
		V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16
				     ? FW_PORT_ACTION_GET_PORT_INFO
				     : FW_PORT_ACTION_GET_PORT_INFO32) |
		FW_LEN16(cmd));
	ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
	if (ret)
		return ret;

	/*
	 * Extract the various fields from the Port Information message.
	 */
	if (fw_caps == FW_CAPS16) {
		u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype);

		port_type = G_FW_PORT_CMD_PTYPE(lstatus);
		mdio_addr = ((lstatus & F_FW_PORT_CMD_MDIOCAP)
			     ? G_FW_PORT_CMD_MDIOADDR(lstatus)
			     : -1);
		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap));
		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap));
	} else {
		u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);

		port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
		mdio_addr = ((lstatus32 & F_FW_PORT_CMD_MDIOCAP32)
			     ? G_FW_PORT_CMD_MDIOADDR32(lstatus32)
			     : -1);
		pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
		acaps = be32_to_cpu(cmd.u.info32.acaps32);
	}

	if (alloc_vi) {
		u8 vivld = 0, vin = 0;

		ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac,
				  &rss_size, &vivld, &vin);
		if (ret < 0)
			return ret;

		pi->viid = ret;
		pi->rss_size = rss_size;

		/* If fw supports returning the VIN as part of FW_VI_CMD,
		 * save the returned values.
		 */
		if (adapter->params.viid_smt_extn_support) {
			pi->vivld = vivld;
			pi->vin = vin;
		} else {
			/* Retrieve the values from VIID */
			pi->vivld = G_FW_VIID_VIVLD(pi->viid);
			pi->vin = G_FW_VIID_VIN(pi->viid);
		}
	}

	pi->tx_chan = port;
	pi->lport = port;
	pi->rx_chan = port;
	pi->rx_cchan = t4_get_tp_e2c_map(pi->adapter, port);

	pi->port_type = port_type;
	pi->mdio_addr = mdio_addr;
	pi->mod_type = FW_PORT_MOD_TYPE_NA;

	t4_init_link_config(pi, pcaps, acaps);
	return 0;
}

/**
 *	t4_init_portinfo - allocate a virtual interface and initialize port_info
 *	@pi: the port_info
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@mac: the MAC address of the VI
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC address of the VI as assigned by FW.
 *	@mac should be large enough to hold an Ethernet address.
 *	Returns < 0 on error.
10777 */ 10778 int t4_init_portinfo(struct port_info *pi, int mbox, 10779 int port, int pf, int vf, u8 mac[]) 10780 { 10781 return t4_init_portinfo_viid(pi, mbox, port, pf, vf, mac, true); 10782 } 10783 10784 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) 10785 { 10786 u8 addr[6]; 10787 int ret, i, j = 0; 10788 10789 for_each_port(adap, i) { 10790 struct port_info *pi = adap2pinfo(adap, i); 10791 10792 while ((adap->params.portvec & (1 << j)) == 0) 10793 j++; 10794 10795 ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr); 10796 if (ret) 10797 return ret; 10798 10799 t4_os_set_hw_addr(adap, i, addr); 10800 j++; 10801 } 10802 return 0; 10803 } 10804 10805 /** 10806 * t4_read_cimq_cfg - read CIM queue configuration 10807 * @adap: the adapter 10808 * @base: holds the queue base addresses in bytes 10809 * @size: holds the queue sizes in bytes 10810 * @thres: holds the queue full thresholds in bytes 10811 * 10812 * Returns the current configuration of the CIM queues, starting with 10813 * the IBQs, then the OBQs. 10814 */ 10815 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres) 10816 { 10817 unsigned int i, v; 10818 int cim_num_obq = is_t4(adap->params.chip) ? 
				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	/* IBQs first: select each queue, then read its config word. */
	for (i = 0; i < CIM_NUM_IBQ; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* value is in 256-byte units */
		*base++ = G_CIMQBASE(v) * 256;
		*size++ = G_CIMQSIZE(v) * 256;
		*thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
	}
	/* Then the OBQs; they have no full threshold. */
	for (i = 0; i < cim_num_obq; i++) {
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* value is in 256-byte units */
		*base++ = G_CIMQBASE(v) * 256;
		*size++ = G_CIMQSIZE(v) * 256;
	}
}

/**
 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err, attempts;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	/* Only IBQ indices 0-5 exist; @n must be a multiple of 4. */
	if (qid > 5 || (n & 3))
		return -EINVAL;

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	/* It might take 3-10ms before the IBQ debug read access is allowed.
	 * Wait for 1 Sec with a delay of 1 usec.
	 */
	attempts = 1000000;

	for (i = 0; i < n; i++, addr++) {
		/* Point the debug interface at the next IBQ word and wait
		 * for the access to complete before fetching the data.
		 */
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      attempts, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);	/* disable debug access */
	return i;
}

/**
 *	t4_read_cim_obq - read the contents of a CIM outbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	int cim_num_obq = is_t4(adap->params.chip) ?
				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	if ((qid > (cim_num_obq - 1)) || (n & 3))
		return -EINVAL;

	/* Look up the selected OBQ's base and size in the CIM config. */
	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);	/* disable debug access */
	return i;
}

/**
 *	t4_cim_read - read a block from CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM internal address space.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	/* Fail immediately if the host access interface is already busy. */
	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 *	t4_cim_write - write a block into CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to write
 *	@valp: set of values to write
 *
 *	Writes a block of 4-byte words into the CIM internal address space.
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	/* Fail immediately if the host access interface is already busy. */
	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		/* Stage the data word first, then trigger the write. */
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
	}
	return ret;
}

/* Convenience wrapper: write a single 4-byte word @val at CIM @addr. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}

/**
 *	t4_cim_read_la - read CIM LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the CIM LA buffer with the most recent entry at
 *	the end of the returned data and with the entry at @wrptr first.
 *	We try to leave the LA in the running state we find it in.
 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		/* Set the read pointer and enable the read, then poll
		 * until the hardware clears the enable bit.
		 */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		if (val & F_UPDBGLARDEN) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;

		/* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
		 * identify the 32-bit portion of the full 312-bit data
		 */
		if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
			idx = (idx & 0xff0) + 0x10;
		else
			idx++;
		/* address can't exceed 0xfff */
		idx &= M_UPDBGLARDPTR;
	}
restart:
	/* Restart the LA if it was running on entry; preserve the first
	 * error encountered, if any.
	 */
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}

/**
 *	t4_tp_read_la - read TP LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the TP LA buffer with the most recent entry at
 *	the end of the returned data and with the entry at @wrptr first.
 *	We leave the LA in the running state we find it in.
 */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
	if (cfg & F_DBGLAENABLE)			/* freeze LA */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));

	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
	idx = G_DBGLAWPTR(val);
	/* NOTE(review): in LA modes >= 2 a clear DBGLAWHLF presumably means
	 * only half of the last entry was written -- confirm against the TP
	 * debug LA documentation.
	 */
	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
	if (last_incomplete)
		idx = (idx + 1) & M_DBGLARPTR;
	if (wrptr)
		*wrptr = idx;

	/* Keep the mode bits, clear the read pointer field, re-apply the
	 * configured LA mask.
	 */
	val &= 0xffff;
	val &= ~V_DBGLARPTR(M_DBGLARPTR);
	val |= adap->params.tp.la_mask;

	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
		idx = (idx + 1) & M_DBGLARPTR;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & F_DBGLAENABLE)			/* restore running state */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     cfg | adap->params.tp.la_mask);
}

/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
 * state for more than the Warning Threshold then we'll issue a warning about
 * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
 * appears to be hung every Warning Repeat second till the situation clears.
 * If the situation clears, we'll note that as well.
 */
#define SGE_IDMA_WARN_THRESH 1
#define SGE_IDMA_WARN_REPEAT 300

/**
 *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *
 *	Initialize the state of an SGE Ingress DMA Monitor.
 */
void t4_idma_monitor_init(struct adapter *adapter,
			  struct sge_idma_monitor_state *idma)
{
	/* Initialize the state variables for detecting an SGE Ingress DMA
	 * hang.  The SGE has internal counters which count up on each clock
	 * tick whenever the SGE finds its Ingress DMA State Engines in the
	 * same state they were on the previous clock tick.  The clock used is
	 * the Core Clock so we have a limit on the maximum "time" they can
	 * record; typically a very small number of seconds.  For instance,
	 * with a 600MHz Core Clock, we can only count up to a bit more than
	 * 7s.  So we'll synthesize a larger counter in order to not run the
	 * risk of having the "timers" overflow and give us the flexibility to
	 * maintain a Hung SGE State Machine of our own which operates across
	 * a longer time frame.
	 */
	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
	idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
}

/**
 *	t4_idma_monitor - monitor SGE Ingress DMA state
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *	@hz: number of ticks/second
 *	@ticks: number of ticks since the last IDMA Monitor call
 */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	/* Read the SGE Debug Ingress DMA Same State Count registers.
These
	 * are counters inside the SGE which count up on each clock when the
	 * SGE finds its Ingress DMA State Engines in the same states they
	 * were in the previous clock.  The counters will peg out at
	 * 0xffffffff without wrapping around so once they pass the 1s
	 * threshold they'll stay above that till the IDMA state changes.
	 */
	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);

	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue.  If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
				CH_WARN(adapter, "SGE idma%d, queue %u, "
					"resumed after %d seconds\n",
					i, idma->idma_qid[i],
					idma->idma_stalled[i]/hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain.  The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
		} else {
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
			" state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
			i, idma->idma_qid[i], idma->idma_state[i],
			idma->idma_stalled[i]/hz,
			debug0, debug11);
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}

/**
 *	t4_set_vf_mac_acl - Set MAC address for the specified VF
 *	@adapter: The adapter
 *	@vf: one of the VFs instantiated by the specified PF
 *	@naddr: the number of MAC addresses
 *	@addr: the MAC address(es) to be set to the specified VF
 */
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
		      unsigned int naddr, u8 *addr)
{
	struct fw_acl_mac_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_MAC_CMD) |
				    F_FW_CMD_REQUEST |
				    F_FW_CMD_WRITE |
				    V_FW_ACL_MAC_CMD_PFN(adapter->pf) |
				    V_FW_ACL_MAC_CMD_VFN(vf));

	/* Note:
Do not enable the ACL */
	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
	cmd.nmac = naddr;

	/* Each PF owns a distinct MAC-address slot within the command;
	 * PFs other than 0-3 fall through with no address copied.
	 */
	switch (adapter->pf) {
	case 3:
		memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
		break;
	case 2:
		memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
		break;
	case 1:
		memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
		break;
	case 0:
		memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
		break;
	}

	return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
}

/* Code which cannot be pushed to kernel.org e.g., cxgbtool ioctl helper
 * functions
 */

/**
 *	t4_read_pace_tbl - read the pace table
 *	@adap: the adapter
 *	@pace_vals: holds the returned values
 *
 *	Returns the values of TP's pace table in microseconds.
 */
void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
{
	unsigned int i, v;

	for (i = 0; i < NTX_SCHED; i++) {
		/* Select pace-table entry i, then read back its value. */
		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
		v = t4_read_reg(adap, A_TP_PACE_TABLE);
		pace_vals[i] = dack_ticks_to_usec(adap, v);
	}
}

/**
 *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
 *	@adap: the adapter
 *	@sched: the scheduler index
 *	@kbps: the byte rate in Kbps
 *	@ipg: the interpacket delay in tenths of nanoseconds
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Return the current configuration of a HW Tx scheduler.
11298 */ 11299 void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps, 11300 unsigned int *ipg, bool sleep_ok) 11301 { 11302 unsigned int v, addr, bpt, cpt; 11303 11304 if (kbps) { 11305 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2; 11306 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok); 11307 if (sched & 1) 11308 v >>= 16; 11309 bpt = (v >> 8) & 0xff; 11310 cpt = v & 0xff; 11311 if (!cpt) 11312 *kbps = 0; /* scheduler disabled */ 11313 else { 11314 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */ 11315 *kbps = (v * bpt) / 125; 11316 } 11317 } 11318 if (ipg) { 11319 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2; 11320 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok); 11321 if (sched & 1) 11322 v >>= 16; 11323 v &= 0xffff; 11324 *ipg = (10000 * v) / core_ticks_per_usec(adap); 11325 } 11326 } 11327 11328 /** 11329 * t4_load_cfg - download config file 11330 * @adap: the adapter 11331 * @cfg_data: the cfg text file to write 11332 * @size: text file size 11333 * 11334 * Write the supplied config text file to the card's serial flash. 
11335 */ 11336 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size) 11337 { 11338 int ret, i, n, cfg_addr; 11339 unsigned int addr; 11340 unsigned int flash_cfg_start_sec; 11341 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; 11342 11343 cfg_addr = t4_flash_cfg_addr(adap); 11344 if (cfg_addr < 0) 11345 return cfg_addr; 11346 11347 addr = cfg_addr; 11348 flash_cfg_start_sec = addr / SF_SEC_SIZE; 11349 11350 if (size > FLASH_CFG_MAX_SIZE) { 11351 CH_ERR(adap, "cfg file too large, max is %u bytes\n", 11352 FLASH_CFG_MAX_SIZE); 11353 return -EFBIG; 11354 } 11355 11356 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */ 11357 sf_sec_size); 11358 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec, 11359 flash_cfg_start_sec + i - 1); 11360 /* 11361 * If size == 0 then we're simply erasing the FLASH sectors associated 11362 * with the on-adapter Firmware Configuration File. 11363 */ 11364 if (ret || size == 0) 11365 goto out; 11366 11367 /* this will write to the flash up to SF_PAGE_SIZE at a time */ 11368 for (i = 0; i< size; i+= SF_PAGE_SIZE) { 11369 if ( (size - i) < SF_PAGE_SIZE) 11370 n = size - i; 11371 else 11372 n = SF_PAGE_SIZE; 11373 ret = t4_write_flash(adap, addr, n, cfg_data, 1); 11374 if (ret) 11375 goto out; 11376 11377 addr += SF_PAGE_SIZE; 11378 cfg_data += SF_PAGE_SIZE; 11379 } 11380 11381 out: 11382 if (ret) 11383 CH_ERR(adap, "config file %s failed %d\n", 11384 (size == 0 ? "clear" : "download"), ret); 11385 return ret; 11386 } 11387 11388 /** 11389 * t5_fw_init_extern_mem - initialize the external memory 11390 * @adap: the adapter 11391 * 11392 * Initializes the external memory on T5. 
11393 */ 11394 int t5_fw_init_extern_mem(struct adapter *adap) 11395 { 11396 u32 params[1], val[1]; 11397 int ret; 11398 11399 if (!is_t5(adap->params.chip)) 11400 return 0; 11401 11402 val[0] = 0xff; /* Initialize all MCs */ 11403 params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 11404 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT)); 11405 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val, 11406 FW_CMD_MAX_TIMEOUT); 11407 11408 return ret; 11409 } 11410 11411 /* BIOS boot headers */ 11412 typedef struct pci_expansion_rom_header { 11413 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */ 11414 u8 reserved[22]; /* Reserved per processor Architecture data */ 11415 u8 pcir_offset[2]; /* Offset to PCI Data Structure */ 11416 } pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */ 11417 11418 /* Legacy PCI Expansion ROM Header */ 11419 typedef struct legacy_pci_expansion_rom_header { 11420 u8 signature[2]; /* ROM Signature. Should be 0xaa55 */ 11421 u8 size512; /* Current Image Size in units of 512 bytes */ 11422 u8 initentry_point[4]; 11423 u8 cksum; /* Checksum computed on the entire Image */ 11424 u8 reserved[16]; /* Reserved */ 11425 u8 pcir_offset[2]; /* Offset to PCI Data Struture */ 11426 } legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */ 11427 11428 /* EFI PCI Expansion ROM Header */ 11429 typedef struct efi_pci_expansion_rom_header { 11430 u8 signature[2]; // ROM signature. The value 0xaa55 11431 u8 initialization_size[2]; /* Units 512. Includes this header */ 11432 u8 efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */ 11433 u8 efi_subsystem[2]; /* Subsystem value for EFI image header */ 11434 u8 efi_machine_type[2]; /* Machine type from EFI image header */ 11435 u8 compression_type[2]; /* Compression type. 
*/
				/*
				 * Compression type definition
				 * 0x0: uncompressed
				 * 0x1: Compressed
				 * 0x2-0xFFFF: Reserved
				 */
	u8 reserved[8];		/* Reserved */
	u8 efi_image_header_offset[2]; /* Offset to EFI Image */
	u8 pcir_offset[2];	/* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t;	/* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure {	/* PCI Data Structure */
	u8 signature[4];	/* Signature. The string "PCIR" */
	u8 vendor_id[2];	/* Vendor Identification */
	u8 device_id[2];	/* Device Identification */
	u8 vital_product[2];	/* Pointer to Vital Product Data */
	u8 length[2];		/* PCIR Data Structure Length */
	u8 revision;		/* PCIR Data Structure Revision */
	u8 class_code[3];	/* Class Code */
	u8 image_length[2];	/* Image Length. Multiple of 512B */
	u8 code_revision[2];	/* Revision Level of Code/Data */
	u8 code_type;		/* Code Type. */
				/*
				 * PCI Expansion ROM Code Types
				 * 0x00: Intel IA-32, PC-AT compatible. Legacy
				 * 0x01: Open Firmware standard for PCI. FCODE
				 * 0x02: Hewlett-Packard PA RISC. HP reserved
				 * 0x03: EFI Image. EFI
				 * 0x04-0xFF: Reserved.
				 */
	u8 indicator;		/* Indicator. 
Identifies the last image in the ROM */
	u8 reserved[2];		/* Reserved */
} pcir_data_t;			/* PCI__DATA_STRUCTURE */

/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,	   /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment */
	VENDOR_ID = 0x1425,	   /* Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
};

/*
 * modify_device_id - Modifies the device ID of the Boot BIOS image
 * @device_id: the device ID to write
 * @boot_data: the boot image to modify
 *
 * Write the supplied device ID to the boot BIOS image, walking every
 * chained image in the ROM.
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;

	/*
	 * Loop through all chained images and change the device ID's
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
		    le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or HP.
		 * 0x00: Okay to modify
		 * 0x01: FCODE. Do not modify
		 * 0x03: Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Calculate and update checksum over the whole image
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert summed value to create the checksum
			 * Writing new checksum value directly to the boot data
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match current adapter
			 */
			*(u16*) pcir_header->device_id = device_id;

		}


		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	}
}

#ifdef CHELSIO_T4_DIAGS
/*
 * t4_erase_sf - Erase entire serial Flash region
 * @adapter: the adapter
 *
 * Clears the entire serial flash region.
 */
int t4_erase_sf(struct adapter *adap)
{
	unsigned int nsectors;
	int ret;

	nsectors = FLASH_END_SEC;
	if (nsectors > adap->params.sf_nsec)
		nsectors = adap->params.sf_nsec;

	/*
	 * Erase all sectors of flash before and including the FW.
	 * Flash layout is in t4_hw.h.
	 */
	ret = t4_flash_erase_sectors(adap, 0, nsectors - 1);
	if (ret)
		CH_ERR(adap, "Erasing serial flash failed, error %d\n", ret);
	return ret;
}
#endif

/*
 * t4_load_boot - download boot flash
 * @adapter: the adapter
 * @boot_data: the boot image to write
 * @boot_addr: offset in flash to write boot_data
 * @size: image size
 *
 * Write the supplied boot image to the card's serial flash.
 * The boot image has the following sections: a 28-byte header and the
 * boot image. 
 */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	pci_exp_rom_header_t *header;
	int pcir_offset;
	pcir_data_t *pcir_header;
	int ret, addr;
	uint16_t device_id;
	unsigned int i;
	/* boot_addr is given in 1KB units; flash sectors are 64KB (>> 16). */
	unsigned int boot_sector = (boot_addr * 1024);
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Make sure the boot image does not encroach on the firmware region
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	/*
	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
	 * and Boot configuration data sections. These 3 boot sections span
	 * sectors 0 to 7 in flash and live right before the FW image location.
	 */
	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
	    sf_sec_size);
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
	    (boot_sector >> 16) + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter option ROM file
	 */
	if (ret || (size == 0))
		goto out;

	/* Get boot header */
	header = (pci_exp_rom_header_t *)boot_data;
	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
	/* PCIR Data Structure */
	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors. We ought to check for
	 * more but it's not worth it for now ...
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

#ifndef CHELSIO_T4_DIAGS
	/*
	 * Check BOOT ROM header signature
	 */
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
		CH_ERR(adap, "Boot image missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check PCI header signature
	 */
	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
		CH_ERR(adap, "PCI header missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check Vendor ID matches Chelsio ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
		CH_ERR(adap, "Vendor ID missing signature\n");
		return -EINVAL;
	}
#endif

	/*
	 * Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
	/* Want to deal with PF 0 so I strip off PF 4 indicator */
	device_id = device_id & 0xf0ff;

	/*
	 * Check PCIE Device ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
		/*
		 * Change the device ID in the Boot BIOS image to match
		 * the Device ID of the current adapter.
		 */
		modify_device_id(device_id, boot_data);
	}

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image. This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 *
	 * NOTE(review): this loop assumes size is a non-zero multiple of
	 * SF_PAGE_SIZE; since size is unsigned, a non-aligned size would
	 * wrap past zero -- confirm callers only pass page-aligned images.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	/* Header page goes in last, only after the body made it to flash. */
	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
	    (const u8 *)header, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}

/*
 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
 * @adapter: the adapter
 *
 * Return the address within the flash where the OptionROM Configuration
 * is stored, or an error if the device FLASH is too small to contain
 * a OptionROM Configuration.
 */
static int t4_flash_bootcfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error. 
11731 */ 11732 if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE) 11733 return -ENOSPC; 11734 11735 return FLASH_BOOTCFG_START; 11736 } 11737 11738 int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size) 11739 { 11740 int ret, i, n, cfg_addr; 11741 unsigned int addr; 11742 unsigned int flash_cfg_start_sec; 11743 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; 11744 11745 cfg_addr = t4_flash_bootcfg_addr(adap); 11746 if (cfg_addr < 0) 11747 return cfg_addr; 11748 11749 addr = cfg_addr; 11750 flash_cfg_start_sec = addr / SF_SEC_SIZE; 11751 11752 if (size > FLASH_BOOTCFG_MAX_SIZE) { 11753 CH_ERR(adap, "bootcfg file too large, max is %u bytes\n", 11754 FLASH_BOOTCFG_MAX_SIZE); 11755 return -EFBIG; 11756 } 11757 11758 i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */ 11759 sf_sec_size); 11760 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec, 11761 flash_cfg_start_sec + i - 1); 11762 11763 /* 11764 * If size == 0 then we're simply erasing the FLASH sectors associated 11765 * with the on-adapter OptionROM Configuration File. 11766 */ 11767 if (ret || size == 0) 11768 goto out; 11769 11770 /* this will write to the flash up to SF_PAGE_SIZE at a time */ 11771 for (i = 0; i< size; i+= SF_PAGE_SIZE) { 11772 if ( (size - i) < SF_PAGE_SIZE) 11773 n = size - i; 11774 else 11775 n = SF_PAGE_SIZE; 11776 ret = t4_write_flash(adap, addr, n, cfg_data, 0); 11777 if (ret) 11778 goto out; 11779 11780 addr += SF_PAGE_SIZE; 11781 cfg_data += SF_PAGE_SIZE; 11782 } 11783 11784 out: 11785 if (ret) 11786 CH_ERR(adap, "boot config data %s failed %d\n", 11787 (size == 0 ? 
"clear" : "download"), ret); 11788 return ret; 11789 } 11790 11791 /** 11792 * t4_read_bootcfg - read the current (boot)OptionROM configuration from FLASH 11793 * @adap: the adapter 11794 * @cfg_data: where to store the read OptionROM configuration data 11795 * 11796 * Read the current OptionROM configuration from FLASH and write to the 11797 * buffer @cfg_data supplied. 11798 */ 11799 int t4_read_bootcfg(struct adapter *adap, u8 *cfg_data, unsigned int size) 11800 { 11801 u32 *ptr = (u32 *)cfg_data; 11802 int i, n, cfg_addr; 11803 int ret = 0; 11804 11805 if (size > FLASH_BOOTCFG_MAX_SIZE) { 11806 CH_ERR(adap, "bootcfg file too big, max is %u bytes\n", 11807 FLASH_BOOTCFG_MAX_SIZE); 11808 return -EINVAL; 11809 } 11810 11811 cfg_addr = t4_flash_bootcfg_addr(adap); 11812 if (cfg_addr < 0) 11813 return cfg_addr; 11814 11815 size = size / sizeof (u32); 11816 for (i = 0; i < size; i += SF_PAGE_SIZE) { 11817 if ( (size - i) < SF_PAGE_SIZE) 11818 n = size - i; 11819 else 11820 n = SF_PAGE_SIZE; 11821 11822 ret = t4_read_flash(adap, cfg_addr, n, ptr, 0); 11823 if (ret) 11824 goto out; 11825 11826 cfg_addr += (n*4); 11827 ptr += n; 11828 } 11829 11830 out: 11831 return ret; 11832 } 11833 11834 /** 11835 * t4_set_filter_mode - configure the optional components of filter tuples 11836 * @adap: the adapter 11837 * @mode_map: a bitmap selcting which optional filter components to enable 11838 * @sleep_ok: if true we may sleep while awaiting command completion 11839 * 11840 * Sets the filter mode by selecting the optional components to enable 11841 * in filter tuples. Returns 0 on success and a negative error if the 11842 * requested mode needs more bits than are available for optional 11843 * components. 
11844 */ 11845 int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map, 11846 bool sleep_ok) 11847 { 11848 static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 }; 11849 11850 int i, nbits = 0; 11851 11852 for (i = S_FCOE; i <= S_FRAGMENTATION; i++) 11853 if (mode_map & (1 << i)) 11854 nbits += width[i]; 11855 if (nbits > FILTER_OPT_LEN) 11856 return -EINVAL; 11857 11858 t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok); 11859 11860 return 0; 11861 } 11862 11863 /** 11864 * t4_clr_port_stats - clear port statistics 11865 * @adap: the adapter 11866 * @idx: the port index 11867 * 11868 * Clear HW statistics for the given port. 11869 */ 11870 void t4_clr_port_stats(struct adapter *adap, int idx) 11871 { 11872 unsigned int i; 11873 u32 bgmap = t4_get_mps_bg_map(adap, idx); 11874 u32 port_base_addr; 11875 11876 if (is_t4(adap->params.chip)) 11877 port_base_addr = PORT_BASE(idx); 11878 else 11879 port_base_addr = T5_PORT_BASE(idx); 11880 11881 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L; 11882 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8) 11883 t4_write_reg(adap, port_base_addr + i, 0); 11884 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L; 11885 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8) 11886 t4_write_reg(adap, port_base_addr + i, 0); 11887 for (i = 0; i < 4; i++) 11888 if (bgmap & (1 << i)) { 11889 t4_write_reg(adap, 11890 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0); 11891 t4_write_reg(adap, 11892 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0); 11893 } 11894 } 11895 11896 /** 11897 * t4_i2c_io - read/write I2C data from adapter 11898 * @adap: the adapter 11899 * @port: Port number if per-port device; <0 if not 11900 * @devid: per-port device ID or absolute device ID 11901 * @offset: byte offset into device I2C space 11902 * @len: byte length of I2C space data 11903 * @buf: buffer in which to return I2C data for read 11904 * buffer which holds the I2C data for write 11905 * @write: if true, do a write; else do a read 11906 * Reads/Writes the 
I2C data from/to the indicated device and location. 11907 */ 11908 int t4_i2c_io(struct adapter *adap, unsigned int mbox, 11909 int port, unsigned int devid, 11910 unsigned int offset, unsigned int len, 11911 u8 *buf, bool write) 11912 { 11913 struct fw_ldst_cmd ldst_cmd, ldst_rpl; 11914 unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data); 11915 int ret = 0; 11916 11917 if (len > I2C_PAGE_SIZE) 11918 return -EINVAL; 11919 11920 /* Dont allow reads that spans multiple pages */ 11921 if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE) 11922 return -EINVAL; 11923 11924 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 11925 ldst_cmd.op_to_addrspace = 11926 cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 11927 F_FW_CMD_REQUEST | 11928 (write ? F_FW_CMD_WRITE : F_FW_CMD_READ) | 11929 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C)); 11930 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd)); 11931 ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port); 11932 ldst_cmd.u.i2c.did = devid; 11933 11934 while (len > 0) { 11935 unsigned int i2c_len = (len < i2c_max) ? len : i2c_max; 11936 11937 ldst_cmd.u.i2c.boffset = offset; 11938 ldst_cmd.u.i2c.blen = i2c_len; 11939 11940 if (write) 11941 memcpy(ldst_cmd.u.i2c.data, buf, i2c_len); 11942 11943 ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd), 11944 write ? 
NULL : &ldst_rpl); 11945 if (ret) 11946 break; 11947 11948 if (!write) 11949 memcpy(buf, ldst_rpl.u.i2c.data, i2c_len); 11950 offset += i2c_len; 11951 buf += i2c_len; 11952 len -= i2c_len; 11953 } 11954 11955 return ret; 11956 } 11957 11958 int t4_i2c_rd(struct adapter *adap, unsigned int mbox, 11959 int port, unsigned int devid, 11960 unsigned int offset, unsigned int len, 11961 u8 *buf) 11962 { 11963 return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, false); 11964 } 11965 11966 int t4_i2c_wr(struct adapter *adap, unsigned int mbox, 11967 int port, unsigned int devid, 11968 unsigned int offset, unsigned int len, 11969 u8 *buf) 11970 { 11971 return t4_i2c_io(adap, mbox, port, devid, offset, len, buf, true); 11972 } 11973 11974 /** 11975 * t4_sge_ctxt_rd - read an SGE context through FW 11976 * @adap: the adapter 11977 * @mbox: mailbox to use for the FW command 11978 * @cid: the context id 11979 * @ctype: the context type 11980 * @data: where to store the context data 11981 * 11982 * Issues a FW command through the given mailbox to read an SGE context. 
11983 */ 11984 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid, 11985 enum ctxt_type ctype, u32 *data) 11986 { 11987 int ret; 11988 struct fw_ldst_cmd c; 11989 11990 if (ctype == CTXT_EGRESS) 11991 ret = FW_LDST_ADDRSPC_SGE_EGRC; 11992 else if (ctype == CTXT_INGRESS) 11993 ret = FW_LDST_ADDRSPC_SGE_INGC; 11994 else if (ctype == CTXT_FLM) 11995 ret = FW_LDST_ADDRSPC_SGE_FLMC; 11996 else 11997 ret = FW_LDST_ADDRSPC_SGE_CONMC; 11998 11999 memset(&c, 0, sizeof(c)); 12000 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | 12001 F_FW_CMD_REQUEST | F_FW_CMD_READ | 12002 V_FW_LDST_CMD_ADDRSPACE(ret)); 12003 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); 12004 c.u.idctxt.physid = cpu_to_be32(cid); 12005 12006 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 12007 if (ret == 0) { 12008 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0); 12009 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1); 12010 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2); 12011 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3); 12012 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4); 12013 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5); 12014 } 12015 return ret; 12016 } 12017 12018 /** 12019 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW 12020 * @adap: the adapter 12021 * @cid: the context id 12022 * @ctype: the context type 12023 * @data: where to store the context data 12024 * 12025 * Reads an SGE context directly, bypassing FW. This is only for 12026 * debugging when FW is unavailable. 
12027 */ 12028 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype, 12029 u32 *data) 12030 { 12031 int i, ret; 12032 12033 t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype)); 12034 ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1); 12035 if (!ret) 12036 for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4) 12037 *data++ = t4_read_reg(adap, i); 12038 return ret; 12039 } 12040 12041 int t4_sched_config(struct adapter *adapter, int type, int minmaxen) 12042 { 12043 struct fw_sched_cmd cmd; 12044 12045 memset(&cmd, 0, sizeof(cmd)); 12046 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) | 12047 F_FW_CMD_REQUEST | 12048 F_FW_CMD_WRITE); 12049 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 12050 12051 cmd.u.config.sc = FW_SCHED_SC_CONFIG; 12052 cmd.u.config.type = type; 12053 cmd.u.config.minmaxen = minmaxen; 12054 12055 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd), 12056 NULL, 1); 12057 } 12058 12059 int t4_sched_params(struct adapter *adapter, 12060 int channel, int cls, 12061 int level, int mode, int type, 12062 int rateunit, int ratemode, 12063 int minrate, int maxrate, int weight, 12064 int pktsize, int burstsize) 12065 { 12066 struct fw_sched_cmd cmd; 12067 12068 memset(&cmd, 0, sizeof(cmd)); 12069 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) | 12070 F_FW_CMD_REQUEST | 12071 F_FW_CMD_WRITE); 12072 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 12073 12074 cmd.u.params.sc = FW_SCHED_SC_PARAMS; 12075 cmd.u.params.type = type; 12076 cmd.u.params.level = level; 12077 cmd.u.params.mode = mode; 12078 cmd.u.params.ch = channel; 12079 cmd.u.params.cl = cls; 12080 cmd.u.params.unit = rateunit; 12081 cmd.u.params.rate = ratemode; 12082 cmd.u.params.min = cpu_to_be32(minrate); 12083 cmd.u.params.max = cpu_to_be32(maxrate); 12084 cmd.u.params.weight = cpu_to_be16(weight); 12085 cmd.u.params.pktsize = cpu_to_be16(pktsize); 12086 cmd.u.params.burstsize = cpu_to_be16(burstsize); 
12087 12088 return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd), 12089 NULL, 1); 12090 } 12091 12092 int t4_read_sched_params(struct adapter *adapter, 12093 int channel, int cls, 12094 int *level, int *mode, int *type, 12095 int *rateunit, int *ratemode, 12096 int *minrate, int *maxrate, int *weight, 12097 int *pktsize, int *burstsize) 12098 { 12099 struct fw_sched_cmd cmd; 12100 int ret = 0; 12101 12102 memset(&cmd, 0, sizeof(cmd)); 12103 cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) | 12104 F_FW_CMD_REQUEST | 12105 F_FW_CMD_READ); 12106 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 12107 cmd.u.params.sc = FW_SCHED_SC_PARAMS; 12108 cmd.u.params.ch = channel; 12109 cmd.u.params.cl = cls; 12110 12111 ret = t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd), 12112 &cmd, 1); 12113 if (ret) 12114 goto out; 12115 12116 *level = cmd.u.params.level; 12117 *mode = cmd.u.params.mode; 12118 *type = cmd.u.params.type; 12119 *rateunit = cmd.u.params.unit; 12120 *ratemode = cmd.u.params.rate; 12121 *minrate = be32_to_cpu(cmd.u.params.min); 12122 *maxrate = be32_to_cpu(cmd.u.params.max); 12123 *weight = be16_to_cpu(cmd.u.params.weight); 12124 *pktsize = be16_to_cpu(cmd.u.params.pktsize); 12125 *burstsize = be16_to_cpu(cmd.u.params.burstsize); 12126 12127 out: 12128 return ret; 12129 } 12130 12131 /* 12132 * t4_config_watchdog - configure (enable/disable) a watchdog timer 12133 * @adapter: the adapter 12134 * @mbox: mailbox to use for the FW command 12135 * @pf: the PF owning the queue 12136 * @vf: the VF owning the queue 12137 * @timeout: watchdog timeout in ms 12138 * @action: watchdog timer / action 12139 * 12140 * There are separate watchdog timers for each possible watchdog 12141 * action. Configure one of the watchdog timers by setting a non-zero 12142 * timeout. Disable a watchdog timer by using a timeout of zero. 
12143 */ 12144 int t4_config_watchdog(struct adapter *adapter, unsigned int mbox, 12145 unsigned int pf, unsigned int vf, 12146 unsigned int timeout, unsigned int action) 12147 { 12148 struct fw_watchdog_cmd wdog; 12149 unsigned int ticks; 12150 12151 /* 12152 * The watchdog command expects a timeout in units of 10ms so we need 12153 * to convert it here (via rounding) and force a minimum of one 10ms 12154 * "tick" if the timeout is non-zero but the convertion results in 0 12155 * ticks. 12156 */ 12157 ticks = (timeout + 5)/10; 12158 if (timeout && !ticks) 12159 ticks = 1; 12160 12161 memset(&wdog, 0, sizeof wdog); 12162 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) | 12163 F_FW_CMD_REQUEST | 12164 F_FW_CMD_WRITE | 12165 V_FW_PARAMS_CMD_PFN(pf) | 12166 V_FW_PARAMS_CMD_VFN(vf)); 12167 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog)); 12168 wdog.timeout = cpu_to_be32(ticks); 12169 wdog.action = cpu_to_be32(action); 12170 12171 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL); 12172 } 12173 12174 int t4_get_devlog_level(struct adapter *adapter, unsigned int *level) 12175 { 12176 struct fw_devlog_cmd devlog_cmd; 12177 int ret; 12178 12179 memset(&devlog_cmd, 0, sizeof(devlog_cmd)); 12180 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) | 12181 F_FW_CMD_REQUEST | F_FW_CMD_READ); 12182 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd)); 12183 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd, 12184 sizeof(devlog_cmd), &devlog_cmd); 12185 if (ret) 12186 return ret; 12187 12188 *level = devlog_cmd.level; 12189 return 0; 12190 } 12191 12192 int t4_set_devlog_level(struct adapter *adapter, unsigned int level) 12193 { 12194 struct fw_devlog_cmd devlog_cmd; 12195 12196 memset(&devlog_cmd, 0, sizeof(devlog_cmd)); 12197 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) | 12198 F_FW_CMD_REQUEST | 12199 F_FW_CMD_WRITE); 12200 devlog_cmd.level = level; 12201 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd)); 
12202 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd, 12203 sizeof(devlog_cmd), &devlog_cmd); 12204 } 12205 12206 int t4_configure_add_smac(struct adapter *adap) 12207 { 12208 unsigned int param, val; 12209 int ret = 0; 12210 12211 adap->params.smac_add_support = 0; 12212 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 12213 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_ADD_SMAC)); 12214 /* Query FW to check if FW supports adding source mac address 12215 * to TCAM feature or not. 12216 * If FW returns 1, driver can use this feature and driver need to send 12217 * FW_PARAMS_PARAM_DEV_ADD_SMAC write command with value 1 to 12218 * enable adding smac to TCAM. 12219 */ 12220 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); 12221 if (ret) 12222 return ret; 12223 12224 if (val == 1) { 12225 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, 12226 ¶m, &val); 12227 if (!ret) 12228 /* Firmware allows adding explicit TCAM entries. 12229 * Save this internally. 12230 */ 12231 adap->params.smac_add_support = 1; 12232 } 12233 12234 return ret; 12235 } 12236 12237 int t4_configure_ringbb(struct adapter *adap) 12238 { 12239 unsigned int param, val; 12240 int ret = 0; 12241 12242 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 12243 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RING_BACKBONE)); 12244 /* Query FW to check if FW supports ring switch feature or not. 12245 * If FW returns 1, driver can use this feature and driver need to send 12246 * FW_PARAMS_PARAM_DEV_RING_BACKBONE write command with value 1 to 12247 * enable the ring backbone configuration. 
12248 */ 12249 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); 12250 if (ret < 0) { 12251 CH_ERR(adap, "Querying FW using Ring backbone params command failed, err=%d\n", 12252 ret); 12253 goto out; 12254 } 12255 12256 if (val != 1) { 12257 CH_ERR(adap, "FW doesnot support ringbackbone features\n"); 12258 goto out; 12259 } 12260 12261 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); 12262 if (ret < 0) { 12263 CH_ERR(adap, "Could not set Ringbackbone, err= %d\n", 12264 ret); 12265 goto out; 12266 } 12267 12268 out: 12269 return ret; 12270 } 12271 12272 /* 12273 * t4_set_vlan_acl - Set a VLAN id for the specified VF 12274 * @adapter: the adapter 12275 * @mbox: mailbox to use for the FW command 12276 * @vf: one of the VFs instantiated by the specified PF 12277 * @vlan: The vlanid to be set 12278 * 12279 */ 12280 int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf, 12281 u16 vlan) 12282 { 12283 struct fw_acl_vlan_cmd vlan_cmd; 12284 unsigned int enable; 12285 12286 enable = (vlan ? F_FW_ACL_VLAN_CMD_EN : 0); 12287 memset(&vlan_cmd, 0, sizeof(vlan_cmd)); 12288 vlan_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_VLAN_CMD) | 12289 F_FW_CMD_REQUEST | 12290 F_FW_CMD_WRITE | 12291 F_FW_CMD_EXEC | 12292 V_FW_ACL_VLAN_CMD_PFN(adap->pf) | 12293 V_FW_ACL_VLAN_CMD_VFN(vf)); 12294 vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd)); 12295 /* Drop all packets that donot match vlan id */ 12296 vlan_cmd.dropnovlan_fm = (enable 12297 ? 
(F_FW_ACL_VLAN_CMD_DROPNOVLAN | 12298 F_FW_ACL_VLAN_CMD_FM) 12299 : 0); 12300 if (enable != 0) { 12301 vlan_cmd.nvlan = 1; 12302 vlan_cmd.vlanid[0] = cpu_to_be16(vlan); 12303 } 12304 12305 return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL); 12306 } 12307 12308 /** 12309 * t4_del_mac - Removes the exact-match filter for a MAC address 12310 * @adap: the adapter 12311 * @mbox: mailbox to use for the FW command 12312 * @viid: the VI id 12313 * @addr: the MAC address value 12314 * @smac: if true, delete from only the smac region of MPS 12315 * 12316 * Modifies an exact-match filter and sets it to the new MAC address if 12317 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the 12318 * latter case the address is added persistently if @persist is %true. 12319 * 12320 * Returns a negative error number or the index of the filter with the new 12321 * MAC value. Note that this index may differ from @idx. 12322 */ 12323 int t4_del_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, 12324 const u8 *addr, bool smac) 12325 { 12326 int ret; 12327 struct fw_vi_mac_cmd c; 12328 struct fw_vi_mac_exact *p = c.u.exact; 12329 unsigned int max_mac_addr = adap->params.arch.mps_tcam_size; 12330 12331 memset(&c, 0, sizeof(c)); 12332 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | 12333 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 12334 V_FW_VI_MAC_CMD_VIID(viid)); 12335 c.freemacs_to_len16 = cpu_to_be32( 12336 V_FW_CMD_LEN16(1) | 12337 (smac ? 
F_FW_VI_MAC_CMD_IS_SMAC : 0)); 12338 12339 memcpy(p->macaddr, addr, sizeof(p->macaddr)); 12340 p->valid_to_idx = cpu_to_be16( 12341 F_FW_VI_MAC_CMD_VALID | 12342 V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_MAC_BASED_FREE)); 12343 12344 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 12345 if (ret == 0) { 12346 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx)); 12347 if (ret < max_mac_addr) 12348 return -ENOMEM; 12349 } 12350 12351 return ret; 12352 } 12353 12354 /** 12355 * t4_add_mac - Adds an exact-match filter for a MAC address 12356 * @adap: the adapter 12357 * @mbox: mailbox to use for the FW command 12358 * @viid: the VI id 12359 * @idx: index of existing filter for old value of MAC address, or -1 12360 * @addr: the new MAC address value 12361 * @persist: whether a new MAC allocation should be persistent 12362 * @add_smt: if true also add the address to the HW SMT 12363 * @smac: if true, update only the smac region of MPS 12364 * 12365 * Modifies an exact-match filter and sets it to the new MAC address if 12366 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the 12367 * latter case the address is added persistently if @persist is %true. 12368 * 12369 * Returns a negative error number or the index of the filter with the new 12370 * MAC value. Note that this index may differ from @idx. 12371 */ 12372 int t4_add_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, 12373 int idx, const u8 *addr, bool persist, u8 *smt_idx, bool smac) 12374 { 12375 int ret, mode; 12376 struct fw_vi_mac_cmd c; 12377 struct fw_vi_mac_exact *p = c.u.exact; 12378 unsigned int max_mac_addr = adap->params.arch.mps_tcam_size; 12379 12380 if (idx < 0) /* new allocation */ 12381 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; 12382 mode = smt_idx ? 
FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; 12383 12384 memset(&c, 0, sizeof(c)); 12385 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | 12386 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 12387 V_FW_VI_MAC_CMD_VIID(viid)); 12388 c.freemacs_to_len16 = cpu_to_be32( 12389 V_FW_CMD_LEN16(1) | 12390 (smac ? F_FW_VI_MAC_CMD_IS_SMAC : 0)); 12391 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID | 12392 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) | 12393 V_FW_VI_MAC_CMD_IDX(idx)); 12394 memcpy(p->macaddr, addr, sizeof(p->macaddr)); 12395 12396 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 12397 if (ret == 0) { 12398 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx)); 12399 if (ret >= max_mac_addr) 12400 return -ENOMEM; 12401 if (smt_idx) { 12402 /* Does fw supports returning smt_idx? */ 12403 if (adap->params.viid_smt_extn_support) 12404 *smt_idx = G_FW_VI_MAC_CMD_SMTID(be32_to_cpu(c.op_to_viid)); 12405 else { 12406 /* In T4/T5, SMT contains 256 SMAC entries 12407 * organized in 128 rows of 2 entries each. 12408 * In T6, SMT contains 256 SMAC entries in 12409 * 256 rows. 12410 */ 12411 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) 12412 *smt_idx = ((viid & M_FW_VIID_VIN) << 1); 12413 else 12414 *smt_idx = (viid & M_FW_VIID_VIN); 12415 } 12416 } 12417 } 12418 12419 return ret; 12420 } 12421 12422