/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver.
 *
 * Copyright (C) 2003-2019 Chelsio Communications. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
 * release for licensing terms and conditions.
 */

/*
 * Copyright 2020 RackTop Systems, Inc.
 */

#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "t4fw_interface.h"

/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times. If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there. Returns 0 if the
 * operation completes and -EAGAIN otherwise.
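 *
 * Illustrative, hypothetical usage (the register and bit names here are
 * examples only), polling a busy bit up to 3 times with 10 usec delays:
 *
 *	u32 val;
 *	if (t4_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, 3, 10, &val))
 *		return -EBUSY;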
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}

/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr); /* flush */
}

/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
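 *
 * Illustrative, hypothetical usage (register names are examples only),
 * dumping four consecutive TP indirect registers:
 *
 *	u32 vals[4];
 *	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, vals, 4, addr);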
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals,
		      unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: address of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}

/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism. This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 *
 * N.B. This routine should only be used as a last resort: the firmware uses
 * the backdoor registers on a regular basis and we can end up
 * conflicting with its uses!
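 *
 * Callers that are able to talk to the firmware should prefer
 * t4_read_pcie_cfg4() below, which tries a Firmware LDST Command first and
 * only falls back to this backdoor path.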
 */
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
	u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		req |= F_ENABLE;
	else
		req |= F_T6_ENABLE;

	if (is_t4(adap->params.chip))
		req |= F_LOCALCFG;

	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
	*val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);

	/* Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
	 * Configuration Space read. (None of the other fields matter when
	 * F_ENABLE is 0 so a simple register write is easier than a
	 * read-modify-write via t4_set_reg_field().)
	 */
	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
}

/*
 * t4_report_fw_error - report firmware error
 * @adap: the adapter
 *
 * The adapter firmware can indicate error conditions to the host.
 * If the firmware has indicated an error, print out the reason for
 * the firmware error.
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char *const reason[] = {
		"Crash",			/* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",			/* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR) {
		CH_ERR(adap, "Firmware reports adapter error: %s\n",
		       reason[G_PCIE_FW_EVAL(pcie_fw)]);
		adap->flags &= ~FW_OK;
	}
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
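 * The mailbox is MBOX_LEN bytes long and is read 64 bits (one "flit")
 * at a time.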
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
{
	CH_ALERT(adap,
		 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		 asrt->u.assert.filename_0_7,
		 be32_to_cpu(asrt->u.assert.line),
		 be32_to_cpu(asrt->u.assert.x),
		 be32_to_cpu(asrt->u.assert.y));
}

#define X_CIM_PF_NOACCESS 0xeeeeeeee

/*
 * If the Host OS Driver needs locking around accesses to the mailbox, this
 * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
 */
/* makes single-statement usage a bit cleaner ... */
#ifdef T4_OS_NEEDS_MBOX_LOCKING
#define T4_OS_MBOX_LOCKING(x) x
#else
#define T4_OS_MBOX_LOCKING(x) do {} while (0)
#endif

/*
 * If the OS Driver wants busy waits to keep a watchdog happy, tap it during
 * busy loops which don't sleep.
 */
#ifdef T4_OS_NEEDS_TOUCH_NMI_WATCHDOG
#define T4_OS_TOUCH_NMI_WATCHDOG()	t4_os_touch_nmi_watchdog()
#else
#define T4_OS_TOUCH_NMI_WATCHDOG()
#endif

#ifdef T4_OS_LOG_MBOX_CMDS
/**
 * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
 * @adapter: the adapter
 * @cmd: the Firmware Mailbox Command or Reply
 * @size: command length in bytes
 * @access: the time (ms) needed to access the Firmware Mailbox
 * @execute: the time (ms) the command spent being executed
 */
static void t4_record_mbox(struct adapter *adapter,
			   const __be64 *cmd, unsigned int size,
			   int access, int execute)
{
	struct mbox_cmd_log *log = adapter->mbox_log;
	struct mbox_cmd *entry;
	int i;

	entry = mbox_cmd_log_entry(log, log->cursor++);
	if (log->cursor == log->size)
		log->cursor = 0;

	for (i = 0; i < size/8; i++)
		entry->cmd[i] = be64_to_cpu(cmd[i]);
	while (i < MBOX_LEN/8)
		entry->cmd[i++] = 0;
	entry->timestamp = t4_os_timestamp();
	entry->seqno = log->seqno++;
	entry->access = access;
	entry->execute = execute;
}

#define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \
	t4_record_mbox(__adapter, __cmd, __size, __access, __execute)

#else /* !T4_OS_LOG_MBOX_CMDS */

#define T4_RECORD_MBOX(__adapter, __cmd, __size, __access, __execute) \
	/* nothing */

#endif /* !T4_OS_LOG_MBOX_CMDS */

/**
 * t4_record_mbox_marker - record a marker in the mailbox log
 * @adapter: the adapter
 * @marker: byte array marker
 * @size: marker size in bytes
 *
 * We inject a "fake mailbox command" into the Firmware Mailbox Log
 * using a known command token and then the bytes of the specified
 * marker. This lets debugging code inject markers into the log to
 * help identify which commands are in response to higher level code.
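 *
 * Illustrative, hypothetical call from debugging code (the marker text is
 * an example only):
 *
 *	t4_record_mbox_marker(adapter, "port init", 9);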
 */
void t4_record_mbox_marker(struct adapter *adapter,
			   const void *marker, unsigned int size)
{
#ifdef T4_OS_LOG_MBOX_CMDS
	__be64 marker_cmd[MBOX_LEN/8];
	const unsigned int max_marker = sizeof marker_cmd - sizeof (__be64);
	unsigned int marker_cmd_size;

	if (size > max_marker)
		size = max_marker;

	marker_cmd[0] = cpu_to_be64(~0LLU);
	memcpy(&marker_cmd[1], marker, size);
	memset((unsigned char *)&marker_cmd[1] + size, 0, max_marker - size);
	marker_cmd_size = sizeof (__be64) + roundup(size, sizeof (__be64));

	t4_record_mbox(adapter, marker_cmd, marker_cmd_size, 0, 0);
#endif /* T4_OS_LOG_MBOX_CMDS */
}

/*
 * Delay time in microseconds to wait for mailbox access/fw reply
 * to mailbox command
 */
#define MIN_MBOX_CMD_DELAY 900
#define MBOX_CMD_DELAY 1000

/**
 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 * @timeout: time to wait for command to finish before timing out
 *	(negative implies @sleep_ok=false)
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command. If @rpl is not %NULL it is used to
 * store the FW's reply to the command. The command and its optional
 * reply are of the same length. Some FW commands like RESET and
 * INITIALIZE can take a considerable amount of time to execute.
 * @sleep_ok determines whether we may sleep while awaiting the response.
 * If sleeping is allowed we use progressive backoff otherwise we spin.
 * Note that passing in a negative @timeout is an alternate mechanism
 * for specifying @sleep_ok=false. This is useful when a higher level
 * interface allows for specification of @timeout but not @sleep_ok ...
 *
 * The return value is 0 on success or a negative errno on failure. A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error. In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
#ifdef T4_OS_NEEDS_MBOX_LOCKING
	u16 access = 0;
#endif
	u32 v;
	u64 res;
	int i, ret;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	T4_OS_MBOX_LOCKING(t4_os_list_t entry);
	u32 pcie_fw;

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

#ifdef T4_OS_NEEDS_MBOX_LOCKING
	/*
	 * Queue ourselves onto the mailbox access list. When our entry is at
	 * the front of the list, we have rights to access the mailbox. So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);

	for (i = 0; ; i++) {
		/*
		 * If we've waited too long, return a busy indication. This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start. We very rarely
		 * contend on access to the mailbox ... Also check for a
		 * firmware error which we'll report as a device error.
		 */
		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		if (i > 4*timeout || (pcie_fw & F_PCIE_FW_ERR)) {
			t4_os_atomic_list_del(&entry, &adap->mbox_lock);
			t4_report_fw_error(adap);
			ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
			T4_RECORD_MBOX(adap, cmd, size, ret, 0);
			return ret;
		}

		/*
		 * If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (t4_os_list_first_entry(&adap->mbox_list) == &entry)
			break;

		/*
		 * Delay for a bit before checking again ...
		 */
		if (sleep_ok) {
			usleep_range(MIN_MBOX_CMD_DELAY, MBOX_CMD_DELAY);
		} else {
			T4_OS_TOUCH_NMI_WATCHDOG();
			udelay(MBOX_CMD_DELAY);
		}
	}
	access = i;
#endif /* T4_OS_NEEDS_MBOX_LOCKING */

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, dequeue ourselves from the
	 * mailbox atomic access list and report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
							 &adap->mbox_lock));
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		T4_RECORD_MBOX(adap, cmd, size, access, ret);
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware. So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		       (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
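	 * The data registers are filled first; ownership is handed to the
	 * firmware only afterwards, by writing F_MBMSGVALID with owner FW
	 * to the control register.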
	 */
	T4_RECORD_MBOX(adap, cmd, size, access, 0);
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	/*
	 * XXX It's not clear that we need this anymore now
	 * XXX that we have mailbox logging ...
	 */
	CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	(void) t4_read_reg(adap, ctl_reg); /* flush write */

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	for (i = 0;
	     !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
	     i < timeout;
	     i++) {
		if (sleep_ok) {
			usleep_range(MIN_MBOX_CMD_DELAY, MBOX_CMD_DELAY);
		} else {
			T4_OS_TOUCH_NMI_WATCHDOG();
			udelay(MBOX_CMD_DELAY);
		}

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
							&adap->mbox_lock));

			T4_RECORD_MBOX(adap, cmd_rpl, size, access, i + 1);

			/*
			 * XXX It's not clear that we need this anymore now
			 * XXX that we have mailbox logging ...
			 */
			CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);
			CH_MSG(adap, INFO, HW,
			       "command completed in %d ms (%ssleeping)\n",
			       i + 1, sleep_ok ? "" : "non-");

			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command. Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry, &adap->mbox_lock));

	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	T4_RECORD_MBOX(adap, cmd, size, access, ret);
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}

#ifdef CONFIG_CUDBG
/*
 * The maximum number of times to iterate for FW reply before
 * issuing a mailbox timeout
 */
#define FW_REPLY_WAIT_LOOP 6000000

/**
 * t4_wr_mbox_meat_timeout_panic - send a command to FW through the given
 * mailbox. This function is a minimal version of t4_wr_mbox_meat_timeout()
 * and is only invoked during a kernel crash. Since this function is
 * called through an atomic notifier chain, we cannot sleep awaiting a
 * response from FW, hence repeatedly loop until we get a reply.
 *
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 */

static int t4_wr_mbox_meat_timeout_panic(struct adapter *adap, int mbox,
					 const void *cmd, int size, void *rpl)
{
	u32 v;
	u64 res;
	int i, ret;
	u64 cnt;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	u32 pcie_fw;

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * Check for a firmware error which we'll report as a
	 * device error.
	 */
	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR) {
		t4_report_fw_error(adap);
		ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
		return ret;
	}

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware. So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		       (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg); /* flush write */

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	for (cnt = 0;
	     !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
	     cnt < FW_REPLY_WAIT_LOOP;
	     cnt++) {
		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, size/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));

			CH_DUMP_MBOX(adap, mbox, data_reg, size / 8);

			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command. Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}
#endif

int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
#ifdef CONFIG_CUDBG
	if (adap->flags & K_CRASH)
		return t4_wr_mbox_meat_timeout_panic(adap, mbox, cmd, size,
						     rpl);
	else
#endif
		return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
					       sleep_ok, FW_CMD_MAX_TIMEOUT);

}

static int t4_edc_err_read(struct adapter *adap, int idx)
{
	u32 edc_ecc_err_addr_reg;
	u32 edc_bist_status_rdata_reg;

	if (is_t4(adap->params.chip)) {
		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
		return 0;
	}
	if (idx != MEM_EDC0 && idx != MEM_EDC1) {
		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
		return 0;
	}

	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);

	CH_WARN(adap,
		"edc%d err addr 0x%x: 0x%x.\n",
		idx, edc_ecc_err_addr_reg,
		t4_read_reg(adap, edc_ecc_err_addr_reg));
	CH_WARN(adap,
		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
		edc_bist_status_rdata_reg,
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));

	return 0;
}

/**
 * t4_memory_rw_addr - read/write adapter memory via PCIE memory window
 * @adap: the adapter
 * @win: PCI-E Memory Window to use
 * @addr: address within adapter memory
 * @len: amount of memory to transfer
 * @hbuf: host memory buffer
 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 * Reads/writes an [almost] arbitrary memory region in the firmware: the
 * firmware memory address and host buffer must be aligned on 32-bit
 * boundaries; the length may be arbitrary.
 *
 * NOTES:
 *  1. The memory is transferred as a raw byte sequence from/to the
 *     firmware's memory. If this memory contains data structures which
 *     contain multi-byte integers, it's the caller's responsibility to
 *     perform appropriate byte order conversions.
 *
 *  2. It is the Caller's responsibility to ensure that no other code
 *     uses the specified PCI-E Memory Window while this routine is
 *     using it. This is typically done via the use of OS-specific
 *     locks, etc.
 */
int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr,
		      u32 len, void *hbuf, int dir)
{
	u32 pos, offset, resid;
	u32 win_pf, mem_reg, mem_aperture, mem_base;
	u32 *buf;

	/* Argument sanity checks ...
	 */
	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
		return -EINVAL;
	buf = (u32 *)hbuf;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware. So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory. We need to grab that aperture in order to know
	 * how to use the specified window. The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space. For T4 this is an absolute PCI-E Bus Address. For T5
	 * the address is relative to BAR0.
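	 * (Both the aperture size and the window base are decoded from the
	 * BASE_WIN register read below; the window is then repositioned each
	 * time the copy loop crosses an aperture boundary.)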
	 */
	mem_reg = t4_read_reg(adap,
			      PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
						  win));

	/* a dead adapter will return 0xffffffff for PIO reads */
	if (mem_reg == 0xffffffff) {
		CH_WARN(adap, "Unable to read PCI-E Memory Window Base[%d]\n",
			win);
		return -ENXIO;
	}

	mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT);
	mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT;
	if (is_t4(adap->params.chip))
		mem_base -= adap->t4_bar0;
	win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.
	 */
	pos = addr & ~(mem_aperture-1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer. (Read it back to ensure that changes propagate before we
	 * attempt to use the new value.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win),
		     pos | win_pf);
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win));

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 *
	 * A note on Endianness issues:
	 *
	 * The "register" reads and writes below from/to the PCI-E Memory
	 * Window invoke the standard adapter Big-Endian to PCI-E Link
	 * Little-Endian "swizzle." As a result, if we have the following
	 * data in adapter memory:
	 *
	 *	Memory:  ... | b0 | b1 | b2 | b3 | ...
	 *	Address:      i+0  i+1  i+2  i+3
	 *
	 * Then a read of the adapter memory via the PCI-E Memory Window
	 * will yield:
	 *
	 *	x = readl(i)
	 *	31                  0
	 *	[ b3 | b2 | b1 | b0 ]
	 *
	 * If this value is stored into local memory on a Little-Endian system
	 * it will show up correctly in local memory as:
	 *
	 *	( ..., b0, b1, b2, b3, ... )
	 *
	 * But on a Big-Endian system, the store will show up in memory
	 * incorrectly swizzled as:
	 *
	 *	( ..., b3, b2, b1, b0, ... )
	 *
	 * So we need to account for this in the reads and writes to the
	 * PCI-E Memory Window below by undoing the register read/write
	 * swizzles.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
						mem_base + offset));
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32)cpu_to_le32(*buf++));
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next. Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_write_reg(adap,
				     PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
							 win), pos | win_pf);
			t4_read_reg(adap,
				    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
							win));
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount. The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
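	 * For writes, the bytes beyond "resid" in the final word are
	 * zero-filled before the last 32-bit store, so only whole words are
	 * ever pushed through the window.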
	 */
	if (resid) {
		union {
			u32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			last.word = le32_to_cpu(
					(__force __le32)t4_read_reg(adap,
						mem_base + offset));
			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
				bp[i] = last.byte[i];
		} else {
			last.word = *buf;
			for (i = resid; i < 4; i++)
				last.byte[i] = 0;
			t4_write_reg(adap, mem_base + offset,
				     (__force u32)cpu_to_le32(last.word));
		}
	}

	return 0;
}

/**
 * t4_memory_rw_mtype - read/write EDC 0, EDC 1 or MC via PCIE memory window
 * @adap: the adapter
 * @win: PCI-E Memory Window to use
 * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_HMA or MEM_MC
 * @maddr: address within indicated memory type
 * @len: amount of memory to transfer
 * @hbuf: host memory buffer
 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 * Reads/writes adapter memory using t4_memory_rw_addr(). This routine
 * provides a (memory type, address within memory type) interface.
 */
int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
		       u32 len, void *hbuf, int dir)
{
	u32 mtype_offset;
	u32 edc_size, mc_size;

	/* Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
	 * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
	 * MEM_HMA  = 4
	 */
	edc_size = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR));
	if (mtype == MEM_HMA) {
		mtype_offset = 2 * (edc_size * 1024 * 1024);
	} else if (mtype != MEM_MC1)
		mtype_offset = (mtype * (edc_size * 1024 * 1024));
	else {
		mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap,
						      A_MA_EXT_MEMORY0_BAR));
		mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}

	return t4_memory_rw_addr(adap, win,
				 mtype_offset + maddr, len,
				 hbuf, dir);
}

/*
 * Return the specified PCI-E Configuration Space register from our Physical
 * Function. We try first via a Firmware LDST Command (if fw_attach != 0)
 * since we prefer to let the firmware own all of these registers, but if that
 * fails we go for it directly ourselves.
 */
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
{
	u32 val;

	/*
	 * If fw_attach != 0, construct and send the Firmware LDST Command to
	 * retrieve the specified PCI-E Configuration Space register.
	 */
	if (drv_fw_attach != 0) {
		struct fw_ldst_cmd ldst_cmd;
		int ret;

		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
			cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
				    F_FW_CMD_REQUEST |
				    F_FW_CMD_READ |
				    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
		ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
		ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
		ldst_cmd.u.pcie.ctrl_to_fn =
			(F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
		ldst_cmd.u.pcie.r = reg;

		/*
		 * If the LDST Command succeeds, return the result, otherwise
		 * fall through to reading it directly ourselves ...

/*
 * Return the specified PCI-E Configuration Space register from our Physical
 * Function.  We try first via a Firmware LDST Command (if fw_attach != 0)
 * since we prefer to let the firmware own all of these registers, but if
 * that fails we go for it directly ourselves.
 */
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
{
	u32 val;

	/*
	 * If fw_attach != 0, construct and send the Firmware LDST Command to
	 * retrieve the specified PCI-E Configuration Space register.
	 */
	if (drv_fw_attach != 0) {
		struct fw_ldst_cmd ldst_cmd;
		int ret;

		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
		    cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
			F_FW_CMD_REQUEST |
			F_FW_CMD_READ |
			V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
		ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
		ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
		ldst_cmd.u.pcie.ctrl_to_fn =
		    (F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
		ldst_cmd.u.pcie.r = reg;

		/*
		 * If the LDST Command succeeds, return the result, otherwise
		 * fall through to reading it directly ourselves ...
		 */
		ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
		    &ldst_cmd);
		if (ret == 0)
			return be32_to_cpu(ldst_cmd.u.pcie.data[0]);

		CH_WARN(adap, "Firmware failed to return "
		    "Configuration Space register %d, err = %d\n",
		    reg, -ret);
	}

	/*
	 * Read the desired Configuration Space register via the PCI-E
	 * Backdoor mechanism.
	 */
	t4_hw_pci_read_cfg4(adap, reg, &val);
	return val;
}

/*
 * Get the memory window based on the base address passed to it.  The window
 * aperture is currently unhandled, but there is no use case for it right now.
 */
static int t4_get_window(struct adapter *adap, u64 pci_base, u64 pci_mask,
    u64 memwin_base, int drv_fw_attach)
{
	if (is_t4(adap->params.chip)) {
		u32 bar0;

		/*
		 * Truncation intentional: we only read the bottom 32-bits of
		 * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
		 * mechanism to read BAR0 instead of using
		 * pci_resource_start() because we could be operating from
		 * within a Virtual Machine which is trapping our accesses to
		 * our Configuration Space and we need to set up the PCI-E
		 * Memory Window decoders with the actual addresses which will
		 * be coming across the PCI-E link.
		 */
		bar0 = t4_read_pcie_cfg4(adap, pci_base, drv_fw_attach);
		bar0 &= pci_mask;
		adap->t4_bar0 = bar0;

		return bar0 + memwin_base;
	} else {
		/*
		 * For T5, only the relative offset inside the PCIe BAR is
		 * passed.
		 */
		return memwin_base;
	}
}

/* Get the default utility window (win0) used by everyone */
int t4_get_util_window(struct adapter *adap, int drv_fw_attach)
{
	return t4_get_window(adap, PCI_BASE_ADDRESS_0,
	    PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE, drv_fw_attach);
}
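/*
 * Illustrative sketch (not part of the driver): the typical attach-time
 * pairing of t4_get_util_window() with t4_setup_memwin() (defined just
 * below).  The window number 0 and the drv_fw_attach value of 1 are
 * assumptions for the example only; the guard macro is never defined, so
 * this is not compiled.
 */
#ifdef T4_EXAMPLE_SKETCH
static void
t4_example_init_memwin(struct adapter *adap)
{
	u32 memwin_base;

	/* Learn where memory window 0 lives ... */
	memwin_base = t4_get_util_window(adap, 1);
	/* ... and program the window decoder with that base address. */
	t4_setup_memwin(adap, memwin_base, 0);
}
#endif /* T4_EXAMPLE_SKETCH */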

/*
 * Set up memory window for accessing adapter memory ranges.  (Read
 * back MA register to ensure that changes propagate before we attempt
 * to use the new values.)
 */
void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
{
	t4_write_reg(adap,
	    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window),
	    memwin_base | V_BIR(0) |
	    V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
	t4_read_reg(adap,
	    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, window));
}

/**
 * t4_get_regs_len - return the size of the chip's register set
 * @adapter: the adapter
 *
 * Returns the size of the chip's BAR0 register space.
 */
unsigned int t4_get_regs_len(struct adapter *adapter)
{
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);

	switch (chip_version) {
	case CHELSIO_T4:
		return T4_REGMAP_SIZE;

	case CHELSIO_T5:
	case CHELSIO_T6:
		return T5_REGMAP_SIZE;
	}

	CH_ERR(adapter,
	    "Unsupported chip version %d\n", chip_version);
	return 0;
}
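/*
 * Illustrative sketch (not part of the driver): sizing a register-dump
 * buffer with t4_get_regs_len() before filling it with t4_get_regs()
 * (defined just below).  The caller-supplied buffer, the error value, and
 * the skip-on-short-buffer policy are assumptions for the example only; the
 * guard macro is never defined, so this is not compiled.
 */
#ifdef T4_EXAMPLE_SKETCH
static int
t4_example_dump_regs(struct adapter *adap, void *buf, size_t buf_size)
{
	unsigned int want = t4_get_regs_len(adap);

	/*
	 * t4_get_regs() silently truncates the dump to buf_size, so a caller
	 * that needs a complete dump should check the length itself.
	 */
	if (want == 0 || buf_size < want)
		return (-EINVAL);

	t4_get_regs(adap, buf, buf_size);
	return (0);
}
#endif /* T4_EXAMPLE_SKETCH */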

/**
 * t4_get_regs - read chip registers into provided buffer
 * @adap: the adapter
 * @buf: register buffer
 * @buf_size: size (in bytes) of register buffer
 *
 * If the provided register buffer isn't large enough for the chip's
 * full register range, the register dump will be truncated to the
 * register buffer's size.
 */
void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
{
	static const unsigned int t4_reg_ranges[] = {
		0x1008, 0x1108,
		0x1180, 0x1184,
		0x1190, 0x1194,
		0x11a0, 0x11a4,
		0x11b0, 0x11b4,
		0x11fc, 0x123c,
		0x1300, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x30d8,
		0x30e0, 0x30e4,
		0x30ec, 0x5910,
		0x5920, 0x5924,
		0x5960, 0x5960,
		0x5968, 0x5968,
		0x5970, 0x5970,
		0x5978, 0x5978,
		0x5980, 0x5980,
		0x5988, 0x5988,
		0x5990, 0x5990,
		0x5998, 0x5998,
		0x59a0, 0x59d4,
		0x5a00, 0x5ae0,
		0x5ae8, 0x5ae8,
		0x5af0, 0x5af0,
		0x5af8, 0x5af8,
		0x6000, 0x6098,
		0x6100, 0x6150,
		0x6200, 0x6208,
		0x6240, 0x6248,
		0x6280, 0x62b0,
		0x62c0, 0x6338,
		0x6370, 0x638c,
		0x6400, 0x643c,
		0x6500, 0x6524,
		0x6a00, 0x6a04,
		0x6a14, 0x6a38,
		0x6a60, 0x6a70,
		0x6a78, 0x6a78,
		0x6b00, 0x6b0c,
		0x6b1c, 0x6b84,
		0x6bf0, 0x6bf8,
		0x6c00, 0x6c0c,