/* t4_hw.c revision 309447 */
/*-
 * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/common/t4_hw.c 309447 2016-12-02 22:53:33Z jhb $");

#include "opt_inet.h"

#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "firmware/t4fw_interface.h"

/*
 * Override msleep() with a variant that is safe whether or not the kernel
 * has finished booting: while the system is still cold we cannot sleep, so
 * busy-wait with DELAY(); once timers are running, pause() instead.
 */
#undef msleep
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)

/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
59 */ 60static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, 61 int polarity, int attempts, int delay, u32 *valp) 62{ 63 while (1) { 64 u32 val = t4_read_reg(adapter, reg); 65 66 if (!!(val & mask) == polarity) { 67 if (valp) 68 *valp = val; 69 return 0; 70 } 71 if (--attempts == 0) 72 return -EAGAIN; 73 if (delay) 74 udelay(delay); 75 } 76} 77 78static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, 79 int polarity, int attempts, int delay) 80{ 81 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, 82 delay, NULL); 83} 84 85/** 86 * t4_set_reg_field - set a register field to a value 87 * @adapter: the adapter to program 88 * @addr: the register address 89 * @mask: specifies the portion of the register to modify 90 * @val: the new value for the register field 91 * 92 * Sets a register field specified by the supplied mask to the 93 * given value. 94 */ 95void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, 96 u32 val) 97{ 98 u32 v = t4_read_reg(adapter, addr) & ~mask; 99 100 t4_write_reg(adapter, addr, v | val); 101 (void) t4_read_reg(adapter, addr); /* flush */ 102} 103 104/** 105 * t4_read_indirect - read indirectly addressed registers 106 * @adap: the adapter 107 * @addr_reg: register holding the indirect address 108 * @data_reg: register holding the value of the indirect register 109 * @vals: where the read register values are stored 110 * @nregs: how many indirect registers to read 111 * @start_idx: index of first indirect register to read 112 * 113 * Reads registers that are accessed indirectly through an address/data 114 * register pair. 
115 */ 116void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, 117 unsigned int data_reg, u32 *vals, 118 unsigned int nregs, unsigned int start_idx) 119{ 120 while (nregs--) { 121 t4_write_reg(adap, addr_reg, start_idx); 122 *vals++ = t4_read_reg(adap, data_reg); 123 start_idx++; 124 } 125} 126 127/** 128 * t4_write_indirect - write indirectly addressed registers 129 * @adap: the adapter 130 * @addr_reg: register holding the indirect addresses 131 * @data_reg: register holding the value for the indirect registers 132 * @vals: values to write 133 * @nregs: how many indirect registers to write 134 * @start_idx: address of first indirect register to write 135 * 136 * Writes a sequential block of registers that are accessed indirectly 137 * through an address/data register pair. 138 */ 139void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, 140 unsigned int data_reg, const u32 *vals, 141 unsigned int nregs, unsigned int start_idx) 142{ 143 while (nregs--) { 144 t4_write_reg(adap, addr_reg, start_idx++); 145 t4_write_reg(adap, data_reg, *vals++); 146 } 147} 148 149/* 150 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor 151 * mechanism. This guarantees that we get the real value even if we're 152 * operating within a Virtual Machine and the Hypervisor is trapping our 153 * Configuration Space accesses. 154 * 155 * N.B. This routine should only be used as a last resort: the firmware uses 156 * the backdoor registers on a regular basis and we can end up 157 * conflicting with it's uses! 
 */
u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
{
	u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
	u32 val;

	/* T6 uses a different enable bit for the backdoor request. */
	if (chip_id(adap) <= CHELSIO_T5)
		req |= F_ENABLE;
	else
		req |= F_T6_ENABLE;

	if (is_t4(adap))
		req |= F_LOCALCFG;

	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
	val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);

	/*
	 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
	 * Configuration Space read.  (None of the other fields matter when
	 * F_ENABLE is 0 so a simple register write is easier than a
	 * read-modify-write via t4_set_reg_field().)
	 */
	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);

	return val;
}

/*
 * t4_report_fw_error - report firmware error
 * @adap: the adapter
 *
 * The adapter firmware can indicate error conditions to the host.
 * If the firmware has indicated an error, print out the reason for
 * the firmware error.
 */
static void t4_report_fw_error(struct adapter *adap)
{
	/*
	 * NOTE(review): table is indexed by G_PCIE_FW_EVAL(); assumes that
	 * field yields values 0..7 so the 8 entries cover every case —
	 * confirm against t4_regs.h.
	 */
	static const char *const reason[] = {
		"Crash",			/* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",			/* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR)
		CH_ERR(adap, "Firmware reports adapter error: %s\n",
		       reason[G_PCIE_FW_EVAL(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	/* Copy @nflit 64-bit flits out of the mailbox data registers. */
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
{
	CH_ALERT(adap,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt->u.assert.filename_0_7,
		  be32_to_cpu(asrt->u.assert.line),
		  be32_to_cpu(asrt->u.assert.x),
		  be32_to_cpu(asrt->u.assert.y));
}

/* Value read from the mailbox control register when we have no access. */
#define X_CIM_PF_NOACCESS 0xeeeeeeee
/**
 *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *	@timeout: time to wait for command to finish before timing out
 *		(negative implies @sleep_ok=false)
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  Some FW commands like RESET and
 *	INITIALIZE can take a considerable amount of time to execute.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff otherwise we spin.
 *	Note that passing in a negative @timeout is an alternate mechanism
 *	for specifying @sleep_ok=false.  This is useful when a higher level
 *	interface allows for specification of @timeout but not @sleep_ok ...
 *
 *	The return value is 0 on success or a negative errno on failure.
 *	A failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};
	u32 v;
	u64 res;
	int i, ms, delay_idx, ret;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	u32 pcie_fw;

	/* Commands must be a non-zero multiple of 16 bytes and fit the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/* VFs use a different set of mailbox registers. */
	if (adap->flags & IS_VF) {
		if (is_t6(adap))
			data_reg = FW_T6VF_MBDATA_BASE_ADDR;
		else
			data_reg = FW_T4VF_MBDATA_BASE_ADDR;
		ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
	}

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, dequeue ourselves from the
	 * mailbox atomic access list and report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		       (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	if (adap->flags & IS_VF) {
		/*
		 * For the VFs, the Mailbox Data "registers" are
		 * actually backed by T4's "MA" interface rather than
		 * PL Registers (as is the case for the PFs).  Because
		 * these are in different coherency domains, the write
		 * to the VF's PL-register-backed Mailbox Control can
		 * race in front of the writes to the MA-backed VF
		 * Mailbox Data "registers".  So we need to do a
		 * read-back on at least one byte of the VF Mailbox
		 * Data registers before doing the write to the VF
		 * Mailbox Control register.
		 */
		t4_read_reg(adap, data_reg);
	}

	CH_DUMP_MBOX(adap, mbox, data_reg);

	/* Hand the mailbox to the firmware. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);	/* flush write */

	delay_idx = 0;
	ms = delay[0];

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	pcie_fw = 0;
	for (i = 0; i < timeout; i += ms) {
		if (!(adap->flags & IS_VF)) {
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (pcie_fw & F_PCIE_FW_ERR)
				break;
		}
		if (sleep_ok) {
			ms = delay[delay_idx];	/* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				/* Spurious ownership without a message; retry. */
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));

			CH_DUMP_MBOX(adap, mbox, data_reg);

			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				/* An out-of-band FW assertion, not our reply. */
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}

/* Convenience wrapper that uses the default firmware command timeout. */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
				       sleep_ok, FW_CMD_MAX_TIMEOUT);

}

/*
 * Dump the EDC ECC error address and BIST status registers for EDC @idx.
 * T5+ only; T4 and out-of-range indices are rejected with a warning.
 */
static int t4_edc_err_read(struct adapter *adap, int idx)
{
	u32 edc_ecc_err_addr_reg;
	u32 edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
		return 0;
	}
	if (idx != 0 && idx != 1) {
		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
		return 0;
	}

	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);

	CH_WARN(adap,
		"edc%d err addr 0x%x: 0x%x.\n",
		idx, edc_ecc_err_addr_reg,
		t4_read_reg(adap, edc_ecc_err_addr_reg));
	CH_WARN(adap,
		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
		edc_bist_status_rdata_reg,
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));

	return 0;
}

/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which MC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

	/* T4 has a single MC; T5+ use per-MC register blocks. */
	if (is_t4(adap)) {
		mc_bist_cmd_reg = A_MC_BIST_CMD;
		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
						  idx);
		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
						  idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
		     F_START_BIST | V_BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
						    idx);
	} else {
/*
 * These macros are missing in the t4_regs.h file.
 * Added temporarily for testing.
 */
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
						       idx);
		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
						       idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
	}

	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd_reg,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)

	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/**
 *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
 *	@adap: the adapter
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to read
 *	@buf: host memory buffer
 *
 *	Reads an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address, length and host buffer must be aligned on
 *	32-bit boundaries.  The memory is returned as a raw byte sequence from
 *	the firmware's memory.  If this memory contains data structures which
 *	contain multi-byte integers, it's the caller's responsibility to
 *	perform appropriate byte order conversions.
 */
int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
		__be32 *buf)
{
	u32 pos, start, end, offset;
	int ret;

	/*
	 * Argument sanity checks ...
	 */
	if ((addr & 0x3) || (len & 0x3))
		return -EINVAL;

	/*
	 * The underlying EDC/MC read routines read 64 bytes at a time so we
	 * need to round down the start and round up the end.  We'll start
	 * copying out of the first line at (addr - start) a word at a time.
	 */
	start = addr & ~(64-1);
	end = (addr + len + 64-1) & ~(64-1);
	offset = (addr - start)/sizeof(__be32);

	for (pos = start; pos < end; pos += 64, offset = 0) {
		__be32 data[16];

		/*
		 * Read the chip's memory block and bail if there's an error.
		 */
		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mtype, pos, data, NULL);
		if (ret)
			return ret;

		/*
		 * Copy the data into the caller's memory buffer.
		 */
		while (offset < 16 && len > 0) {
			*buf++ = data[offset++];
			len -= sizeof(__be32);
		}
	}

	return 0;
}

/*
 * Return the specified PCI-E Configuration Space register from our Physical
 * Function.  We try first via a Firmware LDST Command (if fw_attach != 0)
 * since we prefer to let the firmware own all of these registers, but if that
 * fails we go for it directly ourselves.
 */
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
{

	/*
	 * If fw_attach != 0, construct and send the Firmware LDST Command to
	 * retrieve the specified PCI-E Configuration Space register.
	 */
	if (drv_fw_attach != 0) {
		struct fw_ldst_cmd ldst_cmd;
		int ret;

		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
			cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
				    F_FW_CMD_REQUEST |
				    F_FW_CMD_READ |
				    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
		ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
		ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
		ldst_cmd.u.pcie.ctrl_to_fn =
			(F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
		ldst_cmd.u.pcie.r = reg;

		/*
		 * If the LDST Command succeeds, return the result, otherwise
		 * fall through to reading it directly ourselves ...
		 */
		ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
				 &ldst_cmd);
		if (ret == 0)
			return be32_to_cpu(ldst_cmd.u.pcie.data[0]);

		CH_WARN(adap, "Firmware failed to return "
			"Configuration Space register %d, err = %d\n",
			reg, -ret);
	}

	/*
	 * Read the desired Configuration Space register via the PCI-E
	 * Backdoor mechanism.
	 */
	return t4_hw_pci_read_cfg4(adap, reg);
}

/**
 *	t4_get_regs_len - return the size of the chip's register set
 *	@adapter: the adapter
 *
 *	Returns the size of the chip's BAR0 register space.
718 */ 719unsigned int t4_get_regs_len(struct adapter *adapter) 720{ 721 unsigned int chip_version = chip_id(adapter); 722 723 switch (chip_version) { 724 case CHELSIO_T4: 725 if (adapter->flags & IS_VF) 726 return FW_T4VF_REGMAP_SIZE; 727 return T4_REGMAP_SIZE; 728 729 case CHELSIO_T5: 730 case CHELSIO_T6: 731 if (adapter->flags & IS_VF) 732 return FW_T4VF_REGMAP_SIZE; 733 return T5_REGMAP_SIZE; 734 } 735 736 CH_ERR(adapter, 737 "Unsupported chip version %d\n", chip_version); 738 return 0; 739} 740 741/** 742 * t4_get_regs - read chip registers into provided buffer 743 * @adap: the adapter 744 * @buf: register buffer 745 * @buf_size: size (in bytes) of register buffer 746 * 747 * If the provided register buffer isn't large enough for the chip's 748 * full register range, the register dump will be truncated to the 749 * register buffer's size. 750 */ 751void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size) 752{ 753 static const unsigned int t4_reg_ranges[] = { 754 0x1008, 0x1108, 755 0x1180, 0x1184, 756 0x1190, 0x1194, 757 0x11a0, 0x11a4, 758 0x11b0, 0x11b4, 759 0x11fc, 0x123c, 760 0x1300, 0x173c, 761 0x1800, 0x18fc, 762 0x3000, 0x30d8, 763 0x30e0, 0x30e4, 764 0x30ec, 0x5910, 765 0x5920, 0x5924, 766 0x5960, 0x5960, 767 0x5968, 0x5968, 768 0x5970, 0x5970, 769 0x5978, 0x5978, 770 0x5980, 0x5980, 771 0x5988, 0x5988, 772 0x5990, 0x5990, 773 0x5998, 0x5998, 774 0x59a0, 0x59d4, 775 0x5a00, 0x5ae0, 776 0x5ae8, 0x5ae8, 777 0x5af0, 0x5af0, 778 0x5af8, 0x5af8, 779 0x6000, 0x6098, 780 0x6100, 0x6150, 781 0x6200, 0x6208, 782 0x6240, 0x6248, 783 0x6280, 0x62b0, 784 0x62c0, 0x6338, 785 0x6370, 0x638c, 786 0x6400, 0x643c, 787 0x6500, 0x6524, 788 0x6a00, 0x6a04, 789 0x6a14, 0x6a38, 790 0x6a60, 0x6a70, 791 0x6a78, 0x6a78, 792 0x6b00, 0x6b0c, 793 0x6b1c, 0x6b84, 794 0x6bf0, 0x6bf8, 795 0x6c00, 0x6c0c, 796 0x6c1c, 0x6c84, 797 0x6cf0, 0x6cf8, 798 0x6d00, 0x6d0c, 799 0x6d1c, 0x6d84, 800 0x6df0, 0x6df8, 801 0x6e00, 0x6e0c, 802 0x6e1c, 0x6e84, 803 0x6ef0, 0x6ef8, 804 0x6f00, 
0x6f0c, 805 0x6f1c, 0x6f84, 806 0x6ff0, 0x6ff8, 807 0x7000, 0x700c, 808 0x701c, 0x7084, 809 0x70f0, 0x70f8, 810 0x7100, 0x710c, 811 0x711c, 0x7184, 812 0x71f0, 0x71f8, 813 0x7200, 0x720c, 814 0x721c, 0x7284, 815 0x72f0, 0x72f8, 816 0x7300, 0x730c, 817 0x731c, 0x7384, 818 0x73f0, 0x73f8, 819 0x7400, 0x7450, 820 0x7500, 0x7530, 821 0x7600, 0x760c, 822 0x7614, 0x761c, 823 0x7680, 0x76cc, 824 0x7700, 0x7798, 825 0x77c0, 0x77fc, 826 0x7900, 0x79fc, 827 0x7b00, 0x7b58, 828 0x7b60, 0x7b84, 829 0x7b8c, 0x7c38, 830 0x7d00, 0x7d38, 831 0x7d40, 0x7d80, 832 0x7d8c, 0x7ddc, 833 0x7de4, 0x7e04, 834 0x7e10, 0x7e1c, 835 0x7e24, 0x7e38, 836 0x7e40, 0x7e44, 837 0x7e4c, 0x7e78, 838 0x7e80, 0x7ea4, 839 0x7eac, 0x7edc, 840 0x7ee8, 0x7efc, 841 0x8dc0, 0x8e04, 842 0x8e10, 0x8e1c, 843 0x8e30, 0x8e78, 844 0x8ea0, 0x8eb8, 845 0x8ec0, 0x8f6c, 846 0x8fc0, 0x9008, 847 0x9010, 0x9058, 848 0x9060, 0x9060, 849 0x9068, 0x9074, 850 0x90fc, 0x90fc, 851 0x9400, 0x9408, 852 0x9410, 0x9458, 853 0x9600, 0x9600, 854 0x9608, 0x9638, 855 0x9640, 0x96bc, 856 0x9800, 0x9808, 857 0x9820, 0x983c, 858 0x9850, 0x9864, 859 0x9c00, 0x9c6c, 860 0x9c80, 0x9cec, 861 0x9d00, 0x9d6c, 862 0x9d80, 0x9dec, 863 0x9e00, 0x9e6c, 864 0x9e80, 0x9eec, 865 0x9f00, 0x9f6c, 866 0x9f80, 0x9fec, 867 0xd004, 0xd004, 868 0xd010, 0xd03c, 869 0xdfc0, 0xdfe0, 870 0xe000, 0xea7c, 871 0xf000, 0x11190, 872 0x19040, 0x1906c, 873 0x19078, 0x19080, 874 0x1908c, 0x190e4, 875 0x190f0, 0x190f8, 876 0x19100, 0x19110, 877 0x19120, 0x19124, 878 0x19150, 0x19194, 879 0x1919c, 0x191b0, 880 0x191d0, 0x191e8, 881 0x19238, 0x1924c, 882 0x193f8, 0x1943c, 883 0x1944c, 0x19474, 884 0x19490, 0x194e0, 885 0x194f0, 0x194f8, 886 0x19800, 0x19c08, 887 0x19c10, 0x19c90, 888 0x19ca0, 0x19ce4, 889 0x19cf0, 0x19d40, 890 0x19d50, 0x19d94, 891 0x19da0, 0x19de8, 892 0x19df0, 0x19e40, 893 0x19e50, 0x19e90, 894 0x19ea0, 0x19f4c, 895 0x1a000, 0x1a004, 896 0x1a010, 0x1a06c, 897 0x1a0b0, 0x1a0e4, 898 0x1a0ec, 0x1a0f4, 899 0x1a100, 0x1a108, 900 0x1a114, 0x1a120, 901 0x1a128, 
0x1a130, 902 0x1a138, 0x1a138, 903 0x1a190, 0x1a1c4, 904 0x1a1fc, 0x1a1fc, 905 0x1e040, 0x1e04c, 906 0x1e284, 0x1e28c, 907 0x1e2c0, 0x1e2c0, 908 0x1e2e0, 0x1e2e0, 909 0x1e300, 0x1e384, 910 0x1e3c0, 0x1e3c8, 911 0x1e440, 0x1e44c, 912 0x1e684, 0x1e68c, 913 0x1e6c0, 0x1e6c0, 914 0x1e6e0, 0x1e6e0, 915 0x1e700, 0x1e784, 916 0x1e7c0, 0x1e7c8, 917 0x1e840, 0x1e84c, 918 0x1ea84, 0x1ea8c, 919 0x1eac0, 0x1eac0, 920 0x1eae0, 0x1eae0, 921 0x1eb00, 0x1eb84, 922 0x1ebc0, 0x1ebc8, 923 0x1ec40, 0x1ec4c, 924 0x1ee84, 0x1ee8c, 925 0x1eec0, 0x1eec0, 926 0x1eee0, 0x1eee0, 927 0x1ef00, 0x1ef84, 928 0x1efc0, 0x1efc8, 929 0x1f040, 0x1f04c, 930 0x1f284, 0x1f28c, 931 0x1f2c0, 0x1f2c0, 932 0x1f2e0, 0x1f2e0, 933 0x1f300, 0x1f384, 934 0x1f3c0, 0x1f3c8, 935 0x1f440, 0x1f44c, 936 0x1f684, 0x1f68c, 937 0x1f6c0, 0x1f6c0, 938 0x1f6e0, 0x1f6e0, 939 0x1f700, 0x1f784, 940 0x1f7c0, 0x1f7c8, 941 0x1f840, 0x1f84c, 942 0x1fa84, 0x1fa8c, 943 0x1fac0, 0x1fac0, 944 0x1fae0, 0x1fae0, 945 0x1fb00, 0x1fb84, 946 0x1fbc0, 0x1fbc8, 947 0x1fc40, 0x1fc4c, 948 0x1fe84, 0x1fe8c, 949 0x1fec0, 0x1fec0, 950 0x1fee0, 0x1fee0, 951 0x1ff00, 0x1ff84, 952 0x1ffc0, 0x1ffc8, 953 0x20000, 0x2002c, 954 0x20100, 0x2013c, 955 0x20190, 0x201a0, 956 0x201a8, 0x201b8, 957 0x201c4, 0x201c8, 958 0x20200, 0x20318, 959 0x20400, 0x204b4, 960 0x204c0, 0x20528, 961 0x20540, 0x20614, 962 0x21000, 0x21040, 963 0x2104c, 0x21060, 964 0x210c0, 0x210ec, 965 0x21200, 0x21268, 966 0x21270, 0x21284, 967 0x212fc, 0x21388, 968 0x21400, 0x21404, 969 0x21500, 0x21500, 970 0x21510, 0x21518, 971 0x2152c, 0x21530, 972 0x2153c, 0x2153c, 973 0x21550, 0x21554, 974 0x21600, 0x21600, 975 0x21608, 0x2161c, 976 0x21624, 0x21628, 977 0x21630, 0x21634, 978 0x2163c, 0x2163c, 979 0x21700, 0x2171c, 980 0x21780, 0x2178c, 981 0x21800, 0x21818, 982 0x21820, 0x21828, 983 0x21830, 0x21848, 984 0x21850, 0x21854, 985 0x21860, 0x21868, 986 0x21870, 0x21870, 987 0x21878, 0x21898, 988 0x218a0, 0x218a8, 989 0x218b0, 0x218c8, 990 0x218d0, 0x218d4, 991 0x218e0, 0x218e8, 992 
0x218f0, 0x218f0, 993 0x218f8, 0x21a18, 994 0x21a20, 0x21a28, 995 0x21a30, 0x21a48, 996 0x21a50, 0x21a54, 997 0x21a60, 0x21a68, 998 0x21a70, 0x21a70, 999 0x21a78, 0x21a98, 1000 0x21aa0, 0x21aa8, 1001 0x21ab0, 0x21ac8, 1002 0x21ad0, 0x21ad4, 1003 0x21ae0, 0x21ae8, 1004 0x21af0, 0x21af0, 1005 0x21af8, 0x21c18, 1006 0x21c20, 0x21c20, 1007 0x21c28, 0x21c30, 1008 0x21c38, 0x21c38, 1009 0x21c80, 0x21c98, 1010 0x21ca0, 0x21ca8, 1011 0x21cb0, 0x21cc8, 1012 0x21cd0, 0x21cd4, 1013 0x21ce0, 0x21ce8, 1014 0x21cf0, 0x21cf0, 1015 0x21cf8, 0x21d7c, 1016 0x21e00, 0x21e04, 1017 0x22000, 0x2202c, 1018 0x22100, 0x2213c, 1019 0x22190, 0x221a0, 1020 0x221a8, 0x221b8, 1021 0x221c4, 0x221c8, 1022 0x22200, 0x22318, 1023 0x22400, 0x224b4, 1024 0x224c0, 0x22528, 1025 0x22540, 0x22614, 1026 0x23000, 0x23040, 1027 0x2304c, 0x23060, 1028 0x230c0, 0x230ec, 1029 0x23200, 0x23268, 1030 0x23270, 0x23284, 1031 0x232fc, 0x23388, 1032 0x23400, 0x23404, 1033 0x23500, 0x23500, 1034 0x23510, 0x23518, 1035 0x2352c, 0x23530, 1036 0x2353c, 0x2353c, 1037 0x23550, 0x23554, 1038 0x23600, 0x23600, 1039 0x23608, 0x2361c, 1040 0x23624, 0x23628, 1041 0x23630, 0x23634, 1042 0x2363c, 0x2363c, 1043 0x23700, 0x2371c, 1044 0x23780, 0x2378c, 1045 0x23800, 0x23818, 1046 0x23820, 0x23828, 1047 0x23830, 0x23848, 1048 0x23850, 0x23854, 1049 0x23860, 0x23868, 1050 0x23870, 0x23870, 1051 0x23878, 0x23898, 1052 0x238a0, 0x238a8, 1053 0x238b0, 0x238c8, 1054 0x238d0, 0x238d4, 1055 0x238e0, 0x238e8, 1056 0x238f0, 0x238f0, 1057 0x238f8, 0x23a18, 1058 0x23a20, 0x23a28, 1059 0x23a30, 0x23a48, 1060 0x23a50, 0x23a54, 1061 0x23a60, 0x23a68, 1062 0x23a70, 0x23a70, 1063 0x23a78, 0x23a98, 1064 0x23aa0, 0x23aa8, 1065 0x23ab0, 0x23ac8, 1066 0x23ad0, 0x23ad4, 1067 0x23ae0, 0x23ae8, 1068 0x23af0, 0x23af0, 1069 0x23af8, 0x23c18, 1070 0x23c20, 0x23c20, 1071 0x23c28, 0x23c30, 1072 0x23c38, 0x23c38, 1073 0x23c80, 0x23c98, 1074 0x23ca0, 0x23ca8, 1075 0x23cb0, 0x23cc8, 1076 0x23cd0, 0x23cd4, 1077 0x23ce0, 0x23ce8, 1078 0x23cf0, 0x23cf0, 1079 
0x23cf8, 0x23d7c, 1080 0x23e00, 0x23e04, 1081 0x24000, 0x2402c, 1082 0x24100, 0x2413c, 1083 0x24190, 0x241a0, 1084 0x241a8, 0x241b8, 1085 0x241c4, 0x241c8, 1086 0x24200, 0x24318, 1087 0x24400, 0x244b4, 1088 0x244c0, 0x24528, 1089 0x24540, 0x24614, 1090 0x25000, 0x25040, 1091 0x2504c, 0x25060, 1092 0x250c0, 0x250ec, 1093 0x25200, 0x25268, 1094 0x25270, 0x25284, 1095 0x252fc, 0x25388, 1096 0x25400, 0x25404, 1097 0x25500, 0x25500, 1098 0x25510, 0x25518, 1099 0x2552c, 0x25530, 1100 0x2553c, 0x2553c, 1101 0x25550, 0x25554, 1102 0x25600, 0x25600, 1103 0x25608, 0x2561c, 1104 0x25624, 0x25628, 1105 0x25630, 0x25634, 1106 0x2563c, 0x2563c, 1107 0x25700, 0x2571c, 1108 0x25780, 0x2578c, 1109 0x25800, 0x25818, 1110 0x25820, 0x25828, 1111 0x25830, 0x25848, 1112 0x25850, 0x25854, 1113 0x25860, 0x25868, 1114 0x25870, 0x25870, 1115 0x25878, 0x25898, 1116 0x258a0, 0x258a8, 1117 0x258b0, 0x258c8, 1118 0x258d0, 0x258d4, 1119 0x258e0, 0x258e8, 1120 0x258f0, 0x258f0, 1121 0x258f8, 0x25a18, 1122 0x25a20, 0x25a28, 1123 0x25a30, 0x25a48, 1124 0x25a50, 0x25a54, 1125 0x25a60, 0x25a68, 1126 0x25a70, 0x25a70, 1127 0x25a78, 0x25a98, 1128 0x25aa0, 0x25aa8, 1129 0x25ab0, 0x25ac8, 1130 0x25ad0, 0x25ad4, 1131 0x25ae0, 0x25ae8, 1132 0x25af0, 0x25af0, 1133 0x25af8, 0x25c18, 1134 0x25c20, 0x25c20, 1135 0x25c28, 0x25c30, 1136 0x25c38, 0x25c38, 1137 0x25c80, 0x25c98, 1138 0x25ca0, 0x25ca8, 1139 0x25cb0, 0x25cc8, 1140 0x25cd0, 0x25cd4, 1141 0x25ce0, 0x25ce8, 1142 0x25cf0, 0x25cf0, 1143 0x25cf8, 0x25d7c, 1144 0x25e00, 0x25e04, 1145 0x26000, 0x2602c, 1146 0x26100, 0x2613c, 1147 0x26190, 0x261a0, 1148 0x261a8, 0x261b8, 1149 0x261c4, 0x261c8, 1150 0x26200, 0x26318, 1151 0x26400, 0x264b4, 1152 0x264c0, 0x26528, 1153 0x26540, 0x26614, 1154 0x27000, 0x27040, 1155 0x2704c, 0x27060, 1156 0x270c0, 0x270ec, 1157 0x27200, 0x27268, 1158 0x27270, 0x27284, 1159 0x272fc, 0x27388, 1160 0x27400, 0x27404, 1161 0x27500, 0x27500, 1162 0x27510, 0x27518, 1163 0x2752c, 0x27530, 1164 0x2753c, 0x2753c, 1165 0x27550, 0x27554, 
1166 0x27600, 0x27600, 1167 0x27608, 0x2761c, 1168 0x27624, 0x27628, 1169 0x27630, 0x27634, 1170 0x2763c, 0x2763c, 1171 0x27700, 0x2771c, 1172 0x27780, 0x2778c, 1173 0x27800, 0x27818, 1174 0x27820, 0x27828, 1175 0x27830, 0x27848, 1176 0x27850, 0x27854, 1177 0x27860, 0x27868, 1178 0x27870, 0x27870, 1179 0x27878, 0x27898, 1180 0x278a0, 0x278a8, 1181 0x278b0, 0x278c8, 1182 0x278d0, 0x278d4, 1183 0x278e0, 0x278e8, 1184 0x278f0, 0x278f0, 1185 0x278f8, 0x27a18, 1186 0x27a20, 0x27a28, 1187 0x27a30, 0x27a48, 1188 0x27a50, 0x27a54, 1189 0x27a60, 0x27a68, 1190 0x27a70, 0x27a70, 1191 0x27a78, 0x27a98, 1192 0x27aa0, 0x27aa8, 1193 0x27ab0, 0x27ac8, 1194 0x27ad0, 0x27ad4, 1195 0x27ae0, 0x27ae8, 1196 0x27af0, 0x27af0, 1197 0x27af8, 0x27c18, 1198 0x27c20, 0x27c20, 1199 0x27c28, 0x27c30, 1200 0x27c38, 0x27c38, 1201 0x27c80, 0x27c98, 1202 0x27ca0, 0x27ca8, 1203 0x27cb0, 0x27cc8, 1204 0x27cd0, 0x27cd4, 1205 0x27ce0, 0x27ce8, 1206 0x27cf0, 0x27cf0, 1207 0x27cf8, 0x27d7c, 1208 0x27e00, 0x27e04, 1209 }; 1210 1211 static const unsigned int t4vf_reg_ranges[] = { 1212 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), 1213 VF_MPS_REG(A_MPS_VF_CTL), 1214 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), 1215 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI), 1216 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), 1217 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), 1218 FW_T4VF_MBDATA_BASE_ADDR, 1219 FW_T4VF_MBDATA_BASE_ADDR + 1220 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), 1221 }; 1222 1223 static const unsigned int t5_reg_ranges[] = { 1224 0x1008, 0x10c0, 1225 0x10cc, 0x10f8, 1226 0x1100, 0x1100, 1227 0x110c, 0x1148, 1228 0x1180, 0x1184, 1229 0x1190, 0x1194, 1230 0x11a0, 0x11a4, 1231 0x11b0, 0x11b4, 1232 0x11fc, 0x123c, 1233 0x1280, 0x173c, 1234 0x1800, 0x18fc, 1235 0x3000, 0x3028, 1236 0x3060, 0x30b0, 1237 0x30b8, 0x30d8, 1238 0x30e0, 0x30fc, 1239 0x3140, 0x357c, 1240 0x35a8, 0x35cc, 1241 0x35ec, 0x35ec, 1242 0x3600, 0x5624, 1243 0x56cc, 0x56ec, 1244 0x56f4, 0x5720, 1245 0x5728, 0x575c, 1246 
0x580c, 0x5814, 1247 0x5890, 0x589c, 1248 0x58a4, 0x58ac, 1249 0x58b8, 0x58bc, 1250 0x5940, 0x59c8, 1251 0x59d0, 0x59dc, 1252 0x59fc, 0x5a18, 1253 0x5a60, 0x5a70, 1254 0x5a80, 0x5a9c, 1255 0x5b94, 0x5bfc, 1256 0x6000, 0x6020, 1257 0x6028, 0x6040, 1258 0x6058, 0x609c, 1259 0x60a8, 0x614c, 1260 0x7700, 0x7798, 1261 0x77c0, 0x78fc, 1262 0x7b00, 0x7b58, 1263 0x7b60, 0x7b84, 1264 0x7b8c, 0x7c54, 1265 0x7d00, 0x7d38, 1266 0x7d40, 0x7d80, 1267 0x7d8c, 0x7ddc, 1268 0x7de4, 0x7e04, 1269 0x7e10, 0x7e1c, 1270 0x7e24, 0x7e38, 1271 0x7e40, 0x7e44, 1272 0x7e4c, 0x7e78, 1273 0x7e80, 0x7edc, 1274 0x7ee8, 0x7efc, 1275 0x8dc0, 0x8de0, 1276 0x8df8, 0x8e04, 1277 0x8e10, 0x8e84, 1278 0x8ea0, 0x8f84, 1279 0x8fc0, 0x9058, 1280 0x9060, 0x9060, 1281 0x9068, 0x90f8, 1282 0x9400, 0x9408, 1283 0x9410, 0x9470, 1284 0x9600, 0x9600, 1285 0x9608, 0x9638, 1286 0x9640, 0x96f4, 1287 0x9800, 0x9808, 1288 0x9820, 0x983c, 1289 0x9850, 0x9864, 1290 0x9c00, 0x9c6c, 1291 0x9c80, 0x9cec, 1292 0x9d00, 0x9d6c, 1293 0x9d80, 0x9dec, 1294 0x9e00, 0x9e6c, 1295 0x9e80, 0x9eec, 1296 0x9f00, 0x9f6c, 1297 0x9f80, 0xa020, 1298 0xd004, 0xd004, 1299 0xd010, 0xd03c, 1300 0xdfc0, 0xdfe0, 1301 0xe000, 0x1106c, 1302 0x11074, 0x11088, 1303 0x1109c, 0x1117c, 1304 0x11190, 0x11204, 1305 0x19040, 0x1906c, 1306 0x19078, 0x19080, 1307 0x1908c, 0x190e8, 1308 0x190f0, 0x190f8, 1309 0x19100, 0x19110, 1310 0x19120, 0x19124, 1311 0x19150, 0x19194, 1312 0x1919c, 0x191b0, 1313 0x191d0, 0x191e8, 1314 0x19238, 0x19290, 1315 0x193f8, 0x19428, 1316 0x19430, 0x19444, 1317 0x1944c, 0x1946c, 1318 0x19474, 0x19474, 1319 0x19490, 0x194cc, 1320 0x194f0, 0x194f8, 1321 0x19c00, 0x19c08, 1322 0x19c10, 0x19c60, 1323 0x19c94, 0x19ce4, 1324 0x19cf0, 0x19d40, 1325 0x19d50, 0x19d94, 1326 0x19da0, 0x19de8, 1327 0x19df0, 0x19e10, 1328 0x19e50, 0x19e90, 1329 0x19ea0, 0x19f24, 1330 0x19f34, 0x19f34, 1331 0x19f40, 0x19f50, 1332 0x19f90, 0x19fb4, 1333 0x19fc4, 0x19fe4, 1334 0x1a000, 0x1a004, 1335 0x1a010, 0x1a06c, 1336 0x1a0b0, 0x1a0e4, 1337 0x1a0ec, 0x1a0f8, 
1338 0x1a100, 0x1a108, 1339 0x1a114, 0x1a120, 1340 0x1a128, 0x1a130, 1341 0x1a138, 0x1a138, 1342 0x1a190, 0x1a1c4, 1343 0x1a1fc, 0x1a1fc, 1344 0x1e008, 0x1e00c, 1345 0x1e040, 0x1e044, 1346 0x1e04c, 0x1e04c, 1347 0x1e284, 0x1e290, 1348 0x1e2c0, 0x1e2c0, 1349 0x1e2e0, 0x1e2e0, 1350 0x1e300, 0x1e384, 1351 0x1e3c0, 0x1e3c8, 1352 0x1e408, 0x1e40c, 1353 0x1e440, 0x1e444, 1354 0x1e44c, 0x1e44c, 1355 0x1e684, 0x1e690, 1356 0x1e6c0, 0x1e6c0, 1357 0x1e6e0, 0x1e6e0, 1358 0x1e700, 0x1e784, 1359 0x1e7c0, 0x1e7c8, 1360 0x1e808, 0x1e80c, 1361 0x1e840, 0x1e844, 1362 0x1e84c, 0x1e84c, 1363 0x1ea84, 0x1ea90, 1364 0x1eac0, 0x1eac0, 1365 0x1eae0, 0x1eae0, 1366 0x1eb00, 0x1eb84, 1367 0x1ebc0, 0x1ebc8, 1368 0x1ec08, 0x1ec0c, 1369 0x1ec40, 0x1ec44, 1370 0x1ec4c, 0x1ec4c, 1371 0x1ee84, 0x1ee90, 1372 0x1eec0, 0x1eec0, 1373 0x1eee0, 0x1eee0, 1374 0x1ef00, 0x1ef84, 1375 0x1efc0, 0x1efc8, 1376 0x1f008, 0x1f00c, 1377 0x1f040, 0x1f044, 1378 0x1f04c, 0x1f04c, 1379 0x1f284, 0x1f290, 1380 0x1f2c0, 0x1f2c0, 1381 0x1f2e0, 0x1f2e0, 1382 0x1f300, 0x1f384, 1383 0x1f3c0, 0x1f3c8, 1384 0x1f408, 0x1f40c, 1385 0x1f440, 0x1f444, 1386 0x1f44c, 0x1f44c, 1387 0x1f684, 0x1f690, 1388 0x1f6c0, 0x1f6c0, 1389 0x1f6e0, 0x1f6e0, 1390 0x1f700, 0x1f784, 1391 0x1f7c0, 0x1f7c8, 1392 0x1f808, 0x1f80c, 1393 0x1f840, 0x1f844, 1394 0x1f84c, 0x1f84c, 1395 0x1fa84, 0x1fa90, 1396 0x1fac0, 0x1fac0, 1397 0x1fae0, 0x1fae0, 1398 0x1fb00, 0x1fb84, 1399 0x1fbc0, 0x1fbc8, 1400 0x1fc08, 0x1fc0c, 1401 0x1fc40, 0x1fc44, 1402 0x1fc4c, 0x1fc4c, 1403 0x1fe84, 0x1fe90, 1404 0x1fec0, 0x1fec0, 1405 0x1fee0, 0x1fee0, 1406 0x1ff00, 0x1ff84, 1407 0x1ffc0, 0x1ffc8, 1408 0x30000, 0x30030, 1409 0x30038, 0x30038, 1410 0x30040, 0x30040, 1411 0x30100, 0x30144, 1412 0x30190, 0x301a0, 1413 0x301a8, 0x301b8, 1414 0x301c4, 0x301c8, 1415 0x301d0, 0x301d0, 1416 0x30200, 0x30318, 1417 0x30400, 0x304b4, 1418 0x304c0, 0x3052c, 1419 0x30540, 0x3061c, 1420 0x30800, 0x30828, 1421 0x30834, 0x30834, 1422 0x308c0, 0x30908, 1423 0x30910, 0x309ac, 1424 0x30a00, 
0x30a14, 1425 0x30a1c, 0x30a2c, 1426 0x30a44, 0x30a50, 1427 0x30a74, 0x30a74, 1428 0x30a7c, 0x30afc, 1429 0x30b08, 0x30c24, 1430 0x30d00, 0x30d00, 1431 0x30d08, 0x30d14, 1432 0x30d1c, 0x30d20, 1433 0x30d3c, 0x30d3c, 1434 0x30d48, 0x30d50, 1435 0x31200, 0x3120c, 1436 0x31220, 0x31220, 1437 0x31240, 0x31240, 1438 0x31600, 0x3160c, 1439 0x31a00, 0x31a1c, 1440 0x31e00, 0x31e20, 1441 0x31e38, 0x31e3c, 1442 0x31e80, 0x31e80, 1443 0x31e88, 0x31ea8, 1444 0x31eb0, 0x31eb4, 1445 0x31ec8, 0x31ed4, 1446 0x31fb8, 0x32004, 1447 0x32200, 0x32200, 1448 0x32208, 0x32240, 1449 0x32248, 0x32280, 1450 0x32288, 0x322c0, 1451 0x322c8, 0x322fc, 1452 0x32600, 0x32630, 1453 0x32a00, 0x32abc, 1454 0x32b00, 0x32b10, 1455 0x32b20, 0x32b30, 1456 0x32b40, 0x32b50, 1457 0x32b60, 0x32b70, 1458 0x33000, 0x33028, 1459 0x33030, 0x33048, 1460 0x33060, 0x33068, 1461 0x33070, 0x3309c, 1462 0x330f0, 0x33128, 1463 0x33130, 0x33148, 1464 0x33160, 0x33168, 1465 0x33170, 0x3319c, 1466 0x331f0, 0x33238, 1467 0x33240, 0x33240, 1468 0x33248, 0x33250, 1469 0x3325c, 0x33264, 1470 0x33270, 0x332b8, 1471 0x332c0, 0x332e4, 1472 0x332f8, 0x33338, 1473 0x33340, 0x33340, 1474 0x33348, 0x33350, 1475 0x3335c, 0x33364, 1476 0x33370, 0x333b8, 1477 0x333c0, 0x333e4, 1478 0x333f8, 0x33428, 1479 0x33430, 0x33448, 1480 0x33460, 0x33468, 1481 0x33470, 0x3349c, 1482 0x334f0, 0x33528, 1483 0x33530, 0x33548, 1484 0x33560, 0x33568, 1485 0x33570, 0x3359c, 1486 0x335f0, 0x33638, 1487 0x33640, 0x33640, 1488 0x33648, 0x33650, 1489 0x3365c, 0x33664, 1490 0x33670, 0x336b8, 1491 0x336c0, 0x336e4, 1492 0x336f8, 0x33738, 1493 0x33740, 0x33740, 1494 0x33748, 0x33750, 1495 0x3375c, 0x33764, 1496 0x33770, 0x337b8, 1497 0x337c0, 0x337e4, 1498 0x337f8, 0x337fc, 1499 0x33814, 0x33814, 1500 0x3382c, 0x3382c, 1501 0x33880, 0x3388c, 1502 0x338e8, 0x338ec, 1503 0x33900, 0x33928, 1504 0x33930, 0x33948, 1505 0x33960, 0x33968, 1506 0x33970, 0x3399c, 1507 0x339f0, 0x33a38, 1508 0x33a40, 0x33a40, 1509 0x33a48, 0x33a50, 1510 0x33a5c, 0x33a64, 1511 
0x33a70, 0x33ab8, 1512 0x33ac0, 0x33ae4, 1513 0x33af8, 0x33b10, 1514 0x33b28, 0x33b28, 1515 0x33b3c, 0x33b50, 1516 0x33bf0, 0x33c10, 1517 0x33c28, 0x33c28, 1518 0x33c3c, 0x33c50, 1519 0x33cf0, 0x33cfc, 1520 0x34000, 0x34030, 1521 0x34038, 0x34038, 1522 0x34040, 0x34040, 1523 0x34100, 0x34144, 1524 0x34190, 0x341a0, 1525 0x341a8, 0x341b8, 1526 0x341c4, 0x341c8, 1527 0x341d0, 0x341d0, 1528 0x34200, 0x34318, 1529 0x34400, 0x344b4, 1530 0x344c0, 0x3452c, 1531 0x34540, 0x3461c, 1532 0x34800, 0x34828, 1533 0x34834, 0x34834, 1534 0x348c0, 0x34908, 1535 0x34910, 0x349ac, 1536 0x34a00, 0x34a14, 1537 0x34a1c, 0x34a2c, 1538 0x34a44, 0x34a50, 1539 0x34a74, 0x34a74, 1540 0x34a7c, 0x34afc, 1541 0x34b08, 0x34c24, 1542 0x34d00, 0x34d00, 1543 0x34d08, 0x34d14, 1544 0x34d1c, 0x34d20, 1545 0x34d3c, 0x34d3c, 1546 0x34d48, 0x34d50, 1547 0x35200, 0x3520c, 1548 0x35220, 0x35220, 1549 0x35240, 0x35240, 1550 0x35600, 0x3560c, 1551 0x35a00, 0x35a1c, 1552 0x35e00, 0x35e20, 1553 0x35e38, 0x35e3c, 1554 0x35e80, 0x35e80, 1555 0x35e88, 0x35ea8, 1556 0x35eb0, 0x35eb4, 1557 0x35ec8, 0x35ed4, 1558 0x35fb8, 0x36004, 1559 0x36200, 0x36200, 1560 0x36208, 0x36240, 1561 0x36248, 0x36280, 1562 0x36288, 0x362c0, 1563 0x362c8, 0x362fc, 1564 0x36600, 0x36630, 1565 0x36a00, 0x36abc, 1566 0x36b00, 0x36b10, 1567 0x36b20, 0x36b30, 1568 0x36b40, 0x36b50, 1569 0x36b60, 0x36b70, 1570 0x37000, 0x37028, 1571 0x37030, 0x37048, 1572 0x37060, 0x37068, 1573 0x37070, 0x3709c, 1574 0x370f0, 0x37128, 1575 0x37130, 0x37148, 1576 0x37160, 0x37168, 1577 0x37170, 0x3719c, 1578 0x371f0, 0x37238, 1579 0x37240, 0x37240, 1580 0x37248, 0x37250, 1581 0x3725c, 0x37264, 1582 0x37270, 0x372b8, 1583 0x372c0, 0x372e4, 1584 0x372f8, 0x37338, 1585 0x37340, 0x37340, 1586 0x37348, 0x37350, 1587 0x3735c, 0x37364, 1588 0x37370, 0x373b8, 1589 0x373c0, 0x373e4, 1590 0x373f8, 0x37428, 1591 0x37430, 0x37448, 1592 0x37460, 0x37468, 1593 0x37470, 0x3749c, 1594 0x374f0, 0x37528, 1595 0x37530, 0x37548, 1596 0x37560, 0x37568, 1597 0x37570, 0x3759c, 
1598 0x375f0, 0x37638, 1599 0x37640, 0x37640, 1600 0x37648, 0x37650, 1601 0x3765c, 0x37664, 1602 0x37670, 0x376b8, 1603 0x376c0, 0x376e4, 1604 0x376f8, 0x37738, 1605 0x37740, 0x37740, 1606 0x37748, 0x37750, 1607 0x3775c, 0x37764, 1608 0x37770, 0x377b8, 1609 0x377c0, 0x377e4, 1610 0x377f8, 0x377fc, 1611 0x37814, 0x37814, 1612 0x3782c, 0x3782c, 1613 0x37880, 0x3788c, 1614 0x378e8, 0x378ec, 1615 0x37900, 0x37928, 1616 0x37930, 0x37948, 1617 0x37960, 0x37968, 1618 0x37970, 0x3799c, 1619 0x379f0, 0x37a38, 1620 0x37a40, 0x37a40, 1621 0x37a48, 0x37a50, 1622 0x37a5c, 0x37a64, 1623 0x37a70, 0x37ab8, 1624 0x37ac0, 0x37ae4, 1625 0x37af8, 0x37b10, 1626 0x37b28, 0x37b28, 1627 0x37b3c, 0x37b50, 1628 0x37bf0, 0x37c10, 1629 0x37c28, 0x37c28, 1630 0x37c3c, 0x37c50, 1631 0x37cf0, 0x37cfc, 1632 0x38000, 0x38030, 1633 0x38038, 0x38038, 1634 0x38040, 0x38040, 1635 0x38100, 0x38144, 1636 0x38190, 0x381a0, 1637 0x381a8, 0x381b8, 1638 0x381c4, 0x381c8, 1639 0x381d0, 0x381d0, 1640 0x38200, 0x38318, 1641 0x38400, 0x384b4, 1642 0x384c0, 0x3852c, 1643 0x38540, 0x3861c, 1644 0x38800, 0x38828, 1645 0x38834, 0x38834, 1646 0x388c0, 0x38908, 1647 0x38910, 0x389ac, 1648 0x38a00, 0x38a14, 1649 0x38a1c, 0x38a2c, 1650 0x38a44, 0x38a50, 1651 0x38a74, 0x38a74, 1652 0x38a7c, 0x38afc, 1653 0x38b08, 0x38c24, 1654 0x38d00, 0x38d00, 1655 0x38d08, 0x38d14, 1656 0x38d1c, 0x38d20, 1657 0x38d3c, 0x38d3c, 1658 0x38d48, 0x38d50, 1659 0x39200, 0x3920c, 1660 0x39220, 0x39220, 1661 0x39240, 0x39240, 1662 0x39600, 0x3960c, 1663 0x39a00, 0x39a1c, 1664 0x39e00, 0x39e20, 1665 0x39e38, 0x39e3c, 1666 0x39e80, 0x39e80, 1667 0x39e88, 0x39ea8, 1668 0x39eb0, 0x39eb4, 1669 0x39ec8, 0x39ed4, 1670 0x39fb8, 0x3a004, 1671 0x3a200, 0x3a200, 1672 0x3a208, 0x3a240, 1673 0x3a248, 0x3a280, 1674 0x3a288, 0x3a2c0, 1675 0x3a2c8, 0x3a2fc, 1676 0x3a600, 0x3a630, 1677 0x3aa00, 0x3aabc, 1678 0x3ab00, 0x3ab10, 1679 0x3ab20, 0x3ab30, 1680 0x3ab40, 0x3ab50, 1681 0x3ab60, 0x3ab70, 1682 0x3b000, 0x3b028, 1683 0x3b030, 0x3b048, 1684 0x3b060, 
0x3b068, 1685 0x3b070, 0x3b09c, 1686 0x3b0f0, 0x3b128, 1687 0x3b130, 0x3b148, 1688 0x3b160, 0x3b168, 1689 0x3b170, 0x3b19c, 1690 0x3b1f0, 0x3b238, 1691 0x3b240, 0x3b240, 1692 0x3b248, 0x3b250, 1693 0x3b25c, 0x3b264, 1694 0x3b270, 0x3b2b8, 1695 0x3b2c0, 0x3b2e4, 1696 0x3b2f8, 0x3b338, 1697 0x3b340, 0x3b340, 1698 0x3b348, 0x3b350, 1699 0x3b35c, 0x3b364, 1700 0x3b370, 0x3b3b8, 1701 0x3b3c0, 0x3b3e4, 1702 0x3b3f8, 0x3b428, 1703 0x3b430, 0x3b448, 1704 0x3b460, 0x3b468, 1705 0x3b470, 0x3b49c, 1706 0x3b4f0, 0x3b528, 1707 0x3b530, 0x3b548, 1708 0x3b560, 0x3b568, 1709 0x3b570, 0x3b59c, 1710 0x3b5f0, 0x3b638, 1711 0x3b640, 0x3b640, 1712 0x3b648, 0x3b650, 1713 0x3b65c, 0x3b664, 1714 0x3b670, 0x3b6b8, 1715 0x3b6c0, 0x3b6e4, 1716 0x3b6f8, 0x3b738, 1717 0x3b740, 0x3b740, 1718 0x3b748, 0x3b750, 1719 0x3b75c, 0x3b764, 1720 0x3b770, 0x3b7b8, 1721 0x3b7c0, 0x3b7e4, 1722 0x3b7f8, 0x3b7fc, 1723 0x3b814, 0x3b814, 1724 0x3b82c, 0x3b82c, 1725 0x3b880, 0x3b88c, 1726 0x3b8e8, 0x3b8ec, 1727 0x3b900, 0x3b928, 1728 0x3b930, 0x3b948, 1729 0x3b960, 0x3b968, 1730 0x3b970, 0x3b99c, 1731 0x3b9f0, 0x3ba38, 1732 0x3ba40, 0x3ba40, 1733 0x3ba48, 0x3ba50, 1734 0x3ba5c, 0x3ba64, 1735 0x3ba70, 0x3bab8, 1736 0x3bac0, 0x3bae4, 1737 0x3baf8, 0x3bb10, 1738 0x3bb28, 0x3bb28, 1739 0x3bb3c, 0x3bb50, 1740 0x3bbf0, 0x3bc10, 1741 0x3bc28, 0x3bc28, 1742 0x3bc3c, 0x3bc50, 1743 0x3bcf0, 0x3bcfc, 1744 0x3c000, 0x3c030, 1745 0x3c038, 0x3c038, 1746 0x3c040, 0x3c040, 1747 0x3c100, 0x3c144, 1748 0x3c190, 0x3c1a0, 1749 0x3c1a8, 0x3c1b8, 1750 0x3c1c4, 0x3c1c8, 1751 0x3c1d0, 0x3c1d0, 1752 0x3c200, 0x3c318, 1753 0x3c400, 0x3c4b4, 1754 0x3c4c0, 0x3c52c, 1755 0x3c540, 0x3c61c, 1756 0x3c800, 0x3c828, 1757 0x3c834, 0x3c834, 1758 0x3c8c0, 0x3c908, 1759 0x3c910, 0x3c9ac, 1760 0x3ca00, 0x3ca14, 1761 0x3ca1c, 0x3ca2c, 1762 0x3ca44, 0x3ca50, 1763 0x3ca74, 0x3ca74, 1764 0x3ca7c, 0x3cafc, 1765 0x3cb08, 0x3cc24, 1766 0x3cd00, 0x3cd00, 1767 0x3cd08, 0x3cd14, 1768 0x3cd1c, 0x3cd20, 1769 0x3cd3c, 0x3cd3c, 1770 0x3cd48, 0x3cd50, 1771 
0x3d200, 0x3d20c, 1772 0x3d220, 0x3d220, 1773 0x3d240, 0x3d240, 1774 0x3d600, 0x3d60c, 1775 0x3da00, 0x3da1c, 1776 0x3de00, 0x3de20, 1777 0x3de38, 0x3de3c, 1778 0x3de80, 0x3de80, 1779 0x3de88, 0x3dea8, 1780 0x3deb0, 0x3deb4, 1781 0x3dec8, 0x3ded4, 1782 0x3dfb8, 0x3e004, 1783 0x3e200, 0x3e200, 1784 0x3e208, 0x3e240, 1785 0x3e248, 0x3e280, 1786 0x3e288, 0x3e2c0, 1787 0x3e2c8, 0x3e2fc, 1788 0x3e600, 0x3e630, 1789 0x3ea00, 0x3eabc, 1790 0x3eb00, 0x3eb10, 1791 0x3eb20, 0x3eb30, 1792 0x3eb40, 0x3eb50, 1793 0x3eb60, 0x3eb70, 1794 0x3f000, 0x3f028, 1795 0x3f030, 0x3f048, 1796 0x3f060, 0x3f068, 1797 0x3f070, 0x3f09c, 1798 0x3f0f0, 0x3f128, 1799 0x3f130, 0x3f148, 1800 0x3f160, 0x3f168, 1801 0x3f170, 0x3f19c, 1802 0x3f1f0, 0x3f238, 1803 0x3f240, 0x3f240, 1804 0x3f248, 0x3f250, 1805 0x3f25c, 0x3f264, 1806 0x3f270, 0x3f2b8, 1807 0x3f2c0, 0x3f2e4, 1808 0x3f2f8, 0x3f338, 1809 0x3f340, 0x3f340, 1810 0x3f348, 0x3f350, 1811 0x3f35c, 0x3f364, 1812 0x3f370, 0x3f3b8, 1813 0x3f3c0, 0x3f3e4, 1814 0x3f3f8, 0x3f428, 1815 0x3f430, 0x3f448, 1816 0x3f460, 0x3f468, 1817 0x3f470, 0x3f49c, 1818 0x3f4f0, 0x3f528, 1819 0x3f530, 0x3f548, 1820 0x3f560, 0x3f568, 1821 0x3f570, 0x3f59c, 1822 0x3f5f0, 0x3f638, 1823 0x3f640, 0x3f640, 1824 0x3f648, 0x3f650, 1825 0x3f65c, 0x3f664, 1826 0x3f670, 0x3f6b8, 1827 0x3f6c0, 0x3f6e4, 1828 0x3f6f8, 0x3f738, 1829 0x3f740, 0x3f740, 1830 0x3f748, 0x3f750, 1831 0x3f75c, 0x3f764, 1832 0x3f770, 0x3f7b8, 1833 0x3f7c0, 0x3f7e4, 1834 0x3f7f8, 0x3f7fc, 1835 0x3f814, 0x3f814, 1836 0x3f82c, 0x3f82c, 1837 0x3f880, 0x3f88c, 1838 0x3f8e8, 0x3f8ec, 1839 0x3f900, 0x3f928, 1840 0x3f930, 0x3f948, 1841 0x3f960, 0x3f968, 1842 0x3f970, 0x3f99c, 1843 0x3f9f0, 0x3fa38, 1844 0x3fa40, 0x3fa40, 1845 0x3fa48, 0x3fa50, 1846 0x3fa5c, 0x3fa64, 1847 0x3fa70, 0x3fab8, 1848 0x3fac0, 0x3fae4, 1849 0x3faf8, 0x3fb10, 1850 0x3fb28, 0x3fb28, 1851 0x3fb3c, 0x3fb50, 1852 0x3fbf0, 0x3fc10, 1853 0x3fc28, 0x3fc28, 1854 0x3fc3c, 0x3fc50, 1855 0x3fcf0, 0x3fcfc, 1856 0x40000, 0x4000c, 1857 0x40040, 0x40050, 
1858 0x40060, 0x40068, 1859 0x4007c, 0x4008c, 1860 0x40094, 0x400b0, 1861 0x400c0, 0x40144, 1862 0x40180, 0x4018c, 1863 0x40200, 0x40254, 1864 0x40260, 0x40264, 1865 0x40270, 0x40288, 1866 0x40290, 0x40298, 1867 0x402ac, 0x402c8, 1868 0x402d0, 0x402e0, 1869 0x402f0, 0x402f0, 1870 0x40300, 0x4033c, 1871 0x403f8, 0x403fc, 1872 0x41304, 0x413c4, 1873 0x41400, 0x4140c, 1874 0x41414, 0x4141c, 1875 0x41480, 0x414d0, 1876 0x44000, 0x44054, 1877 0x4405c, 0x44078, 1878 0x440c0, 0x44174, 1879 0x44180, 0x441ac, 1880 0x441b4, 0x441b8, 1881 0x441c0, 0x44254, 1882 0x4425c, 0x44278, 1883 0x442c0, 0x44374, 1884 0x44380, 0x443ac, 1885 0x443b4, 0x443b8, 1886 0x443c0, 0x44454, 1887 0x4445c, 0x44478, 1888 0x444c0, 0x44574, 1889 0x44580, 0x445ac, 1890 0x445b4, 0x445b8, 1891 0x445c0, 0x44654, 1892 0x4465c, 0x44678, 1893 0x446c0, 0x44774, 1894 0x44780, 0x447ac, 1895 0x447b4, 0x447b8, 1896 0x447c0, 0x44854, 1897 0x4485c, 0x44878, 1898 0x448c0, 0x44974, 1899 0x44980, 0x449ac, 1900 0x449b4, 0x449b8, 1901 0x449c0, 0x449fc, 1902 0x45000, 0x45004, 1903 0x45010, 0x45030, 1904 0x45040, 0x45060, 1905 0x45068, 0x45068, 1906 0x45080, 0x45084, 1907 0x450a0, 0x450b0, 1908 0x45200, 0x45204, 1909 0x45210, 0x45230, 1910 0x45240, 0x45260, 1911 0x45268, 0x45268, 1912 0x45280, 0x45284, 1913 0x452a0, 0x452b0, 1914 0x460c0, 0x460e4, 1915 0x47000, 0x4703c, 1916 0x47044, 0x4708c, 1917 0x47200, 0x47250, 1918 0x47400, 0x47408, 1919 0x47414, 0x47420, 1920 0x47600, 0x47618, 1921 0x47800, 0x47814, 1922 0x48000, 0x4800c, 1923 0x48040, 0x48050, 1924 0x48060, 0x48068, 1925 0x4807c, 0x4808c, 1926 0x48094, 0x480b0, 1927 0x480c0, 0x48144, 1928 0x48180, 0x4818c, 1929 0x48200, 0x48254, 1930 0x48260, 0x48264, 1931 0x48270, 0x48288, 1932 0x48290, 0x48298, 1933 0x482ac, 0x482c8, 1934 0x482d0, 0x482e0, 1935 0x482f0, 0x482f0, 1936 0x48300, 0x4833c, 1937 0x483f8, 0x483fc, 1938 0x49304, 0x493c4, 1939 0x49400, 0x4940c, 1940 0x49414, 0x4941c, 1941 0x49480, 0x494d0, 1942 0x4c000, 0x4c054, 1943 0x4c05c, 0x4c078, 1944 0x4c0c0, 
0x4c174, 1945 0x4c180, 0x4c1ac, 1946 0x4c1b4, 0x4c1b8, 1947 0x4c1c0, 0x4c254, 1948 0x4c25c, 0x4c278, 1949 0x4c2c0, 0x4c374, 1950 0x4c380, 0x4c3ac, 1951 0x4c3b4, 0x4c3b8, 1952 0x4c3c0, 0x4c454, 1953 0x4c45c, 0x4c478, 1954 0x4c4c0, 0x4c574, 1955 0x4c580, 0x4c5ac, 1956 0x4c5b4, 0x4c5b8, 1957 0x4c5c0, 0x4c654, 1958 0x4c65c, 0x4c678, 1959 0x4c6c0, 0x4c774, 1960 0x4c780, 0x4c7ac, 1961 0x4c7b4, 0x4c7b8, 1962 0x4c7c0, 0x4c854, 1963 0x4c85c, 0x4c878, 1964 0x4c8c0, 0x4c974, 1965 0x4c980, 0x4c9ac, 1966 0x4c9b4, 0x4c9b8, 1967 0x4c9c0, 0x4c9fc, 1968 0x4d000, 0x4d004, 1969 0x4d010, 0x4d030, 1970 0x4d040, 0x4d060, 1971 0x4d068, 0x4d068, 1972 0x4d080, 0x4d084, 1973 0x4d0a0, 0x4d0b0, 1974 0x4d200, 0x4d204, 1975 0x4d210, 0x4d230, 1976 0x4d240, 0x4d260, 1977 0x4d268, 0x4d268, 1978 0x4d280, 0x4d284, 1979 0x4d2a0, 0x4d2b0, 1980 0x4e0c0, 0x4e0e4, 1981 0x4f000, 0x4f03c, 1982 0x4f044, 0x4f08c, 1983 0x4f200, 0x4f250, 1984 0x4f400, 0x4f408, 1985 0x4f414, 0x4f420, 1986 0x4f600, 0x4f618, 1987 0x4f800, 0x4f814, 1988 0x50000, 0x50084, 1989 0x50090, 0x500cc, 1990 0x50400, 0x50400, 1991 0x50800, 0x50884, 1992 0x50890, 0x508cc, 1993 0x50c00, 0x50c00, 1994 0x51000, 0x5101c, 1995 0x51300, 0x51308, 1996 }; 1997 1998 static const unsigned int t5vf_reg_ranges[] = { 1999 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), 2000 VF_MPS_REG(A_MPS_VF_CTL), 2001 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), 2002 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION), 2003 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), 2004 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), 2005 FW_T4VF_MBDATA_BASE_ADDR, 2006 FW_T4VF_MBDATA_BASE_ADDR + 2007 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), 2008 }; 2009 2010 static const unsigned int t6_reg_ranges[] = { 2011 0x1008, 0x101c, 2012 0x1024, 0x10a8, 2013 0x10b4, 0x10f8, 2014 0x1100, 0x1114, 2015 0x111c, 0x112c, 2016 0x1138, 0x113c, 2017 0x1144, 0x114c, 2018 0x1180, 0x1184, 2019 0x1190, 0x1194, 2020 0x11a0, 0x11a4, 2021 0x11b0, 0x11b4, 2022 0x11fc, 0x1274, 2023 0x1280, 0x133c, 
2024 0x1800, 0x18fc, 2025 0x3000, 0x302c, 2026 0x3060, 0x30b0, 2027 0x30b8, 0x30d8, 2028 0x30e0, 0x30fc, 2029 0x3140, 0x357c, 2030 0x35a8, 0x35cc, 2031 0x35ec, 0x35ec, 2032 0x3600, 0x5624, 2033 0x56cc, 0x56ec, 2034 0x56f4, 0x5720, 2035 0x5728, 0x575c, 2036 0x580c, 0x5814, 2037 0x5890, 0x589c, 2038 0x58a4, 0x58ac, 2039 0x58b8, 0x58bc, 2040 0x5940, 0x595c, 2041 0x5980, 0x598c, 2042 0x59b0, 0x59c8, 2043 0x59d0, 0x59dc, 2044 0x59fc, 0x5a18, 2045 0x5a60, 0x5a6c, 2046 0x5a80, 0x5a8c, 2047 0x5a94, 0x5a9c, 2048 0x5b94, 0x5bfc, 2049 0x5c10, 0x5e48, 2050 0x5e50, 0x5e94, 2051 0x5ea0, 0x5eb0, 2052 0x5ec0, 0x5ec0, 2053 0x5ec8, 0x5ed0, 2054 0x5ee0, 0x5ee0, 2055 0x5ef0, 0x5ef0, 2056 0x5f00, 0x5f00, 2057 0x6000, 0x6020, 2058 0x6028, 0x6040, 2059 0x6058, 0x609c, 2060 0x60a8, 0x619c, 2061 0x7700, 0x7798, 2062 0x77c0, 0x7880, 2063 0x78cc, 0x78fc, 2064 0x7b00, 0x7b58, 2065 0x7b60, 0x7b84, 2066 0x7b8c, 0x7c54, 2067 0x7d00, 0x7d38, 2068 0x7d40, 0x7d84, 2069 0x7d8c, 0x7ddc, 2070 0x7de4, 0x7e04, 2071 0x7e10, 0x7e1c, 2072 0x7e24, 0x7e38, 2073 0x7e40, 0x7e44, 2074 0x7e4c, 0x7e78, 2075 0x7e80, 0x7edc, 2076 0x7ee8, 0x7efc, 2077 0x8dc0, 0x8de4, 2078 0x8df8, 0x8e04, 2079 0x8e10, 0x8e84, 2080 0x8ea0, 0x8f88, 2081 0x8fb8, 0x9058, 2082 0x9060, 0x9060, 2083 0x9068, 0x90f8, 2084 0x9100, 0x9124, 2085 0x9400, 0x9470, 2086 0x9600, 0x9600, 2087 0x9608, 0x9638, 2088 0x9640, 0x9704, 2089 0x9710, 0x971c, 2090 0x9800, 0x9808, 2091 0x9820, 0x983c, 2092 0x9850, 0x9864, 2093 0x9c00, 0x9c6c, 2094 0x9c80, 0x9cec, 2095 0x9d00, 0x9d6c, 2096 0x9d80, 0x9dec, 2097 0x9e00, 0x9e6c, 2098 0x9e80, 0x9eec, 2099 0x9f00, 0x9f6c, 2100 0x9f80, 0xa020, 2101 0xd004, 0xd03c, 2102 0xd100, 0xd118, 2103 0xd200, 0xd214, 2104 0xd220, 0xd234, 2105 0xd240, 0xd254, 2106 0xd260, 0xd274, 2107 0xd280, 0xd294, 2108 0xd2a0, 0xd2b4, 2109 0xd2c0, 0xd2d4, 2110 0xd2e0, 0xd2f4, 2111 0xd300, 0xd31c, 2112 0xdfc0, 0xdfe0, 2113 0xe000, 0xf008, 2114 0xf010, 0xf018, 2115 0xf020, 0xf028, 2116 0x11000, 0x11014, 2117 0x11048, 0x1106c, 2118 0x11074, 
0x11088, 2119 0x11098, 0x11120, 2120 0x1112c, 0x1117c, 2121 0x11190, 0x112e0, 2122 0x11300, 0x1130c, 2123 0x12000, 0x1206c, 2124 0x19040, 0x1906c, 2125 0x19078, 0x19080, 2126 0x1908c, 0x190e8, 2127 0x190f0, 0x190f8, 2128 0x19100, 0x19110, 2129 0x19120, 0x19124, 2130 0x19150, 0x19194, 2131 0x1919c, 0x191b0, 2132 0x191d0, 0x191e8, 2133 0x19238, 0x19290, 2134 0x192a4, 0x192b0, 2135 0x192bc, 0x192bc, 2136 0x19348, 0x1934c, 2137 0x193f8, 0x19418, 2138 0x19420, 0x19428, 2139 0x19430, 0x19444, 2140 0x1944c, 0x1946c, 2141 0x19474, 0x19474, 2142 0x19490, 0x194cc, 2143 0x194f0, 0x194f8, 2144 0x19c00, 0x19c48, 2145 0x19c50, 0x19c80, 2146 0x19c94, 0x19c98, 2147 0x19ca0, 0x19cbc, 2148 0x19ce4, 0x19ce4, 2149 0x19cf0, 0x19cf8, 2150 0x19d00, 0x19d28, 2151 0x19d50, 0x19d78, 2152 0x19d94, 0x19d98, 2153 0x19da0, 0x19dc8, 2154 0x19df0, 0x19e10, 2155 0x19e50, 0x19e6c, 2156 0x19ea0, 0x19ebc, 2157 0x19ec4, 0x19ef4, 2158 0x19f04, 0x19f2c, 2159 0x19f34, 0x19f34, 2160 0x19f40, 0x19f50, 2161 0x19f90, 0x19fac, 2162 0x19fc4, 0x19fc8, 2163 0x19fd0, 0x19fe4, 2164 0x1a000, 0x1a004, 2165 0x1a010, 0x1a06c, 2166 0x1a0b0, 0x1a0e4, 2167 0x1a0ec, 0x1a0f8, 2168 0x1a100, 0x1a108, 2169 0x1a114, 0x1a120, 2170 0x1a128, 0x1a130, 2171 0x1a138, 0x1a138, 2172 0x1a190, 0x1a1c4, 2173 0x1a1fc, 0x1a1fc, 2174 0x1e008, 0x1e00c, 2175 0x1e040, 0x1e044, 2176 0x1e04c, 0x1e04c, 2177 0x1e284, 0x1e290, 2178 0x1e2c0, 0x1e2c0, 2179 0x1e2e0, 0x1e2e0, 2180 0x1e300, 0x1e384, 2181 0x1e3c0, 0x1e3c8, 2182 0x1e408, 0x1e40c, 2183 0x1e440, 0x1e444, 2184 0x1e44c, 0x1e44c, 2185 0x1e684, 0x1e690, 2186 0x1e6c0, 0x1e6c0, 2187 0x1e6e0, 0x1e6e0, 2188 0x1e700, 0x1e784, 2189 0x1e7c0, 0x1e7c8, 2190 0x1e808, 0x1e80c, 2191 0x1e840, 0x1e844, 2192 0x1e84c, 0x1e84c, 2193 0x1ea84, 0x1ea90, 2194 0x1eac0, 0x1eac0, 2195 0x1eae0, 0x1eae0, 2196 0x1eb00, 0x1eb84, 2197 0x1ebc0, 0x1ebc8, 2198 0x1ec08, 0x1ec0c, 2199 0x1ec40, 0x1ec44, 2200 0x1ec4c, 0x1ec4c, 2201 0x1ee84, 0x1ee90, 2202 0x1eec0, 0x1eec0, 2203 0x1eee0, 0x1eee0, 2204 0x1ef00, 0x1ef84, 2205 
0x1efc0, 0x1efc8, 2206 0x1f008, 0x1f00c, 2207 0x1f040, 0x1f044, 2208 0x1f04c, 0x1f04c, 2209 0x1f284, 0x1f290, 2210 0x1f2c0, 0x1f2c0, 2211 0x1f2e0, 0x1f2e0, 2212 0x1f300, 0x1f384, 2213 0x1f3c0, 0x1f3c8, 2214 0x1f408, 0x1f40c, 2215 0x1f440, 0x1f444, 2216 0x1f44c, 0x1f44c, 2217 0x1f684, 0x1f690, 2218 0x1f6c0, 0x1f6c0, 2219 0x1f6e0, 0x1f6e0, 2220 0x1f700, 0x1f784, 2221 0x1f7c0, 0x1f7c8, 2222 0x1f808, 0x1f80c, 2223 0x1f840, 0x1f844, 2224 0x1f84c, 0x1f84c, 2225 0x1fa84, 0x1fa90, 2226 0x1fac0, 0x1fac0, 2227 0x1fae0, 0x1fae0, 2228 0x1fb00, 0x1fb84, 2229 0x1fbc0, 0x1fbc8, 2230 0x1fc08, 0x1fc0c, 2231 0x1fc40, 0x1fc44, 2232 0x1fc4c, 0x1fc4c, 2233 0x1fe84, 0x1fe90, 2234 0x1fec0, 0x1fec0, 2235 0x1fee0, 0x1fee0, 2236 0x1ff00, 0x1ff84, 2237 0x1ffc0, 0x1ffc8, 2238 0x30000, 0x30030, 2239 0x30038, 0x30038, 2240 0x30040, 0x30040, 2241 0x30048, 0x30048, 2242 0x30050, 0x30050, 2243 0x3005c, 0x30060, 2244 0x30068, 0x30068, 2245 0x30070, 0x30070, 2246 0x30100, 0x30168, 2247 0x30190, 0x301a0, 2248 0x301a8, 0x301b8, 2249 0x301c4, 0x301c8, 2250 0x301d0, 0x301d0, 2251 0x30200, 0x30320, 2252 0x30400, 0x304b4, 2253 0x304c0, 0x3052c, 2254 0x30540, 0x3061c, 2255 0x30800, 0x308a0, 2256 0x308c0, 0x30908, 2257 0x30910, 0x309b8, 2258 0x30a00, 0x30a04, 2259 0x30a0c, 0x30a14, 2260 0x30a1c, 0x30a2c, 2261 0x30a44, 0x30a50, 2262 0x30a74, 0x30a74, 2263 0x30a7c, 0x30afc, 2264 0x30b08, 0x30c24, 2265 0x30d00, 0x30d14, 2266 0x30d1c, 0x30d3c, 2267 0x30d44, 0x30d4c, 2268 0x30d54, 0x30d74, 2269 0x30d7c, 0x30d7c, 2270 0x30de0, 0x30de0, 2271 0x30e00, 0x30ed4, 2272 0x30f00, 0x30fa4, 2273 0x30fc0, 0x30fc4, 2274 0x31000, 0x31004, 2275 0x31080, 0x310fc, 2276 0x31208, 0x31220, 2277 0x3123c, 0x31254, 2278 0x31300, 0x31300, 2279 0x31308, 0x3131c, 2280 0x31338, 0x3133c, 2281 0x31380, 0x31380, 2282 0x31388, 0x313a8, 2283 0x313b4, 0x313b4, 2284 0x31400, 0x31420, 2285 0x31438, 0x3143c, 2286 0x31480, 0x31480, 2287 0x314a8, 0x314a8, 2288 0x314b0, 0x314b4, 2289 0x314c8, 0x314d4, 2290 0x31a40, 0x31a4c, 2291 0x31af0, 0x31b20, 
2292 0x31b38, 0x31b3c, 2293 0x31b80, 0x31b80, 2294 0x31ba8, 0x31ba8, 2295 0x31bb0, 0x31bb4, 2296 0x31bc8, 0x31bd4, 2297 0x32140, 0x3218c, 2298 0x321f0, 0x321f4, 2299 0x32200, 0x32200, 2300 0x32218, 0x32218, 2301 0x32400, 0x32400, 2302 0x32408, 0x3241c, 2303 0x32618, 0x32620, 2304 0x32664, 0x32664, 2305 0x326a8, 0x326a8, 2306 0x326ec, 0x326ec, 2307 0x32a00, 0x32abc, 2308 0x32b00, 0x32b38, 2309 0x32b40, 0x32b58, 2310 0x32b60, 0x32b78, 2311 0x32c00, 0x32c00, 2312 0x32c08, 0x32c3c, 2313 0x32e00, 0x32e2c, 2314 0x32f00, 0x32f2c, 2315 0x33000, 0x3302c, 2316 0x33034, 0x33050, 2317 0x33058, 0x33058, 2318 0x33060, 0x3308c, 2319 0x3309c, 0x330ac, 2320 0x330c0, 0x330c0, 2321 0x330c8, 0x330d0, 2322 0x330d8, 0x330e0, 2323 0x330ec, 0x3312c, 2324 0x33134, 0x33150, 2325 0x33158, 0x33158, 2326 0x33160, 0x3318c, 2327 0x3319c, 0x331ac, 2328 0x331c0, 0x331c0, 2329 0x331c8, 0x331d0, 2330 0x331d8, 0x331e0, 2331 0x331ec, 0x33290, 2332 0x33298, 0x332c4, 2333 0x332e4, 0x33390, 2334 0x33398, 0x333c4, 2335 0x333e4, 0x3342c, 2336 0x33434, 0x33450, 2337 0x33458, 0x33458, 2338 0x33460, 0x3348c, 2339 0x3349c, 0x334ac, 2340 0x334c0, 0x334c0, 2341 0x334c8, 0x334d0, 2342 0x334d8, 0x334e0, 2343 0x334ec, 0x3352c, 2344 0x33534, 0x33550, 2345 0x33558, 0x33558, 2346 0x33560, 0x3358c, 2347 0x3359c, 0x335ac, 2348 0x335c0, 0x335c0, 2349 0x335c8, 0x335d0, 2350 0x335d8, 0x335e0, 2351 0x335ec, 0x33690, 2352 0x33698, 0x336c4, 2353 0x336e4, 0x33790, 2354 0x33798, 0x337c4, 2355 0x337e4, 0x337fc, 2356 0x33814, 0x33814, 2357 0x33854, 0x33868, 2358 0x33880, 0x3388c, 2359 0x338c0, 0x338d0, 2360 0x338e8, 0x338ec, 2361 0x33900, 0x3392c, 2362 0x33934, 0x33950, 2363 0x33958, 0x33958, 2364 0x33960, 0x3398c, 2365 0x3399c, 0x339ac, 2366 0x339c0, 0x339c0, 2367 0x339c8, 0x339d0, 2368 0x339d8, 0x339e0, 2369 0x339ec, 0x33a90, 2370 0x33a98, 0x33ac4, 2371 0x33ae4, 0x33b10, 2372 0x33b24, 0x33b28, 2373 0x33b38, 0x33b50, 2374 0x33bf0, 0x33c10, 2375 0x33c24, 0x33c28, 2376 0x33c38, 0x33c50, 2377 0x33cf0, 0x33cfc, 2378 0x34000, 
0x34030, 2379 0x34038, 0x34038, 2380 0x34040, 0x34040, 2381 0x34048, 0x34048, 2382 0x34050, 0x34050, 2383 0x3405c, 0x34060, 2384 0x34068, 0x34068, 2385 0x34070, 0x34070, 2386 0x34100, 0x34168, 2387 0x34190, 0x341a0, 2388 0x341a8, 0x341b8, 2389 0x341c4, 0x341c8, 2390 0x341d0, 0x341d0, 2391 0x34200, 0x34320, 2392 0x34400, 0x344b4, 2393 0x344c0, 0x3452c, 2394 0x34540, 0x3461c, 2395 0x34800, 0x348a0, 2396 0x348c0, 0x34908, 2397 0x34910, 0x349b8, 2398 0x34a00, 0x34a04, 2399 0x34a0c, 0x34a14, 2400 0x34a1c, 0x34a2c, 2401 0x34a44, 0x34a50, 2402 0x34a74, 0x34a74, 2403 0x34a7c, 0x34afc, 2404 0x34b08, 0x34c24, 2405 0x34d00, 0x34d14, 2406 0x34d1c, 0x34d3c, 2407 0x34d44, 0x34d4c, 2408 0x34d54, 0x34d74, 2409 0x34d7c, 0x34d7c, 2410 0x34de0, 0x34de0, 2411 0x34e00, 0x34ed4, 2412 0x34f00, 0x34fa4, 2413 0x34fc0, 0x34fc4, 2414 0x35000, 0x35004, 2415 0x35080, 0x350fc, 2416 0x35208, 0x35220, 2417 0x3523c, 0x35254, 2418 0x35300, 0x35300, 2419 0x35308, 0x3531c, 2420 0x35338, 0x3533c, 2421 0x35380, 0x35380, 2422 0x35388, 0x353a8, 2423 0x353b4, 0x353b4, 2424 0x35400, 0x35420, 2425 0x35438, 0x3543c, 2426 0x35480, 0x35480, 2427 0x354a8, 0x354a8, 2428 0x354b0, 0x354b4, 2429 0x354c8, 0x354d4, 2430 0x35a40, 0x35a4c, 2431 0x35af0, 0x35b20, 2432 0x35b38, 0x35b3c, 2433 0x35b80, 0x35b80, 2434 0x35ba8, 0x35ba8, 2435 0x35bb0, 0x35bb4, 2436 0x35bc8, 0x35bd4, 2437 0x36140, 0x3618c, 2438 0x361f0, 0x361f4, 2439 0x36200, 0x36200, 2440 0x36218, 0x36218, 2441 0x36400, 0x36400, 2442 0x36408, 0x3641c, 2443 0x36618, 0x36620, 2444 0x36664, 0x36664, 2445 0x366a8, 0x366a8, 2446 0x366ec, 0x366ec, 2447 0x36a00, 0x36abc, 2448 0x36b00, 0x36b38, 2449 0x36b40, 0x36b58, 2450 0x36b60, 0x36b78, 2451 0x36c00, 0x36c00, 2452 0x36c08, 0x36c3c, 2453 0x36e00, 0x36e2c, 2454 0x36f00, 0x36f2c, 2455 0x37000, 0x3702c, 2456 0x37034, 0x37050, 2457 0x37058, 0x37058, 2458 0x37060, 0x3708c, 2459 0x3709c, 0x370ac, 2460 0x370c0, 0x370c0, 2461 0x370c8, 0x370d0, 2462 0x370d8, 0x370e0, 2463 0x370ec, 0x3712c, 2464 0x37134, 0x37150, 2465 
0x37158, 0x37158, 2466 0x37160, 0x3718c, 2467 0x3719c, 0x371ac, 2468 0x371c0, 0x371c0, 2469 0x371c8, 0x371d0, 2470 0x371d8, 0x371e0, 2471 0x371ec, 0x37290, 2472 0x37298, 0x372c4, 2473 0x372e4, 0x37390, 2474 0x37398, 0x373c4, 2475 0x373e4, 0x3742c, 2476 0x37434, 0x37450, 2477 0x37458, 0x37458, 2478 0x37460, 0x3748c, 2479 0x3749c, 0x374ac, 2480 0x374c0, 0x374c0, 2481 0x374c8, 0x374d0, 2482 0x374d8, 0x374e0, 2483 0x374ec, 0x3752c, 2484 0x37534, 0x37550, 2485 0x37558, 0x37558, 2486 0x37560, 0x3758c, 2487 0x3759c, 0x375ac, 2488 0x375c0, 0x375c0, 2489 0x375c8, 0x375d0, 2490 0x375d8, 0x375e0, 2491 0x375ec, 0x37690, 2492 0x37698, 0x376c4, 2493 0x376e4, 0x37790, 2494 0x37798, 0x377c4, 2495 0x377e4, 0x377fc, 2496 0x37814, 0x37814, 2497 0x37854, 0x37868, 2498 0x37880, 0x3788c, 2499 0x378c0, 0x378d0, 2500 0x378e8, 0x378ec, 2501 0x37900, 0x3792c, 2502 0x37934, 0x37950, 2503 0x37958, 0x37958, 2504 0x37960, 0x3798c, 2505 0x3799c, 0x379ac, 2506 0x379c0, 0x379c0, 2507 0x379c8, 0x379d0, 2508 0x379d8, 0x379e0, 2509 0x379ec, 0x37a90, 2510 0x37a98, 0x37ac4, 2511 0x37ae4, 0x37b10, 2512 0x37b24, 0x37b28, 2513 0x37b38, 0x37b50, 2514 0x37bf0, 0x37c10, 2515 0x37c24, 0x37c28, 2516 0x37c38, 0x37c50, 2517 0x37cf0, 0x37cfc, 2518 0x40040, 0x40040, 2519 0x40080, 0x40084, 2520 0x40100, 0x40100, 2521 0x40140, 0x401bc, 2522 0x40200, 0x40214, 2523 0x40228, 0x40228, 2524 0x40240, 0x40258, 2525 0x40280, 0x40280, 2526 0x40304, 0x40304, 2527 0x40330, 0x4033c, 2528 0x41304, 0x413c8, 2529 0x413d0, 0x413dc, 2530 0x413f0, 0x413f0, 2531 0x41400, 0x4140c, 2532 0x41414, 0x4141c, 2533 0x41480, 0x414d0, 2534 0x44000, 0x4407c, 2535 0x440c0, 0x441ac, 2536 0x441b4, 0x4427c, 2537 0x442c0, 0x443ac, 2538 0x443b4, 0x4447c, 2539 0x444c0, 0x445ac, 2540 0x445b4, 0x4467c, 2541 0x446c0, 0x447ac, 2542 0x447b4, 0x4487c, 2543 0x448c0, 0x449ac, 2544 0x449b4, 0x44a7c, 2545 0x44ac0, 0x44bac, 2546 0x44bb4, 0x44c7c, 2547 0x44cc0, 0x44dac, 2548 0x44db4, 0x44e7c, 2549 0x44ec0, 0x44fac, 2550 0x44fb4, 0x4507c, 2551 0x450c0, 0x451ac, 
2552 0x451b4, 0x451fc, 2553 0x45800, 0x45804, 2554 0x45810, 0x45830, 2555 0x45840, 0x45860, 2556 0x45868, 0x45868, 2557 0x45880, 0x45884, 2558 0x458a0, 0x458b0, 2559 0x45a00, 0x45a04, 2560 0x45a10, 0x45a30, 2561 0x45a40, 0x45a60, 2562 0x45a68, 0x45a68, 2563 0x45a80, 0x45a84, 2564 0x45aa0, 0x45ab0, 2565 0x460c0, 0x460e4, 2566 0x47000, 0x4703c, 2567 0x47044, 0x4708c, 2568 0x47200, 0x47250, 2569 0x47400, 0x47408, 2570 0x47414, 0x47420, 2571 0x47600, 0x47618, 2572 0x47800, 0x47814, 2573 0x47820, 0x4782c, 2574 0x50000, 0x50084, 2575 0x50090, 0x500cc, 2576 0x50300, 0x50384, 2577 0x50400, 0x50400, 2578 0x50800, 0x50884, 2579 0x50890, 0x508cc, 2580 0x50b00, 0x50b84, 2581 0x50c00, 0x50c00, 2582 0x51000, 0x51020, 2583 0x51028, 0x510b0, 2584 0x51300, 0x51324, 2585 }; 2586 2587 static const unsigned int t6vf_reg_ranges[] = { 2588 VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS), 2589 VF_MPS_REG(A_MPS_VF_CTL), 2590 VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H), 2591 VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION), 2592 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL), 2593 VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS), 2594 FW_T6VF_MBDATA_BASE_ADDR, 2595 FW_T6VF_MBDATA_BASE_ADDR + 2596 ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4), 2597 }; 2598 2599 u32 *buf_end = (u32 *)(buf + buf_size); 2600 const unsigned int *reg_ranges; 2601 int reg_ranges_size, range; 2602 unsigned int chip_version = chip_id(adap); 2603 2604 /* 2605 * Select the right set of register ranges to dump depending on the 2606 * adapter chip type. 
2607 */ 2608 switch (chip_version) { 2609 case CHELSIO_T4: 2610 if (adap->flags & IS_VF) { 2611 reg_ranges = t4vf_reg_ranges; 2612 reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges); 2613 } else { 2614 reg_ranges = t4_reg_ranges; 2615 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges); 2616 } 2617 break; 2618 2619 case CHELSIO_T5: 2620 if (adap->flags & IS_VF) { 2621 reg_ranges = t5vf_reg_ranges; 2622 reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges); 2623 } else { 2624 reg_ranges = t5_reg_ranges; 2625 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges); 2626 } 2627 break; 2628 2629 case CHELSIO_T6: 2630 if (adap->flags & IS_VF) { 2631 reg_ranges = t6vf_reg_ranges; 2632 reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges); 2633 } else { 2634 reg_ranges = t6_reg_ranges; 2635 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges); 2636 } 2637 break; 2638 2639 default: 2640 CH_ERR(adap, 2641 "Unsupported chip version %d\n", chip_version); 2642 return; 2643 } 2644 2645 /* 2646 * Clear the register buffer and insert the appropriate register 2647 * values selected by the above register ranges. 2648 */ 2649 memset(buf, 0, buf_size); 2650 for (range = 0; range < reg_ranges_size; range += 2) { 2651 unsigned int reg = reg_ranges[range]; 2652 unsigned int last_reg = reg_ranges[range + 1]; 2653 u32 *bufp = (u32 *)(buf + reg); 2654 2655 /* 2656 * Iterate across the register range filling in the register 2657 * buffer but don't write past the end of the register buffer. 2658 */ 2659 while (reg <= last_reg && bufp < buf_end) { 2660 *bufp++ = t4_read_reg(adap, reg); 2661 reg += sizeof(u32); 2662 } 2663 } 2664} 2665 2666/* 2667 * Partial EEPROM Vital Product Data structure. Includes only the ID and 2668 * VPD-R sections. 2669 */ 2670struct t4_vpd_hdr { 2671 u8 id_tag; 2672 u8 id_len[2]; 2673 u8 id_data[ID_LEN]; 2674 u8 vpdr_tag; 2675 u8 vpdr_len[2]; 2676}; 2677 2678/* 2679 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms. 
 */
#define EEPROM_DELAY		10		/* 10us per poll spin */
#define EEPROM_MAX_POLL		5000		/* x 5000 == 50ms */

#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_BASE		0x400
#define VPD_BASE_OLD	0
#define VPD_LEN		1024
#define VPD_INFO_FLD_HDR_SIZE	3
#define CHELSIO_VPD_UNIQUE_ID	0x82

/*
 * Small utility function to wait till any outstanding VPD Access is complete.
 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
 * VPD Access in flight.  This allows us to handle the problem of having a
 * previous VPD Access time out and prevent an attempt to inject a new VPD
 * Request before any in-flight VPD request has completed.
 */
static int t4_seeprom_wait(struct adapter *adapter)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int max_poll;

	/*
	 * If no VPD Access is in flight, we can just return success right
	 * away.
	 */
	if (!adapter->vpd_busy)
		return 0;

	/*
	 * Poll the VPD Capability Address/Flag register waiting for it
	 * to indicate that the operation is complete.  The flag polarity
	 * expected at completion is recorded in adapter->vpd_flag (set by
	 * the read/write initiators below).
	 */
	max_poll = EEPROM_MAX_POLL;
	do {
		u16 val;

		udelay(EEPROM_DELAY);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);

		/*
		 * If the operation is complete, mark the VPD as no longer
		 * busy and return success.
		 */
		if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
			adapter->vpd_busy = 0;
			return 0;
		}
	} while (--max_poll);

	/*
	 * Failure!  Note that we leave the VPD Busy status set in order to
	 * avoid pushing a new VPD Access request into the VPD Capability till
	 * the current operation eventually succeeds.  It's a bug to issue a
	 * new request when an existing request is in flight and will result
	 * in corrupt hardware state.
	 */
	return -ETIMEDOUT;
}

/**
 *	t4_seeprom_read - read a serial EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
 *	VPD capability.  Note that this function must be called with a virtual
 *	address.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;

	/*
	 * VPD Accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/*
	 * Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/*
	 * Issue our new VPD Read request, mark the VPD as being busy and wait
	 * for our request to complete.  If it doesn't complete, note the
	 * error and return it to our caller.  Note that we do not reset the
	 * VPD Busy status!
	 */
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = PCI_VPD_ADDR_F;	/* read completes when F goes 1 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
		return ret;
	}

	/*
	 * Grab the returned data, swizzle it into our endianness and
	 * return success.
	 */
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 *	t4_seeprom_write - write a serial EEPROM location
 *	@adapter: adapter to write
 *	@addr: virtual EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
 *	VPD capability.  Note that this function must be called with a virtual
 *	address.
 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;
	u32 stats_reg;
	int max_poll;

	/*
	 * VPD Accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/*
	 * Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/*
	 * Issue our new VPD Write request, mark the VPD as being busy and wait
	 * for our request to complete.  If it doesn't complete, note the
	 * error and return it to our caller.  Note that we do not reset the
	 * VPD Busy status!
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
				 cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
				 (u16)addr | PCI_VPD_ADDR_F);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = 0;	/* write completes when F drops back to 0 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
		return ret;
	}

	/*
	 * Reset PCI_VPD_DATA register after a transaction and wait for our
	 * request to complete.  If it doesn't complete, return error.
	 *
	 * NOTE(review): the return value of t4_seeprom_read() below is
	 * ignored; if the read fails, stats_reg is used uninitialized.
	 * TODO confirm whether a failure path should be added here.
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
	max_poll = EEPROM_MAX_POLL;
	do {
		udelay(EEPROM_DELAY);
		t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
	} while ((stats_reg & 0x1) && --max_poll);
	if (!max_poll)
		return -ETIMEDOUT;

	/* Return success! */
	return 0;
}

/**
 *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [ES-A..ES)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return EEPROMSIZE - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}

/**
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Enables or disables write protection on the serial EEPROM by writing
 *	the EEPROM status word (0xc sets the block-protect bits, 0 clears
 *	them).
 */
int t4_seeprom_wp(struct adapter *adapter, int enable)
{
	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/**
 *	get_vpd_keyword_val - Locates an information field keyword in the VPD
 *	@v: Pointer to buffered vpd data structure
 *	@kw: The keyword to search for
 *
 *	Returns the buffer offset of the keyword's value field (i.e. just
 *	past its 3-byte field header), or -ENOENT if the keyword is not
 *	found or the VPD-R length is implausible.
 */
static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int i;
	unsigned int offset , len;
	const u8 *buf = (const u8 *)v;
	const u8 *vpdr_len = &v->vpdr_len[0];
	offset = sizeof(struct t4_vpd_hdr);
	/* VPD-R section length is stored little-endian */
	len = (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
		return -ENOENT;
	}

	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
		/* field header: 2-byte keyword followed by 1-byte length */
		if(memcmp(buf + i , kw , 2) == 0){
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
	}

	return -ENOENT;
}


/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *	@vpd: caller provided temporary space to read the VPD into
 *
 *	Reads card parameters stored in VPD EEPROM.
 *
 *	NOTE(review): @vpd must be at least VPD_LEN bytes — the loop below
 *	reads VPD_LEN bytes into it.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
			  u8 *vpd)
{
	int i, ret, addr;
	int ec, sn, pn, na;
	u8 csum;
	const struct t4_vpd_hdr *v;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
	if (ret)
		return (ret);

	/*
	 * The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82.  The first byte of a
	 * VPD shall be CHELSIO_VPD_UNIQUE_ID (0x82).  The VPD programming
	 * software is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < VPD_LEN; i += 4) {
		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
		if (ret)
			return ret;
	}
	v = (const struct t4_vpd_hdr *)vpd;

/* Look up a VPD keyword; bail out with -EINVAL if it is missing. */
#define FIND_VPD_KW(var,name) do { \
	var = get_vpd_keyword_val(v , name); \
	if (var < 0) { \
		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
} while (0)

	/* "RV" holds the checksum byte; all bytes up to and including it
	 * must sum to zero (mod 256). */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		CH_ERR(adapter,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
	FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strstrip(p->ec);
	/* field length byte lives 1 byte before the value (header byte 2) */
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strstrip(p->sn);
	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strstrip((char *)p->pn);
	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
	strstrip((char *)p->na);

	return 0;
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,		/* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID = 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	/* data must be loaded before the operation is kicked off via SF_OP */
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register
 *	(SF_RD_STATUS); bit 0 is the flash's write-in-progress indicator.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/*
	 * Build the 4-byte fast-read command: opcode plus 24-bit address.
	 * NOTE(review): swab32() appears to order the bytes so the opcode is
	 * shifted out first — confirm against the SF_OP/SF_DATA hardware
	 * byte ordering.
	 */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* keep chaining (cont) until the final word, then unlock */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32)(cpu_to_be32(*data));
	}
	return 0;
}

/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	If @byte_oriented is set the write data is stored as a byte stream
 *	(i.e. matches what is on disk), otherwise in big-endian.
 */
int t4_write_flash(struct adapter *adapter, unsigned int addr,
		   unsigned int n, const u8 *data, int byte_oriented)
{
	int ret;
	u32 buf[SF_PAGE_SIZE / 4];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		if (!byte_oriented)
			val = cpu_to_be32(val);

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	/* data was advanced past the n bytes just written, so data - n is
	 * the caller's original buffer */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		CH_ERR(adapter,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 *	t4_get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}

/**
 *	t4_get_exprom_version - return the Expansion ROM version (if any)
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the Expansion ROM header from FLASH and returns the version
 *	number (if present) through the @vers return value pointer.  We return
 *	this in the Firmware Version Format since it's convenient.  Return
 *	0 on success, -ENOENT if no Expansion ROM is present.
 */
int t4_get_exprom_version(struct adapter *adap, u32 *vers)
{
	struct exprom_header {
		unsigned char hdr_arr[16];	/* must start with 0x55aa */
		unsigned char hdr_ver[4];	/* Expansion ROM version */
	} *hdr;
	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
					   sizeof(u32))];
	int ret;

	ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
			    ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
			    0);
	if (ret)
		return ret;

	hdr = (struct exprom_header *)exprom_header_buf;
	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
		return -ENOENT;

	*vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
		 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
		 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
		 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
	return 0;
}

/**
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	if (end >= adapter->params.sf_nsec)
		return -EINVAL;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			CH_ERR(adapter,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}

/**
 *	t4_flash_cfg_addr - return the address of the flash configuration file
 *	@adapter: the adapter
 *
 *	Return the address within the flash where the Firmware Configuration
 *	File is stored, or an error if the device FLASH is too small to contain
 *	a Firmware Configuration File.
 */
int t4_flash_cfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
		return -ENOSPC;

	return FLASH_CFG_START;
}

/*
 * Return TRUE if the specified firmware matches the adapter.  I.e. T4
 * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
 * and emit an error message for mismatched firmware to save our caller the
 * effort ...
 */
static int t4_fw_matches_chip(struct adapter *adap,
			      const struct fw_hdr *hdr)
{
	/*
	 * The expression below will return FALSE for any unsupported adapter
	 * which will keep us "honest" in the future ...
 */
	if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
	    (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
	    (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
		return 1;

	CH_ERR(adap,
		"FW image (%d) is not suitable for this adapter (%d)\n",
		hdr->chip, chip_id(adap));
	return 0;
}

/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size in bytes
 *
 *	Write the supplied firmware image to the card's serial flash.  The
 *	image is validated (size, header length, checksum, chip match)
 *	before any sector is erased.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec;
	unsigned int fw_start;
	unsigned int fw_size;

	/* Bootstrap images live in their own flash region. */
	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
		fw_start = FLASH_FWBOOTSTRAP_START;
		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
	} else {
		fw_start_sec = FLASH_FW_START_SEC;
		fw_start = FLASH_FW_START;
		fw_size = FLASH_FW_MAX_SIZE;
	}

	if (!size) {
		CH_ERR(adap, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		CH_ERR(adap,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
		CH_ERR(adap,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		CH_ERR(adap, "FW image too large, max is %u bytes\n",
			   fw_size);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* The 32-bit word sum of a valid image is 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adap,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
	if (ret)
		goto out;

	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
		if (ret)
			goto out;
	}

	/* Finally patch in the real version word. */
	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
out:
	if (ret)
		CH_ERR(adap, "firmware download failed, error %d\n",
		       ret);
	return ret;
}

/**
 *	t4_fwcache - firmware cache operation
 *	@adap: the adapter
 *	@op  : the operation (flush or flush and invalidate)
 *
 *	Issues an FW_PARAMS_CMD mailbox command requesting the given
 *	firmware-cache operation.
 */
int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
{
	struct fw_params_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn =
	    cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
			    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
			    V_FW_PARAMS_CMD_PFN(adap->pf) |
			    V_FW_PARAMS_CMD_VFN(0));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.param[0].mnem =
	    cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
	c.param[0].val = (__force __be32)op;

	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
}

/*
 * Read the CIM PIF logic-analyzer request/response capture buffers via the
 * CIM debug registers, optionally returning the current write pointers.
 * LA capture (F_LADBGEN) is turned off while reading and the original
 * A_CIM_DEBUGCFG value is restored on exit.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		/* NOTE(review): pointers skip ahead by 2 between entries —
		 * presumably each LA entry occupies 8 slots of which 6 hold
		 * data; confirm against the CIM LA layout. */
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

/*
 * Read the CIM MA logic-analyzer capture buffers via the CIM debug
 * registers.  As above, LA capture is disabled while reading and the
 * original A_CIM_DEBUGCFG value is restored on exit.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			idx = 8 * i + j;
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
				     V_PILADBGRDPTR(idx));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
		}
	}
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}

/*
 * Read the ULP RX logic-analyzer contents into @la_buf.  The 8 LA data
 * sources are interleaved: source i's j-th word lands at la_buf[i + 8*j].
 */
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
	}
}

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)

/**
 *	t4_link_l1cfg - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 =
		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			    FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/* PHY can't autonegotiate: advertise all supported speeds */
		c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
					     fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_restart_aneg - restart autonegotiation
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *
 *	Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 =
		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			    FW_LEN16(c));
	c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/* Platform-specific handler invoked for matched interrupt conditions. */
typedef void (*int_handler_t)(struct adapter *adap);

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler;	/* platform-specific int handler */
};

/**
 *	t4_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@acts: table of interrupt actions
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally emitting a warning or alert message.  The table is terminated
 *	by an entry specifying mask 0.  Returns the number of fatal interrupt
 *	conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		} else if (acts->msg)
			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
					  status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)	/* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info sysbus_intr_info[] = {
		{ F_RNPP, "RXNP array parity error", -1, 1 },
		{ F_RPCP, "RXPC array parity error", -1, 1 },
		{ F_RCIP, "RXCIF array parity error", -1, 1 },
		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
		{ F_RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_port_intr_info[] = {
		{ F_TPCP, "TXPC array parity error", -1, 1 },
		{ F_TNPP, "TXNP array parity error", -1, 1 },
		{ F_TFTP, "TXFT array parity error", -1, 1 },
		{ F_TCAP, "TXCA array parity error", -1, 1 },
		{ F_TCIP, "TXCIF array parity error", -1, 1 },
		{ F_RCAP, "RXCA array parity error", -1, 1 },
		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
		{ F_RDPE, "Rx data parity error", -1, 1 },
		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_intr_info[] = {
		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
		  0 },
		{ 0 }
	};

	/*
	 * NOTE(review): the messages for F_VFIDPERR ("PCI INTx clear parity
	 * error") and F_HREQWRPERR ("PCI HMA channel count parity error")
	 * below look like copy-paste leftovers from the T4 table — the bit
	 * names suggest VFID / HMA write-request parity.  Verify against the
	 * T5 register documentation before changing the strings.
	 */
	static const struct intr_info t5_pcie_intr_info[] = {
		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
		  -1, 1 },
		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
		  -1, 1 },
		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
		  -1, 1 },
		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
		  -1, 1 },
		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
		{ F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
		  -1, 1 },
		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
		  -1, 1 },
		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
		{ F_READRSPERR, "Outbound read error", -1,
		  0 },
		{ 0 }
	};

	int fat;

	/* T4 splits PCIe status over three registers; T5+ uses just one. */
	if (is_t4(adapter))
		fat = t4_handle_intr_status(adapter,
				A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				sysbus_intr_info) +
		      t4_handle_intr_status(adapter,
				A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
				pcie_port_intr_info) +
		      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					    pcie_intr_info);
	else
		fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
					    t5_pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}

/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;
	u32 err;

	static const struct intr_info sge_intr_info[] = {
		{ F_ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ F_ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ F_ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	static const struct intr_info t4t5_sge_intr_info[] = {
		{ F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
		{ F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
		{ F_ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ 0 }
	};

	/*
	 * For now, treat below interrupts as fatal so that we disable SGE and
	 * get better debug */
	static const struct intr_info t6_sge_intr_info[] = {
		{ F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1,
		  "SGE PCIe error for a DBP thread", -1, 1 },
		{ F_FATAL_WRE_LEN,
		  "SGE Actual WRE packet is less than advertized length",
		  -1, 1 },
		{ 0 }
	};

	/* SGE parity status is a 64-bit value split over two registers. */
	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
	if (v) {
		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
			 (unsigned long long)v);
		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
	}

	/* v doubles as the "anything fatal?" flag below. */
	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
	if (chip_id(adapter) <= CHELSIO_T5)
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t4t5_sge_intr_info);
	else
		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
					   t6_sge_intr_info);

	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
	if (err & F_ERROR_QID_VALID) {
		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
		if (err & F_UNCAPTURED_ERROR)
			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
			     F_UNCAPTURED_ERROR);
	}

	if (v != 0)
		t4_fatal_err(adapter);
}

#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info cim_upintr_info[] = {
		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};
	int fat;

	/* A firmware error may be the root cause; report it first. */
	if (t4_read_reg(adapter, A_PCIE_FW) & F_PCIE_FW_ERR)
		t4_report_fw_error(adapter);

	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ 0xffffff0, "PMTX framing error", -1, 1 },
		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
		  1 },
		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ 0x3ffff0, "PMRX framing error", -1, 1 },
		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
		  1 },
		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
		t4_fatal_err(adapter);
}

/*
 * LE interrupt handler.
 */
static void le_intr_handler(struct adapter *adap)
{
	unsigned int chip_ver = chip_id(adap);
	static const struct intr_info le_intr_info[] = {
		{ F_LIPMISS, "LE LIP miss", -1, 0 },
		{ F_LIP0, "LE 0 LIP error", -1, 0 },
		{ F_PARITYERR, "LE parity error", -1, 1 },
		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	/*
	 * NOTE(review): the T6 messages mirror the T4/T5 strings even though
	 * the bit names differ (e.g. F_SSRAMINTPERR reported as "LE request
	 * queue parity error") — presumably intentional for log continuity,
	 * but worth confirming against the T6 register documentation.
	 */
	static const struct intr_info t6_le_intr_info[] = {
		{ F_T6_LIPMISS, "LE LIP miss", -1, 0 },
		{ F_T6_LIP0, "LE 0 LIP error", -1, 0 },
		{ F_TCAMINTPERR, "LE parity error", -1, 1 },
		{ F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
				  (chip_ver <= CHELSIO_T5) ?
				  le_intr_info : t6_le_intr_info))
		t4_fatal_err(adap);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
		  1 },
		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
				    mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	/* Clear the top-level MPS cause and flush the write. */
	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
	t4_read_reg(adapter, A_MPS_INT_CAUSE);	/* flush */
	if (fat)
		t4_fatal_err(adapter);
}

#define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
		      F_ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.  @idx selects the memory controller
 * (MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC1).
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };

	unsigned int addr, cnt_addr, v;

	/* Pick the cause/ECC-status registers for the selected controller. */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
	} else if (idx == MEM_MC) {
		if (is_t4(adapter)) {
			addr = A_MC_INT_CAUSE;
			cnt_addr = A_MC_ECC_STATUS;
		} else {
			addr = A_MC_P_INT_CAUSE;
			cnt_addr = A_MC_P_ECC_STATUS;
		}
	} else {
		addr = MC_REG(A_MC_P_INT_CAUSE, 1);
		cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & F_PERR_INT_CAUSE)
		CH_ALERT(adapter, "%s FIFO parity error\n",
			 name[idx]);
	if (v & F_ECC_CE_INT_CAUSE) {
		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));

		t4_edc_err_read(adapter, idx);

		/* Writing all-ones to the count field clears it. */
		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
		CH_WARN_RATELIMIT(adapter,
				  "%u %s correctable ECC data error%s\n",
				  cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & F_ECC_UE_INT_CAUSE)
		CH_ALERT(adapter,
			 "%s uncorrectable ECC data error\n", name[idx]);

	t4_write_reg(adapter, addr, v);
	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}

/*
 * MA interrupt handler.  Any MA interrupt is treated as fatal.
 */
static void ma_intr_handler(struct adapter *adapter)
{
	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);

	if (status & F_MEM_PERR_INT_CAUSE) {
		CH_ALERT(adapter,
			 "MA parity error, parity status %#x\n",
			 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
		if (is_t5(adapter))
			CH_ALERT(adapter,
				 "MA parity error, parity status %#x\n",
				 t4_read_reg(adapter,
					     A_MA_PARITY_ERROR_STATUS2));
	}
	if (status & F_MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
		CH_ALERT(adapter, "MA address wrap-around error by "
			 "client %u to address %#x\n",
			 G_MEM_WRAP_CLIENT_NUM(v),
			 G_MEM_WRAP_ADDRESS(v) << 4);
	}
	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
	t4_fatal_err(adapter);
}

/*
 * SMB interrupt handler.
 */
static void smb_intr_handler(struct adapter *adap)
{
	static const struct intr_info smb_intr_info[] = {
		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
		t4_fatal_err(adap);
}

/*
 * NC-SI interrupt handler.
 */
static void ncsi_intr_handler(struct adapter *adap)
{
	static const struct intr_info ncsi_intr_info[] = {
		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
		t4_fatal_err(adap);
}

/*
 * XGMAC interrupt handler for the given port.  Only the Tx/Rx FIFO
 * parity bits are handled; both are fatal.
 */
static void xgmac_intr_handler(struct adapter *adap, int port)
{
	u32 v, int_cause_reg;

	/* The MAC interrupt cause register moved between T4 and T5. */
	if (is_t4(adap))
		int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
	else
		int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);

	v = t4_read_reg(adap, int_cause_reg);

	v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
	if (!v)
		return;

	if (v & F_TXFIFO_PRTY_ERR)
		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
			 port);
	if (v & F_RXFIFO_PRTY_ERR)
		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
			 port);
	t4_write_reg(adap, int_cause_reg, v);
	t4_fatal_err(adap);
}

/*
 * PL interrupt handler.
 */
static void pl_intr_handler(struct adapter *adap)
{
	static const struct intr_info pl_intr_info[] = {
		{ F_FATALPERR, "Fatal parity error", -1, 1 },
		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
		{ 0 }
	};

	static const struct intr_info t5_pl_intr_info[] = {
		{ F_FATALPERR, "Fatal parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
				  is_t4(adap) ?
				  pl_intr_info : t5_pl_intr_info))
		t4_fatal_err(adap);
}

#define PF_INTR_MASK (F_PFSW | F_PFCIM)

/**
 *	t4_slow_intr_handler - control path interrupt handler
 *	@adapter: the adapter
 *
 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
 *	The designation 'slow' is because it involves register reads, while
 *	data interrupts typically don't involve any MMIOs.
 *
 *	Returns 0 if no global interrupt was pending, 1 otherwise.
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);

	if (!(cause & GLBL_INTR_MASK))
		return 0;
	/* Dispatch to the per-module handler for each asserted cause bit. */
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_MPS)
		mps_intr_handler(adapter);
	if (cause & F_NCSI)
		ncsi_intr_handler(adapter);
	if (cause & F_PL)
		pl_intr_handler(adapter);
	if (cause & F_SMB)
		smb_intr_handler(adapter);
	if (cause & F_MAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & F_MAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & F_MAC2)
		xgmac_intr_handler(adapter, 2);
	if (cause & F_MAC3)
		xgmac_intr_handler(adapter, 3);
	if (cause & F_PCIE)
		pcie_intr_handler(adapter);
	if (cause & F_MC0)
		mem_intr_handler(adapter, MEM_MC);
	if (is_t5(adapter) && (cause & F_MC1))
		mem_intr_handler(adapter, MEM_MC1);
	if (cause & F_EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & F_EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & F_LE)
		le_intr_handler(adapter);
	if (cause & F_TP)
		tp_intr_handler(adapter);
	if (cause & F_MA)
		ma_intr_handler(adapter);
	if (cause & F_PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_SGE)
		sge_intr_handler(adapter);
	if (cause & F_ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void)t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
	return 1;
}

/**
 *	t4_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable PF-specific interrupts for the calling function and the top-level
 *	interrupt concentrator for global interrupts.  Interrupts are already
 *	enabled at each module, here we just enable the roots of the interrupt
 *	hierarchies.
 *
 *	Note: this function should be called only when the driver manages
 *	non PF-specific interrupts from the various HW modules.  Only one PCI
 *	function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	u32 val = 0;
	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	/* The PF-number field of WHOAMI moved on T6. */
	u32 pf = (chip_id(adapter) <= CHELSIO_T5
		  ? G_SOURCEPF(whoami)
		  : G_T6_SOURCEPF(whoami));

	if (chip_id(adapter) <= CHELSIO_T5)
		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
	else
		val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
		     F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
		     F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
}

/**
 *	t4_intr_disable - disable interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts.
 *	We only disable the top-level interrupt
 *	concentrators.  The caller must be a PCI function managing global
 *	interrupts.
 */
void t4_intr_disable(struct adapter *adapter)
{
	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	/* The PF-number field of WHOAMI moved on T6. */
	u32 pf = (chip_id(adapter) <= CHELSIO_T5
		  ? G_SOURCEPF(whoami)
		  : G_T6_SOURCEPF(whoami));

	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
}

/**
 *	t4_intr_clear - clear all interrupts
 *	@adapter: the adapter whose interrupts should be cleared
 *
 *	Clears all interrupts.  The caller must be a PCI function managing
 *	global interrupts.
 */
void t4_intr_clear(struct adapter *adapter)
{
	static const unsigned int cause_reg[] = {
		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
		A_TP_INT_CAUSE,
		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
		A_MPS_RX_PERR_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		MYPF_REG(A_PL_PF_INT_CAUSE),
		A_PL_PL_INT_CAUSE,
		A_LE_DB_INT_CAUSE,
	};

	unsigned int i;

	/* Writing all-ones clears every pending bit in a cause register. */
	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
		t4_write_reg(adapter, cause_reg[i], 0xffffffff);

	t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
		     A_MC_P_INT_CAUSE, 0xffffffff);

	if (is_t4(adapter)) {
		t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
			     0xffffffff);
		t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
			     0xffffffff);
	} else
		t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);

	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
}

/**
 *	hash_mac_addr - return the hash value of a MAC address
 *	@addr: the 48-bit Ethernet MAC address
 *
 *	Hashes a MAC address according to the hash function used by HW inexact
 *	(hash) address matching.  Returns a 6-bit hash in [0, 63].
 */
static int hash_mac_addr(const u8 *addr)
{
	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
	a ^= b;
	a ^= (a >> 12);
	a ^= (a >> 6);
	return a & 0x3f;
}

/**
 *	t4_config_rss_range - configure a portion of the RSS mapping table
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: virtual interface whose RSS subtable is to be written
 *	@start: start entry in the table to write
 *	@n: how many table entries to write
 *	@rspq: values for the "response queue" (Ingress Queue) lookup table
 *	@nrspq: number of values in @rspq
 *
 *	Programs the selected part of the VI's RSS mapping table with the
 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
 *	until the full table range is populated.
 *
 *	The caller must ensure the values in @rspq are in the range allowed for
 *	@viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;	/* wrap point when nrspq < n */
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 *	t4_config_glbl_rss - configure the global RSS mode
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@mode: global RSS mode
 *	@flags: mode-specific flags
 *
 *	Sets the global RSS mode.
 */
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags)
{
	struct fw_rss_glb_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
		c.u.manual.mode_pkd =
			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
		/* only BASICVIRTUAL mode consumes @flags */
		c.u.basicvirtual.mode_pkd =
			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
	} else
		return -EINVAL;
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_config_vi_rss - configure per VI RSS settings
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: the VI id
 *	@flags: RSS flags
 *	@defq: id of the default RSS queue for the VI.
 *
 *	Configures VI-specific RSS properties.
 */
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
		     unsigned int flags, unsigned int defq)
{
	struct fw_rss_vi_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/*
 * Read an RSS table row.  Writing the row index triggers the lookup;
 * poll (5 attempts, no delay) for the row-valid bit and return the value.
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}

/**
 *	t4_read_rss - read the contents of the RSS mapping table
 *	@adapter: the adapter
 *	@map: holds the contents of the RSS mapping table
 *
 *	Reads the contents of the RSS hash->queue mapping table.  Each row
 *	holds two queue entries, so RSS_NENTRIES/2 rows are read.
 */
int t4_read_rss(struct adapter *adapter, u16 *map)
{
	u32 val;
	int i, ret;

	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
		ret = rd_rss_row(adapter, i, &val);
		if (ret)
			return ret;
		*map++ = G_LKPTBLQUEUE0(val);
		*map++ = G_LKPTBLQUEUE1(val);
	}
	return 0;
}

/**
 *	t4_fw_tp_pio_rw - Access TP PIO through LDST
 *	@adap: the adapter
 *	@vals: where the indirect register values are stored/written
 *	@nregs: how many indirect registers to read/write
 *	@start_index: index of first indirect register to read/write
 *	@rw: Read (1) or Write (0)
 *
 *	Access TP PIO registers through LDST firmware commands, one mailbox
 *	command per register.  NOTE(review): mailbox failures are silently
 *	ignored; on a failed read the corresponding vals[i] is left unchanged.
 */
void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
		     unsigned int start_index, unsigned int rw)
{
	int ret, i;
	int cmd = FW_LDST_ADDRSPC_TP_PIO;
	struct fw_ldst_cmd c;

	for (i = 0 ; i < nregs; i++) {
		memset(&c, 0, sizeof(c));
		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
						F_FW_CMD_REQUEST |
						(rw ? F_FW_CMD_READ :
						      F_FW_CMD_WRITE) |
						V_FW_LDST_CMD_ADDRSPACE(cmd));
		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));

		c.u.addrval.addr = cpu_to_be32(start_index + i);
		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
		ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
		if (ret == 0) {
			if (rw)
				vals[i] = be32_to_cpu(c.u.addrval.val);
		}
	}
}

/**
 *	t4_read_rss_key - read the global RSS key
 *	@adap: the adapter
 *	@key: 10-entry array holding the 320-bit RSS key
 *
 *	Reads the global 320-bit RSS key.
4725 */ 4726void t4_read_rss_key(struct adapter *adap, u32 *key) 4727{ 4728 if (t4_use_ldst(adap)) 4729 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1); 4730 else 4731 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10, 4732 A_TP_RSS_SECRET_KEY0); 4733} 4734 4735/** 4736 * t4_write_rss_key - program one of the RSS keys 4737 * @adap: the adapter 4738 * @key: 10-entry array holding the 320-bit RSS key 4739 * @idx: which RSS key to write 4740 * 4741 * Writes one of the RSS keys with the given 320-bit value. If @idx is 4742 * 0..15 the corresponding entry in the RSS key table is written, 4743 * otherwise the global RSS key is written. 4744 */ 4745void t4_write_rss_key(struct adapter *adap, u32 *key, int idx) 4746{ 4747 u8 rss_key_addr_cnt = 16; 4748 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT); 4749 4750 /* 4751 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble), 4752 * allows access to key addresses 16-63 by using KeyWrAddrX 4753 * as index[5:4](upper 2) into key table 4754 */ 4755 if ((chip_id(adap) > CHELSIO_T5) && 4756 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3)) 4757 rss_key_addr_cnt = 32; 4758 4759 if (t4_use_ldst(adap)) 4760 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0); 4761 else 4762 t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10, 4763 A_TP_RSS_SECRET_KEY0); 4764 4765 if (idx >= 0 && idx < rss_key_addr_cnt) { 4766 if (rss_key_addr_cnt > 16) 4767 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT, 4768 V_KEYWRADDRX(idx >> 4) | 4769 V_T6_VFWRADDR(idx) | F_KEYWREN); 4770 else 4771 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT, 4772 V_KEYWRADDR(idx) | F_KEYWREN); 4773 } 4774} 4775 4776/** 4777 * t4_read_rss_pf_config - read PF RSS Configuration Table 4778 * @adapter: the adapter 4779 * @index: the entry in the PF RSS table to read 4780 * @valp: where to store the returned value 4781 * 4782 * Reads the PF RSS Configuration Table at the specified index and returns 4783 * the value found there. 
 */
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
			   u32 *valp)
{
	if (t4_use_ldst(adapter))
		t4_fw_tp_pio_rw(adapter, valp, 1,
				A_TP_RSS_PF0_CONFIG + index, 1);
	else
		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				 valp, 1, A_TP_RSS_PF0_CONFIG + index);
}

/**
 *	t4_write_rss_pf_config - write PF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the PF RSS table to write
 *	@val: the value to store
 *
 *	Writes the PF RSS Configuration Table at the specified index with the
 *	specified value.
 */
void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
			    u32 val)
{
	if (t4_use_ldst(adapter))
		t4_fw_tp_pio_rw(adapter, &val, 1,
				A_TP_RSS_PF0_CONFIG + index, 0);
	else
		t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				  &val, 1, A_TP_RSS_PF0_CONFIG + index);
}

/**
 *	t4_read_rss_vf_config - read VF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the VF RSS table to read
 *	@vfl: where to store the returned VFL
 *	@vfh: where to store the returned VFH
 *
 *	Reads the VF RSS Configuration Table at the specified index and returns
 *	the (VFL, VFH) values found there.
 */
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
			   u32 *vfl, u32 *vfh)
{
	u32 vrt, mask, data;

	/* The VF write-address field is wider/relocated on T6 and later. */
	if (chip_id(adapter) <= CHELSIO_T5) {
		mask = V_VFWRADDR(M_VFWRADDR);
		data = V_VFWRADDR(index);
	} else {
		mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
		data = V_T6_VFWRADDR(index);
	}
	/*
	 * Request that the index'th VF Table values be read into VFL/VFH.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
	vrt |= data | F_VFRDEN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);

	/*
	 * Grab the VFL/VFH values ...
	 */
	if (t4_use_ldst(adapter)) {
		t4_fw_tp_pio_rw(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, 1);
		t4_fw_tp_pio_rw(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, 1);
	} else {
		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				 vfl, 1, A_TP_RSS_VFL_CONFIG);
		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				 vfh, 1, A_TP_RSS_VFH_CONFIG);
	}
}

/**
 *	t4_write_rss_vf_config - write VF RSS Configuration Table
 *
 *	@adapter: the adapter
 *	@index: the entry in the VF RSS table to write
 *	@vfl: the VFL to store
 *	@vfh: the VFH to store
 *
 *	Writes the VF RSS Configuration Table at the specified index with the
 *	specified (VFL, VFH) values.
 */
void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
			    u32 vfl, u32 vfh)
{
	u32 vrt, mask, data;

	/* Same T5/T6 field-placement split as in t4_read_rss_vf_config(). */
	if (chip_id(adapter) <= CHELSIO_T5) {
		mask = V_VFWRADDR(M_VFWRADDR);
		data = V_VFWRADDR(index);
	} else {
		mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
		data = V_T6_VFWRADDR(index);
	}

	/*
	 * Load up VFL/VFH with the values to be written ...
	 */
	if (t4_use_ldst(adapter)) {
		t4_fw_tp_pio_rw(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, 0);
		t4_fw_tp_pio_rw(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, 0);
	} else {
		t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				  &vfl, 1, A_TP_RSS_VFL_CONFIG);
		t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				  &vfh, 1, A_TP_RSS_VFH_CONFIG);
	}

	/*
	 * Write the VFL/VFH into the VF Table at index'th location.
	 */
	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
	vrt |= data | F_VFRDEN;
	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
}

/**
 *	t4_read_rss_pf_map - read PF RSS Map
 *	@adapter: the adapter
 *
 *	Reads the PF RSS Map register and returns its value.
 *	NOTE(review): if the LDST mailbox command fails, pfmap is returned
 *	uninitialized (t4_fw_tp_pio_rw ignores errors) — confirm callers
 *	tolerate this.
 */
u32 t4_read_rss_pf_map(struct adapter *adapter)
{
	u32 pfmap;

	if (t4_use_ldst(adapter))
		t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 1);
	else
		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				 &pfmap, 1, A_TP_RSS_PF_MAP);
	return pfmap;
}

/**
 *	t4_write_rss_pf_map - write PF RSS Map
 *	@adapter: the adapter
 *	@pfmap: PF RSS Map value
 *
 *	Writes the specified value to the PF RSS Map register.
 */
void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
{
	if (t4_use_ldst(adapter))
		t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 0);
	else
		t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				  &pfmap, 1, A_TP_RSS_PF_MAP);
}

/**
 *	t4_read_rss_pf_mask - read PF RSS Mask
 *	@adapter: the adapter
 *
 *	Reads the PF RSS Mask register and returns its value.
 */
u32 t4_read_rss_pf_mask(struct adapter *adapter)
{
	u32 pfmask;

	if (t4_use_ldst(adapter))
		t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 1);
	else
		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
				 &pfmask, 1, A_TP_RSS_PF_MSK);
	return pfmask;
}

/**
 *	t4_write_rss_pf_mask - write PF RSS Mask
 *	@adapter: the adapter
 *	@pfmask: PF RSS Mask value
 *
 *	Writes the specified value to the PF RSS Mask register.
4964 */ 4965void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask) 4966{ 4967 if (t4_use_ldst(adapter)) 4968 t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 0); 4969 else 4970 t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA, 4971 &pfmask, 1, A_TP_RSS_PF_MSK); 4972} 4973 4974/** 4975 * t4_tp_get_tcp_stats - read TP's TCP MIB counters 4976 * @adap: the adapter 4977 * @v4: holds the TCP/IP counter values 4978 * @v6: holds the TCP/IPv6 counter values 4979 * 4980 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. 4981 * Either @v4 or @v6 may be %NULL to skip the corresponding stats. 4982 */ 4983void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, 4984 struct tp_tcp_stats *v6) 4985{ 4986 u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1]; 4987 4988#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST) 4989#define STAT(x) val[STAT_IDX(x)] 4990#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) 4991 4992 if (v4) { 4993 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4994 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST); 4995 v4->tcp_out_rsts = STAT(OUT_RST); 4996 v4->tcp_in_segs = STAT64(IN_SEG); 4997 v4->tcp_out_segs = STAT64(OUT_SEG); 4998 v4->tcp_retrans_segs = STAT64(RXT_SEG); 4999 } 5000 if (v6) { 5001 t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 5002 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST); 5003 v6->tcp_out_rsts = STAT(OUT_RST); 5004 v6->tcp_in_segs = STAT64(IN_SEG); 5005 v6->tcp_out_segs = STAT64(OUT_SEG); 5006 v6->tcp_retrans_segs = STAT64(RXT_SEG); 5007 } 5008#undef STAT64 5009#undef STAT 5010#undef STAT_IDX 5011} 5012 5013/** 5014 * t4_tp_get_err_stats - read TP's error MIB counters 5015 * @adap: the adapter 5016 * @st: holds the counter values 5017 * 5018 * Returns the values of TP's error counters. 
 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
{
	int nchan = adap->chip_params->nchan;

	/* Per-channel error counter blocks, @nchan consecutive values each. */
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			 st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			 st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			 st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			 st->tnl_cong_drops, nchan, A_TP_MIB_TNL_CNG_DROP_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			 st->ofld_chan_drops, nchan, A_TP_MIB_OFD_CHN_DROP_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			 st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			 st->ofld_vlan_drops, nchan, A_TP_MIB_OFD_VLN_DROP_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			 st->tcp6_in_errs, nchan, A_TP_MIB_TCP_V6IN_ERR_0);

	/*
	 * Reads two consecutive MIB registers into the two struct fields
	 * starting at ofld_no_neigh — relies on the next field being
	 * immediately adjacent in struct tp_err_stats.
	 */
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
			 &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP);
}

/**
 *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *
 *	Returns the values of TP's proxy counters.
 */
void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
{
	int nchan = adap->chip_params->nchan;

	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
			 nchan, A_TP_MIB_TNL_LPBK_0);
}

/**
 *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *
 *	Returns the values of TP's CPL counters.
 */
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
{
	int nchan = adap->chip_params->nchan;

	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
			 nchan, A_TP_MIB_CPL_IN_REQ_0);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->rsp,
			 nchan, A_TP_MIB_CPL_OUT_RSP_0);
}

/**
 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *
 *	Returns the values of TP's RDMA counters.
 */
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
{
	/* Two consecutive registers into adjacent fields at rqe_dfr_pkt. */
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_pkt,
			 2, A_TP_MIB_RQE_DFR_PKT);
}

/**
 *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
 *	@adap: the adapter
 *	@idx: the port index
 *	@st: holds the counter values
 *
 *	Returns the values of TP's FCoE counters for the selected port.
 */
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
		       struct tp_fcoe_stats *st)
{
	u32 val[2];

	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_ddp,
			 1, A_TP_MIB_FCOE_DDP_0 + idx);
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_drop,
			 1, A_TP_MIB_FCOE_DROP_0 + idx);
	/* Byte counter is split HI/LO across two registers per port. */
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
			 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
	st->octets_ddp = ((u64)val[0] << 32) | val[1];
}

/**
 *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *
 *	Returns the values of TP's counters for non-TCP directly-placed packets.
 */
void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
{
	u32 val[4];

	/* val[0]=frames, val[1]=drops, val[2..3]=octets HI/LO. */
	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val, 4,
			 A_TP_MIB_USM_PKTS);
	st->frames = val[0];
	st->drops = val[1];
	st->octets = ((u64)val[2] << 32) | val[3];
}

/**
 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
 *	@adap: the adapter
 *	@mtus: where to store the MTU values
 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 *	Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		/*
		 * NOTE(review): entry i is selected via the MTUVALUE field
		 * with MTUINDEX=0xff for read-back — opposite of the field
		 * usage in t4_load_mtus(); matches long-standing upstream
		 * usage, confirm against the TP register documentation.
		 */
		t4_write_reg(adap, A_TP_MTU_TABLE,
			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
		v = t4_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = G_MTUVALUE(v);
		if (mtu_log)
			mtu_log[i] = G_MTUWIDTH(v);
	}
}

/**
 *	t4_read_cong_tbl - reads the congestion control table
 *	@adap: the adapter
 *	@incr: where to store the alpha values
 *
 *	Reads the additive increments programmed into the HW congestion
 *	control table.
 */
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			t4_write_reg(adap, A_TP_CCTRL_TABLE,
				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
			/* low 13 bits hold the additive increment */
			incr[mtu][w] = (u16)t4_read_reg(adap,
						A_TP_CCTRL_TABLE) & 0x1fff;
		}
}

/**
 *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 *	@adap: the adapter
 *	@addr: the indirect TP register address
 *	@mask: specifies the field within the register to modify
 *	@val: new value for the field
 *
 *	Sets a field of an indirect TP register to the given value
 *	(read-modify-write through the TP PIO window).
 */
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
	t4_write_reg(adap, A_TP_PIO_DATA, val);
}

/**
 *	init_cong_ctrl - initialize congestion control parameters
 *	@a: the alpha values for congestion control
 *	@b: the beta values for congestion control
 *
 *	Initialize the congestion control parameters with the default
 *	alpha (additive increment) and beta (shift) tables for the 32
 *	congestion-control windows.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
	a[9] = 2;
	a[10] = 3;
	a[11] = 4;
	a[12] = 5;
	a[13] = 6;
	a[14] = 7;
	a[15] = 8;
	a[16] = 9;
	a[17] = 10;
	a[18] = 14;
	a[19] = 17;
	a[20] = 21;
	a[21] = 25;
	a[22] = 30;
	a[23] = 35;
	a[24] = 45;
	a[25] = 60;
	a[26] = 80;
	a[27] = 100;
	a[28] = 200;
	a[29] = 300;
	a[30] = 400;
	a[31] = 500;

	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
	b[9] = b[10] = 1;
	b[11] = b[12] = 2;
	b[13] = b[14] = b[15] = b[16] = 3;
	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
	b[28] = b[29] = 6;
	b[30] = b[31] = 7;
}

/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 *	t4_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *
 *	Write the HW MTU table with the supplied MTUs and the high-speed
 *	congestion control table with the supplied alpha, beta, and MTUs.
 *	We write the two tables together because the additive increments
 *	depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/*
	 * Expected average packet counts per congestion-control window,
	 * used to scale alpha into a per-window additive increment.
	 */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))	/* round */
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* mtu - 40 excludes TCP/IP headers; clamp at CC_MIN_INCR. */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}

/**
 *	t4_set_pace_tbl - set the pace table
 *	@adap: the adapter
 *	@pace_vals: the pace values in microseconds
 *	@start: index of the first entry in the HW pace table to set
 *	@n: how many entries to set
 *
 *	Sets (a subset of the) HW pace table.  Returns -ERANGE if @n exceeds
 *	the table size or any value does not fit the 11-bit HW field.
 */
int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
		    unsigned int start, unsigned int n)
{
	unsigned int vals[NTX_SCHED], i;
	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);

	if (n > NTX_SCHED)
		return -ERANGE;

	/* convert values from us to dack ticks, rounding to closest value */
	for (i = 0; i < n; i++, pace_vals++) {
		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
		if (vals[i] > 0x7ff)
			return -ERANGE;
		/* a non-zero input must not round down to zero ticks */
		if (*pace_vals && vals[i] == 0)
			return -ERANGE;
	}
	for (i = 0; i < n; i++, start++)
		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
	return 0;
}

/**
 *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
 *	@adap: the adapter
 *	@sched: the scheduler index
 *	@kbps: target rate in Kbps
 *
 *	Configure a Tx HW scheduler for the target rate by searching for the
 *	(cycles-per-tick, bytes-per-tick) pair that best approximates it.
 */
int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* -> bytes */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;
			bpt = (kbps + tps / 2) / tps;
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta < mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	/* Two schedulers share each rate-limit register, low/high halves. */
	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}

/**
 *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
 *	@adap: the adapter
 *	@sched: the scheduler index
 *	@ipg: the interpacket delay in tenths of nanoseconds
 *
 *	Set the interpacket delay for a HW packet rate scheduler.
 */
int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
{
	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;

	/* convert ipg to nearest number of core clocks */
	ipg *= core_ticks_per_usec(adap);
	ipg = (ipg + 5000) / 10000;
	if (ipg > M_TXTIMERSEPQ0)
		return -EINVAL;

	/* Two schedulers share the register; update only our half. */
	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
	else
		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
	t4_read_reg(adap, A_TP_TM_PIO_DATA);	/* flush */
	return 0;
}

/*
 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
 * clocks.
The formula is 5389 * 5390 * bytes/s = bytes256 * 256 * ClkFreq / 4096 5391 * 5392 * which is equivalent to 5393 * 5394 * bytes/s = 62.5 * bytes256 * ClkFreq_ms 5395 */ 5396static u64 chan_rate(struct adapter *adap, unsigned int bytes256) 5397{ 5398 u64 v = bytes256 * adap->params.vpd.cclk; 5399 5400 return v * 62 + v / 2; 5401} 5402 5403/** 5404 * t4_get_chan_txrate - get the current per channel Tx rates 5405 * @adap: the adapter 5406 * @nic_rate: rates for NIC traffic 5407 * @ofld_rate: rates for offloaded traffic 5408 * 5409 * Return the current Tx rates in bytes/s for NIC and offloaded traffic 5410 * for each channel. 5411 */ 5412void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate) 5413{ 5414 u32 v; 5415 5416 v = t4_read_reg(adap, A_TP_TX_TRATE); 5417 nic_rate[0] = chan_rate(adap, G_TNLRATE0(v)); 5418 nic_rate[1] = chan_rate(adap, G_TNLRATE1(v)); 5419 if (adap->chip_params->nchan > 2) { 5420 nic_rate[2] = chan_rate(adap, G_TNLRATE2(v)); 5421 nic_rate[3] = chan_rate(adap, G_TNLRATE3(v)); 5422 } 5423 5424 v = t4_read_reg(adap, A_TP_TX_ORATE); 5425 ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v)); 5426 ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v)); 5427 if (adap->chip_params->nchan > 2) { 5428 ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v)); 5429 ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v)); 5430 } 5431} 5432 5433/** 5434 * t4_set_trace_filter - configure one of the tracing filters 5435 * @adap: the adapter 5436 * @tp: the desired trace filter parameters 5437 * @idx: which filter to configure 5438 * @enable: whether to enable or disable the filter 5439 * 5440 * Configures one of the tracing filters available in HW. If @tp is %NULL 5441 * it indicates that the filter is already written in the register and it 5442 * just needs to be enabled or disabled. 
5443 */ 5444int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, 5445 int idx, int enable) 5446{ 5447 int i, ofst = idx * 4; 5448 u32 data_reg, mask_reg, cfg; 5449 u32 multitrc = F_TRCMULTIFILTER; 5450 u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN; 5451 5452 if (idx < 0 || idx >= NTRACE) 5453 return -EINVAL; 5454 5455 if (tp == NULL || !enable) { 5456 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 5457 enable ? en : 0); 5458 return 0; 5459 } 5460 5461 /* 5462 * TODO - After T4 data book is updated, specify the exact 5463 * section below. 5464 * 5465 * See T4 data book - MPS section for a complete description 5466 * of the below if..else handling of A_MPS_TRC_CFG register 5467 * value. 5468 */ 5469 cfg = t4_read_reg(adap, A_MPS_TRC_CFG); 5470 if (cfg & F_TRCMULTIFILTER) { 5471 /* 5472 * If multiple tracers are enabled, then maximum 5473 * capture size is 2.5KB (FIFO size of a single channel) 5474 * minus 2 flits for CPL_TRACE_PKT header. 5475 */ 5476 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8))) 5477 return -EINVAL; 5478 } else { 5479 /* 5480 * If multiple tracers are disabled, to avoid deadlocks 5481 * maximum packet capture size of 9600 bytes is recommended. 5482 * Also in this mode, only trace0 can be enabled and running. 5483 */ 5484 multitrc = 0; 5485 if (tp->snap_len > 9600 || idx) 5486 return -EINVAL; 5487 } 5488 5489 if (tp->port > (is_t4(adap) ? 
11 : 19) || tp->invert > 1 || 5490 tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET || 5491 tp->min_len > M_TFMINPKTSIZE) 5492 return -EINVAL; 5493 5494 /* stop the tracer we'll be changing */ 5495 t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0); 5496 5497 idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH); 5498 data_reg = A_MPS_TRC_FILTER0_MATCH + idx; 5499 mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx; 5500 5501 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { 5502 t4_write_reg(adap, data_reg, tp->data[i]); 5503 t4_write_reg(adap, mask_reg, ~tp->mask[i]); 5504 } 5505 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst, 5506 V_TFCAPTUREMAX(tp->snap_len) | 5507 V_TFMINPKTSIZE(tp->min_len)); 5508 t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 5509 V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en | 5510 (is_t4(adap) ? 5511 V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) : 5512 V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert))); 5513 5514 return 0; 5515} 5516 5517/** 5518 * t4_get_trace_filter - query one of the tracing filters 5519 * @adap: the adapter 5520 * @tp: the current trace filter parameters 5521 * @idx: which trace filter to query 5522 * @enabled: non-zero if the filter is enabled 5523 * 5524 * Returns the current settings of one of the HW tracing filters. 
 */
void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
			 int *enabled)
{
	u32 ctla, ctlb;
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg;

	/* MATCH_CTL_A/B hold the enable, port, invert, and length fields. */
	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);

	if (is_t4(adap)) {
		*enabled = !!(ctla & F_TFEN);
		tp->port = G_TFPORT(ctla);
		tp->invert = !!(ctla & F_TFINVERTMATCH);
	} else {
		/* T5+ use relocated bit positions for enable/port/invert. */
		*enabled = !!(ctla & F_T5_TFEN);
		tp->port = G_T5_TFPORT(ctla);
		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
	}
	tp->snap_len = G_TFCAPTUREMAX(ctlb);
	tp->min_len = G_TFMINPKTSIZE(ctlb);
	tp->skip_ofst = G_TFOFFSET(ctla);
	tp->skip_len = G_TFLENGTH(ctla);

	/* Per-filter match/don't-care register bases for this tracer. */
	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;

	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		/*
		 * HW stores "don't care" bits; invert them back into a match
		 * mask and only report data bits that participate in the match.
		 */
		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
	}
}

/**
 *	t4_pmtx_get_stats - returns the HW stats from PMTX
 *	@adap: the adapter
 *	@cnt: where to store the count statistics
 *	@cycles: where to store the cycle statistics
 *
 *	Returns performance statistics from PMTX.
 */
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	/* Both output arrays must have at least pm_stats_cnt entries. */
	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
		/* Select counter i (1-based in STAT_CONFIG), then read it. */
		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
		if (is_t4(adap))
			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
		else {
			/* T5+: cycle counts come via the PM_TX debug window. */
			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
					 A_PM_TX_DBG_DATA, data, 2,
					 A_PM_TX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}

/**
 *	t4_pmrx_get_stats - returns the HW stats from PMRX
 *	@adap: the adapter
 *	@cnt: where to store the count statistics
 *	@cycles: where to store the cycle statistics
 *
 *	Returns performance statistics from PMRX.
 */
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	int i;
	u32 data[2];

	/* Mirrors t4_pmtx_get_stats() but for the PM RX block. */
	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
		if (is_t4(adap)) {
			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
		} else {
			/* T5+: cycle counts come via the PM_RX debug window. */
			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
					 A_PM_RX_DBG_DATA, data, 2,
					 A_PM_RX_DBG_STAT_MSB);
			cycles[i] = (((u64)data[0] << 32) | data[1]);
		}
	}
}

/**
 *	t4_get_mps_bg_map - return the buffer groups associated with a port
 *	@adap: the adapter
 *	@idx: the port index
 *
 *	Returns a bitmap indicating which MPS buffer groups are associated
 *	with the given port.  Bit i is set if buffer group i is used by the
 *	port.
 */
static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
{
	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));

	if (n == 0)
		/* Single-port config: port 0 owns all four buffer groups. */
		return idx == 0 ? 0xf : 0;
	if (n == 1 && chip_id(adap) <= CHELSIO_T5)
		/* Two-port T4/T5 config: each port owns two buffer groups. */
		return idx < 2 ? (3 << (2 * idx)) : 0;
	/* Four-port (or T6) config: one buffer group per port. */
	return 1 << idx;
}

/**
 *	t4_get_port_type_description - return Port Type string description
 *	@port_type: firmware Port Type enumeration
 */
const char *t4_get_port_type_description(enum fw_port_type port_type)
{
	/* Indexed by enum fw_port_type; keep in sync with the firmware. */
	static const char *const port_type_description[] = {
		"Fiber_XFI",
		"Fiber_XAUI",
		"BT_SGMII",
		"BT_XFI",
		"BT_XAUI",
		"KX4",
		"CX4",
		"KX",
		"KR",
		"SFP",
		"BP_AP",
		"BP4_AP",
		"QSFP_10G",
		"QSA",
		"QSFP",
		"BP40_BA",
	};

	if (port_type < ARRAY_SIZE(port_type_description))
		return port_type_description[port_type];
	return "UNKNOWN";
}

/**
 *	t4_get_port_stats_offset - collect port stats relative to a previous
 *				   snapshot
 *	@adap: The adapter
 *	@idx: The port
 *	@stats: Current stats to fill
 *	@offset: Previous stats snapshot
 *
 *	Reads the current port statistics and subtracts @offset field by
 *	field, leaving the deltas in @stats.
 */
void t4_get_port_stats_offset(struct adapter *adap, int idx,
			      struct port_stats *stats,
			      struct port_stats *offset)
{
	u64 *s, *o;
	int i;

	t4_get_port_stats(adap, idx, stats);
	/* Treat port_stats as a flat array of u64 counters for subtraction. */
	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
	     i < (sizeof(struct port_stats)/sizeof(u64)) ;
	     i++, s++, o++)
		*s -= *o;
}

/**
 *	t4_get_port_stats - collect port statistics
 *	@adap: the adapter
 *	@idx: the port index
 *	@p: the stats structure to fill
 *
 *	Collect statistics related to the given port from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	u32 stat_ctl;

/* Per-port 64-bit MPS counter; T5+ use a different per-port register map. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
/* Common (non-per-port) 64-bit MPS counter. */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);

	p->tx_pause		= GET_STAT(TX_PORT_PAUSE);
	p->tx_octets		= GET_STAT(TX_PORT_BYTES);
	p->tx_frames		= GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames	= GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames	= GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames	= GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames	= GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64		= GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127	= GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255	= GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511	= GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023	= GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518	= GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max	= GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop		= GET_STAT(TX_PORT_DROP);
	p->tx_ppp0		= GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1		= GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2		= GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3		= GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4		= GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5		= GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6		= GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7		= GET_STAT(TX_PORT_PPP7);

	/*
	 * If the HW is configured to count pause frames in the TX totals,
	 * back them out (pause frames are 64 bytes on the wire).
	 */
	if (stat_ctl & F_COUNTPAUSESTATTX) {
		p->tx_frames -= p->tx_pause;
		p->tx_octets -= p->tx_pause * 64;
		p->tx_mcast_frames -= p->tx_pause;
	}

	p->rx_pause		= GET_STAT(RX_PORT_PAUSE);
	p->rx_octets		= GET_STAT(RX_PORT_BYTES);
	p->rx_frames		= GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames	= GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames	= GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames	= GET_STAT(RX_PORT_UCAST);
	p->rx_too_long		= GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber		= GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err		= GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err		= GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err	= GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt		= GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64		= GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127	= GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255	= GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511	= GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023	= GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518	= GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max	= GET_STAT(RX_PORT_1519B_MAX);
	p->rx_ppp0		= GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1		= GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2		= GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3		= GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4		= GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5		= GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6		= GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7		= GET_STAT(RX_PORT_PPP7);

	/* Same pause-frame adjustment on the RX side. */
	if (stat_ctl & F_COUNTPAUSESTATRX) {
		p->rx_frames -= p->rx_pause;
		p->rx_octets -= p->rx_pause * 64;
		p->rx_mcast_frames -= p->rx_pause;
	}

	/* Drop/truncate counters exist per buffer group owned by this port. */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 *	t4_get_lb_stats - collect loopback port statistics
 *	@adap: the adapter
 *	@idx: the loopback port index
 *	@p: the stats structure to fill
 *
 *	Return HW statistics for the given loopback port.
 */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);

/* Per-loopback-port 64-bit MPS counter; register map differs on T5+. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap) ? \
	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
/* Common (non-per-port) 64-bit MPS counter. */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->octets		= GET_STAT(BYTES);
	p->frames		= GET_STAT(FRAMES);
	p->bcast_frames		= GET_STAT(BCAST);
	p->mcast_frames		= GET_STAT(MCAST);
	p->ucast_frames		= GET_STAT(UCAST);
	p->error_frames		= GET_STAT(ERROR);

	p->frames_64		= GET_STAT(64B);
	p->frames_65_127	= GET_STAT(65B_127B);
	p->frames_128_255	= GET_STAT(128B_255B);
	p->frames_256_511	= GET_STAT(256B_511B);
	p->frames_512_1023	= GET_STAT(512B_1023B);
	p->frames_1024_1518	= GET_STAT(1024B_1518B);
	p->frames_1519_max	= GET_STAT(1519B_MAX);
	p->drop			= GET_STAT(DROP_FRAMES);

	/* Drop/truncate counters exist per buffer group owned by this port. */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 *	t4_wol_magic_enable - enable/disable magic packet WoL
 *	@adap: the adapter
 *	@port: the physical port index
 *	@addr: MAC address expected in magic packets, %NULL to disable
 *
 *	Enables/disables magic packet wake-on-LAN for the selected port.
 */
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
			 const u8 *addr)
{
	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;

	/* T4 and T5+ place the magic-MAC-ID and config registers differently. */
	if (is_t4(adap)) {
		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
	} else {
		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
	}

	if (addr) {
		/* addr[0] is the most significant byte of the MAC address. */
		t4_write_reg(adap, mag_id_reg_l,
			     (addr[2] << 24) | (addr[3] << 16) |
			     (addr[4] << 8) | addr[5]);
		t4_write_reg(adap, mag_id_reg_h,
			     (addr[0] << 8) | addr[1]);
	}
	/* Enable when an address was supplied, disable otherwise. */
	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
			 V_MAGICEN(addr != NULL));
}

/**
 *	t4_wol_pat_enable - enable/disable pattern-based WoL
 *	@adap: the adapter
 *	@port: the physical port index
 *	@map: bitmap of which HW pattern filters to set
 *	@mask0: byte mask for bytes 0-63 of a packet
 *	@mask1: byte mask for bytes 64-127 of a packet
 *	@crc: Ethernet CRC for selected bytes
 *	@enable: enable/disable switch
 *
 *	Sets the pattern filters indicated in @map to mask out the bytes
 *	specified in @mask0/@mask1 in received packets and compare the CRC of
 *	the resulting packet against @crc.  If @enable is %true pattern-based
 *	WoL is enabled, otherwise disabled.
 */
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
	int i;
	u32 port_cfg_reg;

	if (is_t4(adap))
		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
	else
		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);

	if (!enable) {
		/* Just clear the pattern-enable bit; leave patterns intact. */
		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
		return 0;
	}
	/* map selects among at most 8 HW pattern filters. */
	if (map > 0xff)
		return -EINVAL;

/* EPIO (external pattern I/O) register, chip-revision dependent. */
#define EPIO_REG(name) \
	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))

	/* DATA1-3 (upper mask words) are shared by all patterns below. */
	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);

	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
		if (!(map & 1))
			continue;

		/* write byte masks */
		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP)); /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;

		/* write CRC (CRC slots start at EPIO address 32) */
		t4_write_reg(adap, EPIO_REG(DATA0), crc);
		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP)); /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
			return -ETIMEDOUT;
	}
#undef EPIO_REG

	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
	return 0;
}

/* t4_mk_filtdelwr - create a delete filter WR
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter.  If @qid is
 * negative the delete notification is suppressed.
 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	/* A negative qid suppresses the delete-completion notification. */
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
			cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}

/*
 * Initialize the common header of a firmware command structure: opcode,
 * request flag, read/write direction, and length in 16-byte units.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)

/*
 * Write @val to @addr in the firmware's own address space via a LDST
 * mailbox command issued on mailbox @mbox.  Returns 0 or a negative errno.
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST |
					F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.addrval.addr = cpu_to_be32(addr);
	c.u.addrval.val = cpu_to_be32(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_mdio_rd - read a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to read
 *	@valp: where to store the value
 *
 *	Issues a FW command through the given mailbox to read a PHY register.
 */
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int *valp)
{
	int ret;
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);

	/* The firmware echoes the command back with rval filled in. */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		*valp = be16_to_cpu(c.u.mdio.rval);
	return ret;
}

/**
 *	t4_mdio_wr - write a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to write
 *	@val: value to write
 *
 *	Issues a FW command through the given mailbox to write a PHY register.
 */
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, unsigned int val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
					 V_FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = cpu_to_be16(reg);
	c.u.mdio.rval = cpu_to_be16(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *
 *	t4_sge_decode_idma_state - decode the idma state
 *	@adapter: the adapter
 *	@state: the state idma is stuck in
 *
 *	Logs a human-readable name for a stuck SGE ingress-DMA state plus a
 *	dump of a few SGE debug registers.  The state-name tables differ per
 *	chip generation.
 */
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
{
	/* T4 IDMA state names, indexed by hardware state number. */
	static const char * const t4_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"Not used",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL_PREP",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
		"IDMA_FL_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATAFL_DONE",
		"IDMA_FL_REQ_HEADERFL_DONE",
	};
	/* T5 IDMA state names, indexed by hardware state number. */
	static const char * const t5_decode[] = {
		"IDMA_IDLE",
		"IDMA_ALMOST_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	/* T6 IDMA state names, indexed by hardware state number. */
	static const char * const t6_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	/* Debug registers dumped alongside the decoded state. */
	static const u32 sge_regs[] = {
		A_SGE_DEBUG_DATA_LOW_INDEX_2,
		A_SGE_DEBUG_DATA_LOW_INDEX_3,
		A_SGE_DEBUG_DATA_HIGH_INDEX_10,
	};
	const char * const *sge_idma_decode;
	int sge_idma_decode_nstates;
	int i;
	unsigned int chip_version = chip_id(adapter);

	/* Select the right set of decode strings to dump depending on the
	 * adapter chip type.
	 */
	switch (chip_version) {
	case CHELSIO_T4:
		sge_idma_decode = (const char * const *)t4_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
		break;

	case CHELSIO_T5:
		sge_idma_decode = (const char * const *)t5_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
		break;

	case CHELSIO_T6:
		sge_idma_decode = (const char * const *)t6_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
		break;

	default:
		CH_ERR(adapter, "Unsupported chip version %d\n", chip_version);
		return;
	}

	if (state < sge_idma_decode_nstates)
		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
	else
		CH_WARN(adapter, "idma state %d unknown\n", state);

	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
		CH_WARN(adapter, "SGE register %#x value %#x\n",
			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
}

/**
 *	t4_sge_ctxt_flush - flush the SGE context cache
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a FW command through the given mailbox to flush the
 *	SGE context cache.
 */
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
{
	int ret;
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	/* The flush is expressed as a LDST read on the egress context space. */
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	return ret;
}

/**
 *	t4_fw_hello - establish communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@evt_mbox: mailbox to receive async FW events
 *	@master: specifies the caller's willingness to be the device master
 *	@state: returns the current device state (if non-NULL)
 *
 *	Issues a command to establish communication with FW.  Returns either
 *	an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = cpu_to_be32(
		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
					mbox : M_FW_HELLO_CMD_MBMASTER) |
		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
		F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so ...
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
			t4_report_fw_error(adap);
		return ret;
	}

	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	return master_mbox;
}

/**
 *	t4_fw_bye - end communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to terminate communication with FW.
 */
int t4_fw_bye(struct adapter *adap, unsigned int mbox)
{
	struct fw_bye_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, BYE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_fw_reset - issue a reset to FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@reset: specifies the type of reset to perform
 *
 *	Issues a reset command of the specified type to FW.
 */
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, RESET, WRITE);
	c.val = cpu_to_be32(reset);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@force: force uP into RESET even if FW RESET command fails
 *
 *	Issues a RESET command to firmware (if desired) with a HALT indication
 *	and then puts the microprocessor into RESET state.  The RESET command
 *	will only be issued if a legitimate mailbox is provided (mbox <=
 *	M_PCIE_FW_MASTER).
 *
 *	This is generally used in order for the host to safely manipulate the
 *	adapter without fear of conflicting with whatever the firmware might
 *	be doing.  The only way out of this state is to RESTART the firmware
 *	...
 */
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= M_PCIE_FW_MASTER) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
				 F_PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}

/**
 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@reset: if we want to do a RESET to restart things
 *
 *	Restart firmware previously halted by t4_fw_halt().  On successful
 *	return the previous PF Master remains as the new PF Master and there
 *	is no need to issue a new HELLO command, etc.
 *
 *	We do this in two ways:
 *
 *	1. If we're dealing with newer firmware we'll simply want to take
 *	   the chip's microprocessor out of RESET.  This will cause the
 *	   firmware to start up from its start vector.  And then we'll loop
 *	   until the firmware indicates it's started again (PCIE_FW.HALT
 *	   reset to 0) or we timeout.
 *
 *	2. If we're dealing with older firmware then we'll need to RESET
 *	   the chip since older firmware won't recognize the PCIE_FW.HALT
 *	   flag and automatically RESET itself on startup.
 */
int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= M_PCIE_FW_MASTER) {
			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
			msleep(100);
			if (t4_fw_reset(adap, mbox,
					F_PIORST | F_PIORSTMODE) == 0)
				return 0;
		}

		/* Last resort: full PL-level reset of the chip. */
		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
		msleep(2000);
	} else {
		int ms;

		/* Take the uP out of RESET and poll for HALT to clear. */
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
				return FW_SUCCESS;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}

/**
 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@fw_data: the firmware image to write
 *	@size: image size
 *	@force: force upgrade even if firmware doesn't cooperate
 *
 *	Perform all of the steps necessary for upgrading an adapter's
 *	firmware image.  Normally this requires the cooperation of the
 *	existing firmware in order to halt all existing activities
 *	but if an invalid mailbox token is passed in we skip that step
 *	(though we'll still put the adapter microprocessor into RESET in
 *	that case).
 *
 *	On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET losing all previous setup
 *	state.  On unsuccessful return the adapter may be completely hosed ...
 *	positive errno indicates that the adapter is ~probably~ intact, a
 *	negative errno indicates that things are looking bad ...
 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	/* Bootstrap images are loaded without halting/restarting the uP. */
	unsigned int bootstrap =
	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
	int reset, ret;

	if (!t4_fw_matches_chip(adap, fw_hdr))
		return -EINVAL;

	if (!bootstrap) {
		ret = t4_fw_halt(adap, mbox, force);
		if (ret < 0 && !force)
			return ret;
	}

	ret = t4_load_fw(adap, fw_data, size);
	if (ret < 0 || bootstrap)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return t4_fw_restart(adap, mbox, reset);
}

/**
 *	t4_fw_initialize - ask FW to initialize the device
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to FW to partially initialize the device.  This
 *	performs initialization that generally doesn't depend on user input.
6574 */ 6575int t4_fw_initialize(struct adapter *adap, unsigned int mbox) 6576{ 6577 struct fw_initialize_cmd c; 6578 6579 memset(&c, 0, sizeof(c)); 6580 INIT_CMD(c, INITIALIZE, WRITE); 6581 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 6582} 6583 6584/** 6585 * t4_query_params_rw - query FW or device parameters 6586 * @adap: the adapter 6587 * @mbox: mailbox to use for the FW command 6588 * @pf: the PF 6589 * @vf: the VF 6590 * @nparams: the number of parameters 6591 * @params: the parameter names 6592 * @val: the parameter values 6593 * @rw: Write and read flag 6594 * 6595 * Reads the value of FW or device parameters. Up to 7 parameters can be 6596 * queried at once. 6597 */ 6598int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf, 6599 unsigned int vf, unsigned int nparams, const u32 *params, 6600 u32 *val, int rw) 6601{ 6602 int i, ret; 6603 struct fw_params_cmd c; 6604 __be32 *p = &c.param[0].mnem; 6605 6606 if (nparams > 7) 6607 return -EINVAL; 6608 6609 memset(&c, 0, sizeof(c)); 6610 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | 6611 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6612 V_FW_PARAMS_CMD_PFN(pf) | 6613 V_FW_PARAMS_CMD_VFN(vf)); 6614 c.retval_len16 = cpu_to_be32(FW_LEN16(c)); 6615 6616 for (i = 0; i < nparams; i++) { 6617 *p++ = cpu_to_be32(*params++); 6618 if (rw) 6619 *p = cpu_to_be32(*(val + i)); 6620 p++; 6621 } 6622 6623 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 6624 if (ret == 0) 6625 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) 6626 *val++ = be32_to_cpu(*p); 6627 return ret; 6628} 6629 6630int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 6631 unsigned int vf, unsigned int nparams, const u32 *params, 6632 u32 *val) 6633{ 6634 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0); 6635} 6636 6637/** 6638 * t4_set_params_timeout - sets FW or device parameters 6639 * @adap: the adapter 6640 * @mbox: mailbox to use for the FW command 6641 * @pf: the 
PF 6642 * @vf: the VF 6643 * @nparams: the number of parameters 6644 * @params: the parameter names 6645 * @val: the parameter values 6646 * @timeout: the timeout time 6647 * 6648 * Sets the value of FW or device parameters. Up to 7 parameters can be 6649 * specified at once. 6650 */ 6651int t4_set_params_timeout(struct adapter *adap, unsigned int mbox, 6652 unsigned int pf, unsigned int vf, 6653 unsigned int nparams, const u32 *params, 6654 const u32 *val, int timeout) 6655{ 6656 struct fw_params_cmd c; 6657 __be32 *p = &c.param[0].mnem; 6658 6659 if (nparams > 7) 6660 return -EINVAL; 6661 6662 memset(&c, 0, sizeof(c)); 6663 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | 6664 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 6665 V_FW_PARAMS_CMD_PFN(pf) | 6666 V_FW_PARAMS_CMD_VFN(vf)); 6667 c.retval_len16 = cpu_to_be32(FW_LEN16(c)); 6668 6669 while (nparams--) { 6670 *p++ = cpu_to_be32(*params++); 6671 *p++ = cpu_to_be32(*val++); 6672 } 6673 6674 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout); 6675} 6676 6677/** 6678 * t4_set_params - sets FW or device parameters 6679 * @adap: the adapter 6680 * @mbox: mailbox to use for the FW command 6681 * @pf: the PF 6682 * @vf: the VF 6683 * @nparams: the number of parameters 6684 * @params: the parameter names 6685 * @val: the parameter values 6686 * 6687 * Sets the value of FW or device parameters. Up to 7 parameters can be 6688 * specified at once. 
6689 */ 6690int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 6691 unsigned int vf, unsigned int nparams, const u32 *params, 6692 const u32 *val) 6693{ 6694 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val, 6695 FW_CMD_MAX_TIMEOUT); 6696} 6697 6698/** 6699 * t4_cfg_pfvf - configure PF/VF resource limits 6700 * @adap: the adapter 6701 * @mbox: mailbox to use for the FW command 6702 * @pf: the PF being configured 6703 * @vf: the VF being configured 6704 * @txq: the max number of egress queues 6705 * @txq_eth_ctrl: the max number of egress Ethernet or control queues 6706 * @rxqi: the max number of interrupt-capable ingress queues 6707 * @rxq: the max number of interruptless ingress queues 6708 * @tc: the PCI traffic class 6709 * @vi: the max number of virtual interfaces 6710 * @cmask: the channel access rights mask for the PF/VF 6711 * @pmask: the port access rights mask for the PF/VF 6712 * @nexact: the maximum number of exact MPS filters 6713 * @rcaps: read capabilities 6714 * @wxcaps: write/execute capabilities 6715 * 6716 * Configures resource limits and capabilities for a physical or virtual 6717 * function. 
6718 */ 6719int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, 6720 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, 6721 unsigned int rxqi, unsigned int rxq, unsigned int tc, 6722 unsigned int vi, unsigned int cmask, unsigned int pmask, 6723 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) 6724{ 6725 struct fw_pfvf_cmd c; 6726 6727 memset(&c, 0, sizeof(c)); 6728 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST | 6729 F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) | 6730 V_FW_PFVF_CMD_VFN(vf)); 6731 c.retval_len16 = cpu_to_be32(FW_LEN16(c)); 6732 c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) | 6733 V_FW_PFVF_CMD_NIQ(rxq)); 6734 c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) | 6735 V_FW_PFVF_CMD_PMASK(pmask) | 6736 V_FW_PFVF_CMD_NEQ(txq)); 6737 c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) | 6738 V_FW_PFVF_CMD_NVI(vi) | 6739 V_FW_PFVF_CMD_NEXACTF(nexact)); 6740 c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) | 6741 V_FW_PFVF_CMD_WX_CAPS(wxcaps) | 6742 V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); 6743 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 6744} 6745 6746/** 6747 * t4_alloc_vi_func - allocate a virtual interface 6748 * @adap: the adapter 6749 * @mbox: mailbox to use for the FW command 6750 * @port: physical port associated with the VI 6751 * @pf: the PF owning the VI 6752 * @vf: the VF owning the VI 6753 * @nmac: number of MAC addresses needed (1 to 5) 6754 * @mac: the MAC addresses of the VI 6755 * @rss_size: size of RSS table slice associated with this VI 6756 * @portfunc: which Port Application Function MAC Address is desired 6757 * @idstype: Intrusion Detection Type 6758 * 6759 * Allocates a virtual interface for the given physical port. If @mac is 6760 * not %NULL it contains the MAC addresses of the VI as assigned by FW. 6761 * If @rss_size is %NULL the VI is not assigned any RSS slice by FW. 
6762 * @mac should be large enough to hold @nmac Ethernet addresses, they are 6763 * stored consecutively so the space needed is @nmac * 6 bytes. 6764 * Returns a negative error number or the non-negative VI id. 6765 */ 6766int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox, 6767 unsigned int port, unsigned int pf, unsigned int vf, 6768 unsigned int nmac, u8 *mac, u16 *rss_size, 6769 unsigned int portfunc, unsigned int idstype) 6770{ 6771 int ret; 6772 struct fw_vi_cmd c; 6773 6774 memset(&c, 0, sizeof(c)); 6775 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST | 6776 F_FW_CMD_WRITE | F_FW_CMD_EXEC | 6777 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf)); 6778 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c)); 6779 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) | 6780 V_FW_VI_CMD_FUNC(portfunc)); 6781 c.portid_pkd = V_FW_VI_CMD_PORTID(port); 6782 c.nmac = nmac - 1; 6783 if(!rss_size) 6784 c.norss_rsssize = F_FW_VI_CMD_NORSS; 6785 6786 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 6787 if (ret) 6788 return ret; 6789 6790 if (mac) { 6791 memcpy(mac, c.mac, sizeof(c.mac)); 6792 switch (nmac) { 6793 case 5: 6794 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); 6795 case 4: 6796 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); 6797 case 3: 6798 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); 6799 case 2: 6800 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); 6801 } 6802 } 6803 if (rss_size) 6804 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize)); 6805 return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid)); 6806} 6807 6808/** 6809 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface 6810 * @adap: the adapter 6811 * @mbox: mailbox to use for the FW command 6812 * @port: physical port associated with the VI 6813 * @pf: the PF owning the VI 6814 * @vf: the VF owning the VI 6815 * @nmac: number of MAC addresses needed (1 to 5) 6816 * @mac: the MAC addresses of the VI 6817 * @rss_size: size of RSS table slice associated with this 
VI 6818 * 6819 * backwards compatible and convieniance routine to allocate a Virtual 6820 * Interface with a Ethernet Port Application Function and Intrustion 6821 * Detection System disabled. 6822 */ 6823int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, 6824 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, 6825 u16 *rss_size) 6826{ 6827 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size, 6828 FW_VI_FUNC_ETH, 0); 6829} 6830 6831/** 6832 * t4_free_vi - free a virtual interface 6833 * @adap: the adapter 6834 * @mbox: mailbox to use for the FW command 6835 * @pf: the PF owning the VI 6836 * @vf: the VF owning the VI 6837 * @viid: virtual interface identifiler 6838 * 6839 * Free a previously allocated virtual interface. 6840 */ 6841int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, 6842 unsigned int vf, unsigned int viid) 6843{ 6844 struct fw_vi_cmd c; 6845 6846 memset(&c, 0, sizeof(c)); 6847 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | 6848 F_FW_CMD_REQUEST | 6849 F_FW_CMD_EXEC | 6850 V_FW_VI_CMD_PFN(pf) | 6851 V_FW_VI_CMD_VFN(vf)); 6852 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c)); 6853 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid)); 6854 6855 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 6856} 6857 6858/** 6859 * t4_set_rxmode - set Rx properties of a virtual interface 6860 * @adap: the adapter 6861 * @mbox: mailbox to use for the FW command 6862 * @viid: the VI id 6863 * @mtu: the new MTU or -1 6864 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change 6865 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change 6866 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change 6867 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change 6868 * @sleep_ok: if true we may sleep while awaiting command completion 6869 * 6870 * Sets Rx properties of a virtual interface. 
6871 */ 6872int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, 6873 int mtu, int promisc, int all_multi, int bcast, int vlanex, 6874 bool sleep_ok) 6875{ 6876 struct fw_vi_rxmode_cmd c; 6877 6878 /* convert to FW values */ 6879 if (mtu < 0) 6880 mtu = M_FW_VI_RXMODE_CMD_MTU; 6881 if (promisc < 0) 6882 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN; 6883 if (all_multi < 0) 6884 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN; 6885 if (bcast < 0) 6886 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN; 6887 if (vlanex < 0) 6888 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN; 6889 6890 memset(&c, 0, sizeof(c)); 6891 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | 6892 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 6893 V_FW_VI_RXMODE_CMD_VIID(viid)); 6894 c.retval_len16 = cpu_to_be32(FW_LEN16(c)); 6895 c.mtu_to_vlanexen = 6896 cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) | 6897 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) | 6898 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | 6899 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | 6900 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); 6901 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 6902} 6903 6904/** 6905 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses 6906 * @adap: the adapter 6907 * @mbox: mailbox to use for the FW command 6908 * @viid: the VI id 6909 * @free: if true any existing filters for this VI id are first removed 6910 * @naddr: the number of MAC addresses to allocate filters for (up to 7) 6911 * @addr: the MAC address(es) 6912 * @idx: where to store the index of each allocated filter 6913 * @hash: pointer to hash address filter bitmap 6914 * @sleep_ok: call is allowed to sleep 6915 * 6916 * Allocates an exact-match filter for each of the supplied addresses and 6917 * sets it to the corresponding address. If @idx is not %NULL it should 6918 * have at least @naddr entries, each of which will be set to the index of 6919 * the filter allocated for the corresponding MAC address. 
If a filter
 *	could not be allocated for an address its index is set to 0xffff.
 *	If @hash is not %NULL addresses that fail to allocate an exact filter
 *	are hashed and update the hash filter bitmap pointed at by @hash.
 *
 *	Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	unsigned int max_naddr = adap->chip_params->mps_tcam_size;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	/*
	 * A single command holds at most ARRAY_SIZE(c.u.exact) addresses,
	 * so issue as many commands as needed to cover all of @addr.
	 */
	for (offset = 0; offset < naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
					 ? rem
					 : ARRAY_SIZE(c.u.exact));
		/* Command length only covers the exact entries actually used. */
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
					   F_FW_CMD_REQUEST |
					   F_FW_CMD_WRITE |
					   V_FW_CMD_EXEC(free) |
					   V_FW_VI_MAC_CMD_VIID(viid));
		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
						  V_FW_CMD_LEN16(len16));

		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx =
			    cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
					V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		/*
		 * It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			/* An index >= max_naddr means no filter was allocated
			 * for that address. */
			u16 index = G_FW_VI_MAC_CMD_IDX(
			    be16_to_cpu(p->valid_to_idx));

			if (idx)
				idx[offset+i] = (index >= max_naddr
						 ? 0xffff
						 : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
		}

		/* Only honor the "free existing filters" request once. */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/* FW_ENOMEM is a partial success: report how many filters stuck. */
	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}

/**
 *	t4_change_mac - modifies the exact-match filter for a MAC address
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@idx: index of existing filter for old value of MAC address, or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@add_smt: if true also add the address to the HW SMT
 *
 *	Modifies an exact-match filter and sets it to the new MAC address if
 *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
 *	latter case the address is added persistently if @persist is %true.
 *
 *	Note that in general it is not possible to modify the value of a given
 *	filter so the generic way to modify an address filter is to free the one
 *	being used by the old address value and allocate a new filter for the
 *	new address value.
 *
 *	Returns a negative error number or the index of the filter with the new
 *	MAC value.  Note that this index may differ from @idx.
7018 */ 7019int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, 7020 int idx, const u8 *addr, bool persist, bool add_smt) 7021{ 7022 int ret, mode; 7023 struct fw_vi_mac_cmd c; 7024 struct fw_vi_mac_exact *p = c.u.exact; 7025 unsigned int max_mac_addr = adap->chip_params->mps_tcam_size; 7026 7027 if (idx < 0) /* new allocation */ 7028 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; 7029 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; 7030 7031 memset(&c, 0, sizeof(c)); 7032 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | 7033 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 7034 V_FW_VI_MAC_CMD_VIID(viid)); 7035 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1)); 7036 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID | 7037 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) | 7038 V_FW_VI_MAC_CMD_IDX(idx)); 7039 memcpy(p->macaddr, addr, sizeof(p->macaddr)); 7040 7041 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 7042 if (ret == 0) { 7043 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx)); 7044 if (ret >= max_mac_addr) 7045 ret = -ENOMEM; 7046 } 7047 return ret; 7048} 7049 7050/** 7051 * t4_set_addr_hash - program the MAC inexact-match hash filter 7052 * @adap: the adapter 7053 * @mbox: mailbox to use for the FW command 7054 * @viid: the VI id 7055 * @ucast: whether the hash filter should also match unicast addresses 7056 * @vec: the value to be written to the hash filter 7057 * @sleep_ok: call is allowed to sleep 7058 * 7059 * Sets the 64-bit inexact-match hash filter for a virtual interface. 
7060 */ 7061int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, 7062 bool ucast, u64 vec, bool sleep_ok) 7063{ 7064 struct fw_vi_mac_cmd c; 7065 u32 val; 7066 7067 memset(&c, 0, sizeof(c)); 7068 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | 7069 F_FW_CMD_REQUEST | F_FW_CMD_WRITE | 7070 V_FW_VI_ENABLE_CMD_VIID(viid)); 7071 val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) | 7072 V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1); 7073 c.freemacs_to_len16 = cpu_to_be32(val); 7074 c.u.hash.hashvec = cpu_to_be64(vec); 7075 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 7076} 7077 7078/** 7079 * t4_enable_vi_params - enable/disable a virtual interface 7080 * @adap: the adapter 7081 * @mbox: mailbox to use for the FW command 7082 * @viid: the VI id 7083 * @rx_en: 1=enable Rx, 0=disable Rx 7084 * @tx_en: 1=enable Tx, 0=disable Tx 7085 * @dcb_en: 1=enable delivery of Data Center Bridging messages. 7086 * 7087 * Enables/disables a virtual interface. Note that setting DCB Enable 7088 * only makes sense when enabling a Virtual Interface ... 7089 */ 7090int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, 7091 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en) 7092{ 7093 struct fw_vi_enable_cmd c; 7094 7095 memset(&c, 0, sizeof(c)); 7096 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | 7097 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 7098 V_FW_VI_ENABLE_CMD_VIID(viid)); 7099 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) | 7100 V_FW_VI_ENABLE_CMD_EEN(tx_en) | 7101 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) | 7102 FW_LEN16(c)); 7103 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); 7104} 7105 7106/** 7107 * t4_enable_vi - enable/disable a virtual interface 7108 * @adap: the adapter 7109 * @mbox: mailbox to use for the FW command 7110 * @viid: the VI id 7111 * @rx_en: 1=enable Rx, 0=disable Rx 7112 * @tx_en: 1=enable Tx, 0=disable Tx 7113 * 7114 * Enables/disables a virtual interface. 
Note that setting DCB Enable 7115 * only makes sense when enabling a Virtual Interface ... 7116 */ 7117int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, 7118 bool rx_en, bool tx_en) 7119{ 7120 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0); 7121} 7122 7123/** 7124 * t4_identify_port - identify a VI's port by blinking its LED 7125 * @adap: the adapter 7126 * @mbox: mailbox to use for the FW command 7127 * @viid: the VI id 7128 * @nblinks: how many times to blink LED at 2.5 Hz 7129 * 7130 * Identifies a VI's port by blinking its LED. 7131 */ 7132int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, 7133 unsigned int nblinks) 7134{ 7135 struct fw_vi_enable_cmd c; 7136 7137 memset(&c, 0, sizeof(c)); 7138 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | 7139 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 7140 V_FW_VI_ENABLE_CMD_VIID(viid)); 7141 c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); 7142 c.blinkdur = cpu_to_be16(nblinks); 7143 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7144} 7145 7146/** 7147 * t4_iq_stop - stop an ingress queue and its FLs 7148 * @adap: the adapter 7149 * @mbox: mailbox to use for the FW command 7150 * @pf: the PF owning the queues 7151 * @vf: the VF owning the queues 7152 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) 7153 * @iqid: ingress queue id 7154 * @fl0id: FL0 queue id or 0xffff if no attached FL0 7155 * @fl1id: FL1 queue id or 0xffff if no attached FL1 7156 * 7157 * Stops an ingress queue and its associated FLs, if any. This causes 7158 * any current or future data/messages destined for these queues to be 7159 * tossed. 
7160 */ 7161int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf, 7162 unsigned int vf, unsigned int iqtype, unsigned int iqid, 7163 unsigned int fl0id, unsigned int fl1id) 7164{ 7165 struct fw_iq_cmd c; 7166 7167 memset(&c, 0, sizeof(c)); 7168 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 7169 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | 7170 V_FW_IQ_CMD_VFN(vf)); 7171 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c)); 7172 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype)); 7173 c.iqid = cpu_to_be16(iqid); 7174 c.fl0id = cpu_to_be16(fl0id); 7175 c.fl1id = cpu_to_be16(fl1id); 7176 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7177} 7178 7179/** 7180 * t4_iq_free - free an ingress queue and its FLs 7181 * @adap: the adapter 7182 * @mbox: mailbox to use for the FW command 7183 * @pf: the PF owning the queues 7184 * @vf: the VF owning the queues 7185 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) 7186 * @iqid: ingress queue id 7187 * @fl0id: FL0 queue id or 0xffff if no attached FL0 7188 * @fl1id: FL1 queue id or 0xffff if no attached FL1 7189 * 7190 * Frees an ingress queue and its associated FLs, if any. 
7191 */ 7192int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 7193 unsigned int vf, unsigned int iqtype, unsigned int iqid, 7194 unsigned int fl0id, unsigned int fl1id) 7195{ 7196 struct fw_iq_cmd c; 7197 7198 memset(&c, 0, sizeof(c)); 7199 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 7200 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | 7201 V_FW_IQ_CMD_VFN(vf)); 7202 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c)); 7203 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype)); 7204 c.iqid = cpu_to_be16(iqid); 7205 c.fl0id = cpu_to_be16(fl0id); 7206 c.fl1id = cpu_to_be16(fl1id); 7207 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7208} 7209 7210/** 7211 * t4_eth_eq_free - free an Ethernet egress queue 7212 * @adap: the adapter 7213 * @mbox: mailbox to use for the FW command 7214 * @pf: the PF owning the queue 7215 * @vf: the VF owning the queue 7216 * @eqid: egress queue id 7217 * 7218 * Frees an Ethernet egress queue. 7219 */ 7220int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 7221 unsigned int vf, unsigned int eqid) 7222{ 7223 struct fw_eq_eth_cmd c; 7224 7225 memset(&c, 0, sizeof(c)); 7226 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | 7227 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 7228 V_FW_EQ_ETH_CMD_PFN(pf) | 7229 V_FW_EQ_ETH_CMD_VFN(vf)); 7230 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); 7231 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid)); 7232 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7233} 7234 7235/** 7236 * t4_ctrl_eq_free - free a control egress queue 7237 * @adap: the adapter 7238 * @mbox: mailbox to use for the FW command 7239 * @pf: the PF owning the queue 7240 * @vf: the VF owning the queue 7241 * @eqid: egress queue id 7242 * 7243 * Frees a control egress queue. 
7244 */ 7245int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 7246 unsigned int vf, unsigned int eqid) 7247{ 7248 struct fw_eq_ctrl_cmd c; 7249 7250 memset(&c, 0, sizeof(c)); 7251 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | 7252 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 7253 V_FW_EQ_CTRL_CMD_PFN(pf) | 7254 V_FW_EQ_CTRL_CMD_VFN(vf)); 7255 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c)); 7256 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid)); 7257 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7258} 7259 7260/** 7261 * t4_ofld_eq_free - free an offload egress queue 7262 * @adap: the adapter 7263 * @mbox: mailbox to use for the FW command 7264 * @pf: the PF owning the queue 7265 * @vf: the VF owning the queue 7266 * @eqid: egress queue id 7267 * 7268 * Frees a control egress queue. 7269 */ 7270int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 7271 unsigned int vf, unsigned int eqid) 7272{ 7273 struct fw_eq_ofld_cmd c; 7274 7275 memset(&c, 0, sizeof(c)); 7276 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | 7277 F_FW_CMD_REQUEST | F_FW_CMD_EXEC | 7278 V_FW_EQ_OFLD_CMD_PFN(pf) | 7279 V_FW_EQ_OFLD_CMD_VFN(vf)); 7280 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c)); 7281 c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid)); 7282 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 7283} 7284 7285/** 7286 * t4_link_down_rc_str - return a string for a Link Down Reason Code 7287 * @link_down_rc: Link Down Reason Code 7288 * 7289 * Returns a string representation of the Link Down Reason Code. 
 */
const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	static const char *reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved3",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved7",
	};

	if (link_down_rc >= ARRAY_SIZE(reason))
		return "Bad Reason Code";

	return reason[link_down_rc];
}

/**
 *	t4_handle_fw_rpl - process a FW reply message
 *	@adap: the adapter
 *	@rpl: start of the FW message
 *
 *	Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;
	const struct fw_port_cmd *p = (const void *)rpl;
	unsigned int action =
	    G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));

	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
		/* link/module state change message */
		int speed = 0, fc = 0, i;
		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
		struct port_info *pi = NULL;
		struct link_config *lc;
		u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);

		/* Decode pause settings and link speed from the status word. */
		if (stat & F_FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & F_FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = 100;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = 1000;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = 10000;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
			speed = 40000;

		/*
		 * Find the port that uses the reported Tx channel.
		 * NOTE(review): assumes the firmware only reports channels
		 * belonging to one of our ports; if no port matches (or the
		 * adapter has no ports) pi may be the last port or NULL --
		 * confirm this invariant holds.
		 */
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->tx_chan == chan)
				break;
		}
		lc = &pi->link_cfg;

		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, i);
		}
		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {	/* something changed */
			int reason;

			/* Only a transition to link-down carries a reason. */
			if (!link_ok && lc->link_ok)
				reason = G_FW_PORT_CMD_LINKDNRC(stat);
			else
				reason = -1;

			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			lc->supported = be16_to_cpu(p->u.info.pcap);
			t4_os_link_changed(adap, i, link_ok, reason);
		}
	} else {
		CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
		return -EINVAL;
	}
	return 0;
}

/**
 *	get_pci_mode - determine a card's PCI mode
 *	@adapter: the adapter
 *	@p: where to store the PCI settings
 *
 *	Determines a card's PCI mode and associated parameters, such as speed
 *	and width.
 */
static void get_pci_mode(struct adapter *adapter,
			 struct pci_params *p)
{
	u16 val;
	u32 pcie_cap;

	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		/* Link Status register: speed in the low bits, negotiated
		 * link width starting at bit 4. */
		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}

/**
 *	init_link_config - initialize a link's SW state
 *	@lc: structure holding the link state
 *	@caps: link capabilities
 *
 *	Initializes the SW state maintained for each link, including the link's
 *	capabilities and default speed/flow-control/autonegotiation settings.
7409 */ 7410static void init_link_config(struct link_config *lc, unsigned int caps) 7411{ 7412 lc->supported = caps; 7413 lc->requested_speed = 0; 7414 lc->speed = 0; 7415 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; 7416 if (lc->supported & FW_PORT_CAP_ANEG) { 7417 lc->advertising = lc->supported & ADVERT_MASK; 7418 lc->autoneg = AUTONEG_ENABLE; 7419 lc->requested_fc |= PAUSE_AUTONEG; 7420 } else { 7421 lc->advertising = 0; 7422 lc->autoneg = AUTONEG_DISABLE; 7423 } 7424} 7425 7426struct flash_desc { 7427 u32 vendor_and_model_id; 7428 u32 size_mb; 7429}; 7430 7431int t4_get_flash_params(struct adapter *adapter) 7432{ 7433 /* 7434 * Table for non-Numonix supported flash parts. Numonix parts are left 7435 * to the preexisting well-tested code. All flash parts have 64KB 7436 * sectors. 7437 */ 7438 static struct flash_desc supported_flash[] = { 7439 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */ 7440 }; 7441 7442 int ret; 7443 u32 info = 0; 7444 7445 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID); 7446 if (!ret) 7447 ret = sf1_read(adapter, 3, 0, 1, &info); 7448 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ 7449 if (ret < 0) 7450 return ret; 7451 7452 for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret) 7453 if (supported_flash[ret].vendor_and_model_id == info) { 7454 adapter->params.sf_size = supported_flash[ret].size_mb; 7455 adapter->params.sf_nsec = 7456 adapter->params.sf_size / SF_SEC_SIZE; 7457 return 0; 7458 } 7459 7460 if ((info & 0xff) != 0x20) /* not a Numonix flash */ 7461 return -EINVAL; 7462 info >>= 16; /* log2 of size */ 7463 if (info >= 0x14 && info < 0x18) 7464 adapter->params.sf_nsec = 1 << (info - 16); 7465 else if (info == 0x18) 7466 adapter->params.sf_nsec = 64; 7467 else 7468 return -EINVAL; 7469 adapter->params.sf_size = 1 << info; 7470 7471 /* 7472 * We should ~probably~ reject adapters with FLASHes which are too 7473 * small but we have some legacy FPGAs with small FLASHes that we'd 7474 * still like to use. 
So instead we emit a scary message ... 7475 */ 7476 if (adapter->params.sf_size < FLASH_MIN_SIZE) 7477 CH_WARN(adapter, "WARNING!!! FLASH size %#x < %#x!!!\n", 7478 adapter->params.sf_size, FLASH_MIN_SIZE); 7479 7480 return 0; 7481} 7482 7483static void set_pcie_completion_timeout(struct adapter *adapter, 7484 u8 range) 7485{ 7486 u16 val; 7487 u32 pcie_cap; 7488 7489 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP); 7490 if (pcie_cap) { 7491 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val); 7492 val &= 0xfff0; 7493 val |= range ; 7494 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val); 7495 } 7496} 7497 7498const struct chip_params *t4_get_chip_params(int chipid) 7499{ 7500 static const struct chip_params chip_params[] = { 7501 { 7502 /* T4 */ 7503 .nchan = NCHAN, 7504 .pm_stats_cnt = PM_NSTATS, 7505 .cng_ch_bits_log = 2, 7506 .nsched_cls = 15, 7507 .cim_num_obq = CIM_NUM_OBQ, 7508 .mps_rplc_size = 128, 7509 .vfcount = 128, 7510 .sge_fl_db = F_DBPRIO, 7511 .mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES, 7512 }, 7513 { 7514 /* T5 */ 7515 .nchan = NCHAN, 7516 .pm_stats_cnt = PM_NSTATS, 7517 .cng_ch_bits_log = 2, 7518 .nsched_cls = 16, 7519 .cim_num_obq = CIM_NUM_OBQ_T5, 7520 .mps_rplc_size = 128, 7521 .vfcount = 128, 7522 .sge_fl_db = F_DBPRIO | F_DBTYPE, 7523 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES, 7524 }, 7525 { 7526 /* T6 */ 7527 .nchan = T6_NCHAN, 7528 .pm_stats_cnt = T6_PM_NSTATS, 7529 .cng_ch_bits_log = 3, 7530 .nsched_cls = 16, 7531 .cim_num_obq = CIM_NUM_OBQ_T5, 7532 .mps_rplc_size = 256, 7533 .vfcount = 256, 7534 .sge_fl_db = 0, 7535 .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES, 7536 }, 7537 }; 7538 7539 chipid -= CHELSIO_T4; 7540 if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params)) 7541 return NULL; 7542 7543 return &chip_params[chipid]; 7544} 7545 7546/** 7547 * t4_prep_adapter - prepare SW and HW for operation 7548 * @adapter: the adapter 7549 * @buf: temporary space of at least VPD_LEN size provided by 
the caller. 7550 * 7551 * Initialize adapter SW state for the various HW modules, set initial 7552 * values for some adapter tunables, take PHYs out of reset, and 7553 * initialize the MDIO interface. 7554 */ 7555int t4_prep_adapter(struct adapter *adapter, u8 *buf) 7556{ 7557 int ret; 7558 uint16_t device_id; 7559 uint32_t pl_rev; 7560 7561 get_pci_mode(adapter, &adapter->params.pci); 7562 7563 pl_rev = t4_read_reg(adapter, A_PL_REV); 7564 adapter->params.chipid = G_CHIPID(pl_rev); 7565 adapter->params.rev = G_REV(pl_rev); 7566 if (adapter->params.chipid == 0) { 7567 /* T4 did not have chipid in PL_REV (T5 onwards do) */ 7568 adapter->params.chipid = CHELSIO_T4; 7569 7570 /* T4A1 chip is not supported */ 7571 if (adapter->params.rev == 1) { 7572 CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n"); 7573 return -EINVAL; 7574 } 7575 } 7576 7577 adapter->chip_params = t4_get_chip_params(chip_id(adapter)); 7578 if (adapter->chip_params == NULL) 7579 return -EINVAL; 7580 7581 adapter->params.pci.vpd_cap_addr = 7582 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD); 7583 7584 ret = t4_get_flash_params(adapter); 7585 if (ret < 0) 7586 return ret; 7587 7588 ret = get_vpd_params(adapter, &adapter->params.vpd, buf); 7589 if (ret < 0) 7590 return ret; 7591 7592 /* Cards with real ASICs have the chipid in the PCIe device id */ 7593 t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id); 7594 if (device_id >> 12 == chip_id(adapter)) 7595 adapter->params.cim_la_size = CIMLA_SIZE; 7596 else { 7597 /* FPGA */ 7598 adapter->params.fpga = 1; 7599 adapter->params.cim_la_size = 2 * CIMLA_SIZE; 7600 } 7601 7602 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); 7603 7604 /* 7605 * Default port and clock for debugging in case we can't reach FW. 7606 */ 7607 adapter->params.nports = 1; 7608 adapter->params.portvec = 1; 7609 adapter->params.vpd.cclk = 50000; 7610 7611 /* Set pci completion timeout value to 4 seconds. 
*/ 7612 set_pcie_completion_timeout(adapter, 0xd); 7613 return 0; 7614} 7615 7616/** 7617 * t4_shutdown_adapter - shut down adapter, host & wire 7618 * @adapter: the adapter 7619 * 7620 * Perform an emergency shutdown of the adapter and stop it from 7621 * continuing any further communication on the ports or DMA to the 7622 * host. This is typically used when the adapter and/or firmware 7623 * have crashed and we want to prevent any further accidental 7624 * communication with the rest of the world. This will also force 7625 * the port Link Status to go down -- if register writes work -- 7626 * which should help our peers figure out that we're down. 7627 */ 7628int t4_shutdown_adapter(struct adapter *adapter) 7629{ 7630 int port; 7631 7632 t4_intr_disable(adapter); 7633 t4_write_reg(adapter, A_DBG_GPIO_EN, 0); 7634 for_each_port(adapter, port) { 7635 u32 a_port_cfg = PORT_REG(port, 7636 is_t4(adapter) 7637 ? A_XGMAC_PORT_CFG 7638 : A_MAC_PORT_CFG); 7639 7640 t4_write_reg(adapter, a_port_cfg, 7641 t4_read_reg(adapter, a_port_cfg) 7642 & ~V_SIGNAL_DET(1)); 7643 } 7644 t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0); 7645 7646 return 0; 7647} 7648 7649/** 7650 * t4_init_devlog_params - initialize adapter->params.devlog 7651 * @adap: the adapter 7652 * @fw_attach: whether we can talk to the firmware 7653 * 7654 * Initialize various fields of the adapter's Firmware Device Log 7655 * Parameters structure. 7656 */ 7657int t4_init_devlog_params(struct adapter *adap, int fw_attach) 7658{ 7659 struct devlog_params *dparams = &adap->params.devlog; 7660 u32 pf_dparams; 7661 unsigned int devlog_meminfo; 7662 struct fw_devlog_cmd devlog_cmd; 7663 int ret; 7664 7665 /* If we're dealing with newer firmware, the Device Log Paramerters 7666 * are stored in a designated register which allows us to access the 7667 * Device Log even if we can't talk to the firmware. 
7668 */ 7669 pf_dparams = 7670 t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG)); 7671 if (pf_dparams) { 7672 unsigned int nentries, nentries128; 7673 7674 dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams); 7675 dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4; 7676 7677 nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams); 7678 nentries = (nentries128 + 1) * 128; 7679 dparams->size = nentries * sizeof(struct fw_devlog_e); 7680 7681 return 0; 7682 } 7683 7684 /* 7685 * For any failing returns ... 7686 */ 7687 memset(dparams, 0, sizeof *dparams); 7688 7689 /* 7690 * If we can't talk to the firmware, there's really nothing we can do 7691 * at this point. 7692 */ 7693 if (!fw_attach) 7694 return -ENXIO; 7695 7696 /* Otherwise, ask the firmware for it's Device Log Parameters. 7697 */ 7698 memset(&devlog_cmd, 0, sizeof devlog_cmd); 7699 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) | 7700 F_FW_CMD_REQUEST | F_FW_CMD_READ); 7701 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd)); 7702 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd), 7703 &devlog_cmd); 7704 if (ret) 7705 return ret; 7706 7707 devlog_meminfo = 7708 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog); 7709 dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo); 7710 dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4; 7711 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog); 7712 7713 return 0; 7714} 7715 7716/** 7717 * t4_init_sge_params - initialize adap->params.sge 7718 * @adapter: the adapter 7719 * 7720 * Initialize various fields of the adapter's SGE Parameters structure. 
7721 */ 7722int t4_init_sge_params(struct adapter *adapter) 7723{ 7724 u32 r; 7725 struct sge_params *sp = &adapter->params.sge; 7726 unsigned i; 7727 7728 r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD); 7729 sp->counter_val[0] = G_THRESHOLD_0(r); 7730 sp->counter_val[1] = G_THRESHOLD_1(r); 7731 sp->counter_val[2] = G_THRESHOLD_2(r); 7732 sp->counter_val[3] = G_THRESHOLD_3(r); 7733 7734 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1); 7735 sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r)); 7736 sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r)); 7737 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3); 7738 sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r)); 7739 sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r)); 7740 r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5); 7741 sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r)); 7742 sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r)); 7743 7744 r = t4_read_reg(adapter, A_SGE_CONM_CTRL); 7745 sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1; 7746 if (is_t4(adapter)) 7747 sp->fl_starve_threshold2 = sp->fl_starve_threshold; 7748 else 7749 sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1; 7750 7751 /* egress queues: log2 of # of doorbells per BAR2 page */ 7752 r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF); 7753 r >>= S_QUEUESPERPAGEPF0 + 7754 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf; 7755 sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0; 7756 7757 /* ingress queues: log2 of # of doorbells per BAR2 page */ 7758 r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF); 7759 r >>= S_QUEUESPERPAGEPF0 + 7760 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf; 7761 sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0; 7762 7763 r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE); 7764 r >>= S_HOSTPAGESIZEPF0 + 7765 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf; 7766 sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10; 7767 
7768 r = t4_read_reg(adapter, A_SGE_CONTROL); 7769 sp->sge_control = r; 7770 sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64; 7771 sp->fl_pktshift = G_PKTSHIFT(r); 7772 sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) + 5); 7773 if (is_t4(adapter)) 7774 sp->pack_boundary = sp->pad_boundary; 7775 else { 7776 r = t4_read_reg(adapter, A_SGE_CONTROL2); 7777 if (G_INGPACKBOUNDARY(r) == 0) 7778 sp->pack_boundary = 16; 7779 else 7780 sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5); 7781 } 7782 for (i = 0; i < SGE_FLBUF_SIZES; i++) 7783 sp->sge_fl_buffer_size[i] = t4_read_reg(adapter, 7784 A_SGE_FL_BUFFER_SIZE0 + (4 * i)); 7785 7786 return 0; 7787} 7788 7789/* 7790 * Read and cache the adapter's compressed filter mode and ingress config. 7791 */ 7792static void read_filter_mode_and_ingress_config(struct adapter *adap) 7793{ 7794 struct tp_params *tpp = &adap->params.tp; 7795 7796 if (t4_use_ldst(adap)) { 7797 t4_fw_tp_pio_rw(adap, &tpp->vlan_pri_map, 1, 7798 A_TP_VLAN_PRI_MAP, 1); 7799 t4_fw_tp_pio_rw(adap, &tpp->ingress_config, 1, 7800 A_TP_INGRESS_CONFIG, 1); 7801 } else { 7802 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, 7803 &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP); 7804 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, 7805 &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG); 7806 } 7807 7808 /* 7809 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field 7810 * shift positions of several elements of the Compressed Filter Tuple 7811 * for this adapter which we need frequently ... 
7812 */ 7813 tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE); 7814 tpp->port_shift = t4_filter_field_shift(adap, F_PORT); 7815 tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID); 7816 tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN); 7817 tpp->tos_shift = t4_filter_field_shift(adap, F_TOS); 7818 tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL); 7819 tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE); 7820 tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH); 7821 tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE); 7822 tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION); 7823 7824 /* 7825 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID 7826 * represents the presense of an Outer VLAN instead of a VNIC ID. 7827 */ 7828 if ((tpp->ingress_config & F_VNIC) == 0) 7829 tpp->vnic_shift = -1; 7830} 7831 7832/** 7833 * t4_init_tp_params - initialize adap->params.tp 7834 * @adap: the adapter 7835 * 7836 * Initialize various fields of the adapter's TP Parameters structure. 7837 */ 7838int t4_init_tp_params(struct adapter *adap) 7839{ 7840 int chan; 7841 u32 v; 7842 struct tp_params *tpp = &adap->params.tp; 7843 7844 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION); 7845 tpp->tre = G_TIMERRESOLUTION(v); 7846 tpp->dack_re = G_DELAYEDACKRESOLUTION(v); 7847 7848 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ 7849 for (chan = 0; chan < MAX_NCHAN; chan++) 7850 tpp->tx_modq[chan] = chan; 7851 7852 read_filter_mode_and_ingress_config(adap); 7853 7854 /* 7855 * For T6, cache the adapter's compressed error vector 7856 * and passing outer header info for encapsulated packets. 7857 */ 7858 if (chip_id(adap) > CHELSIO_T5) { 7859 v = t4_read_reg(adap, A_TP_OUT_CONFIG); 7860 tpp->rx_pkt_encap = (v & F_CRXPKTENC) ? 
1 : 0; 7861 } 7862 7863 return 0; 7864} 7865 7866/** 7867 * t4_filter_field_shift - calculate filter field shift 7868 * @adap: the adapter 7869 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits) 7870 * 7871 * Return the shift position of a filter field within the Compressed 7872 * Filter Tuple. The filter field is specified via its selection bit 7873 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN. 7874 */ 7875int t4_filter_field_shift(const struct adapter *adap, int filter_sel) 7876{ 7877 unsigned int filter_mode = adap->params.tp.vlan_pri_map; 7878 unsigned int sel; 7879 int field_shift; 7880 7881 if ((filter_mode & filter_sel) == 0) 7882 return -1; 7883 7884 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) { 7885 switch (filter_mode & sel) { 7886 case F_FCOE: 7887 field_shift += W_FT_FCOE; 7888 break; 7889 case F_PORT: 7890 field_shift += W_FT_PORT; 7891 break; 7892 case F_VNIC_ID: 7893 field_shift += W_FT_VNIC_ID; 7894 break; 7895 case F_VLAN: 7896 field_shift += W_FT_VLAN; 7897 break; 7898 case F_TOS: 7899 field_shift += W_FT_TOS; 7900 break; 7901 case F_PROTOCOL: 7902 field_shift += W_FT_PROTOCOL; 7903 break; 7904 case F_ETHERTYPE: 7905 field_shift += W_FT_ETHERTYPE; 7906 break; 7907 case F_MACMATCH: 7908 field_shift += W_FT_MACMATCH; 7909 break; 7910 case F_MPSHITTYPE: 7911 field_shift += W_FT_MPSHITTYPE; 7912 break; 7913 case F_FRAGMENTATION: 7914 field_shift += W_FT_FRAGMENTATION; 7915 break; 7916 } 7917 } 7918 return field_shift; 7919} 7920 7921int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id) 7922{ 7923 u8 addr[6]; 7924 int ret, i, j; 7925 struct fw_port_cmd c; 7926 u16 rss_size; 7927 struct port_info *p = adap2pinfo(adap, port_id); 7928 u32 param, val; 7929 7930 memset(&c, 0, sizeof(c)); 7931 7932 for (i = 0, j = -1; i <= p->port_id; i++) { 7933 do { 7934 j++; 7935 } while ((adap->params.portvec & (1 << j)) == 0); 7936 } 7937 7938 if (!(adap->flags & IS_VF) || 7939 adap->params.vfres.r_caps & 
FW_CMD_CAP_PORT) { 7940 c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | 7941 F_FW_CMD_REQUEST | F_FW_CMD_READ | 7942 V_FW_PORT_CMD_PORTID(j)); 7943 c.action_to_len16 = htonl( 7944 V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | 7945 FW_LEN16(c)); 7946 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 7947 if (ret) 7948 return ret; 7949 7950 ret = be32_to_cpu(c.u.info.lstatus_to_modtype); 7951 p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ? 7952 G_FW_PORT_CMD_MDIOADDR(ret) : -1; 7953 p->port_type = G_FW_PORT_CMD_PTYPE(ret); 7954 p->mod_type = G_FW_PORT_CMD_MODTYPE(ret); 7955 7956 init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap)); 7957 } 7958 7959 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); 7960 if (ret < 0) 7961 return ret; 7962 7963 p->vi[0].viid = ret; 7964 p->tx_chan = j; 7965 p->rx_chan_map = t4_get_mps_bg_map(adap, j); 7966 p->lport = j; 7967 p->vi[0].rss_size = rss_size; 7968 t4_os_set_hw_addr(adap, p->port_id, addr); 7969 7970 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 7971 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) | 7972 V_FW_PARAMS_PARAM_YZ(p->vi[0].viid); 7973 ret = t4_query_params(adap, mbox, pf, vf, 1, ¶m, &val); 7974 if (ret) 7975 p->vi[0].rss_base = 0xffff; 7976 else { 7977 /* MPASS((val >> 16) == rss_size); */ 7978 p->vi[0].rss_base = val & 0xffff; 7979 } 7980 7981 return 0; 7982} 7983 7984/** 7985 * t4_read_cimq_cfg - read CIM queue configuration 7986 * @adap: the adapter 7987 * @base: holds the queue base addresses in bytes 7988 * @size: holds the queue sizes in bytes 7989 * @thres: holds the queue full thresholds in bytes 7990 * 7991 * Returns the current configuration of the CIM queues, starting with 7992 * the IBQs, then the OBQs. 
7993 */ 7994void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres) 7995{ 7996 unsigned int i, v; 7997 int cim_num_obq = adap->chip_params->cim_num_obq; 7998 7999 for (i = 0; i < CIM_NUM_IBQ; i++) { 8000 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT | 8001 V_QUENUMSELECT(i)); 8002 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL); 8003 /* value is in 256-byte units */ 8004 *base++ = G_CIMQBASE(v) * 256; 8005 *size++ = G_CIMQSIZE(v) * 256; 8006 *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */ 8007 } 8008 for (i = 0; i < cim_num_obq; i++) { 8009 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT | 8010 V_QUENUMSELECT(i)); 8011 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL); 8012 /* value is in 256-byte units */ 8013 *base++ = G_CIMQBASE(v) * 256; 8014 *size++ = G_CIMQSIZE(v) * 256; 8015 } 8016} 8017 8018/** 8019 * t4_read_cim_ibq - read the contents of a CIM inbound queue 8020 * @adap: the adapter 8021 * @qid: the queue index 8022 * @data: where to store the queue contents 8023 * @n: capacity of @data in 32-bit words 8024 * 8025 * Reads the contents of the selected CIM queue starting at address 0 up 8026 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on 8027 * error and the number of 32-bit words actually read on success. 8028 */ 8029int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n) 8030{ 8031 int i, err, attempts; 8032 unsigned int addr; 8033 const unsigned int nwords = CIM_IBQ_SIZE * 4; 8034 8035 if (qid > 5 || (n & 3)) 8036 return -EINVAL; 8037 8038 addr = qid * nwords; 8039 if (n > nwords) 8040 n = nwords; 8041 8042 /* It might take 3-10ms before the IBQ debug read access is allowed. 8043 * Wait for 1 Sec with a delay of 1 usec. 
8044 */ 8045 attempts = 1000000; 8046 8047 for (i = 0; i < n; i++, addr++) { 8048 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) | 8049 F_IBQDBGEN); 8050 err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0, 8051 attempts, 1); 8052 if (err) 8053 return err; 8054 *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA); 8055 } 8056 t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0); 8057 return i; 8058} 8059 8060/** 8061 * t4_read_cim_obq - read the contents of a CIM outbound queue 8062 * @adap: the adapter 8063 * @qid: the queue index 8064 * @data: where to store the queue contents 8065 * @n: capacity of @data in 32-bit words 8066 * 8067 * Reads the contents of the selected CIM queue starting at address 0 up 8068 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on 8069 * error and the number of 32-bit words actually read on success. 8070 */ 8071int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n) 8072{ 8073 int i, err; 8074 unsigned int addr, v, nwords; 8075 int cim_num_obq = adap->chip_params->cim_num_obq; 8076 8077 if ((qid > (cim_num_obq - 1)) || (n & 3)) 8078 return -EINVAL; 8079 8080 t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT | 8081 V_QUENUMSELECT(qid)); 8082 v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL); 8083 8084 addr = G_CIMQBASE(v) * 64; /* muliple of 256 -> muliple of 4 */ 8085 nwords = G_CIMQSIZE(v) * 64; /* same */ 8086 if (n > nwords) 8087 n = nwords; 8088 8089 for (i = 0; i < n; i++, addr++) { 8090 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) | 8091 F_OBQDBGEN); 8092 err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0, 8093 2, 1); 8094 if (err) 8095 return err; 8096 *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA); 8097 } 8098 t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0); 8099 return i; 8100} 8101 8102enum { 8103 CIM_QCTL_BASE = 0, 8104 CIM_CTL_BASE = 0x2000, 8105 CIM_PBT_ADDR_BASE = 0x2800, 8106 CIM_PBT_LRF_BASE = 0x3000, 8107 CIM_PBT_DATA_BASE = 0x3800 8108}; 8109 8110/** 
8111 * t4_cim_read - read a block from CIM internal address space 8112 * @adap: the adapter 8113 * @addr: the start address within the CIM address space 8114 * @n: number of words to read 8115 * @valp: where to store the result 8116 * 8117 * Reads a block of 4-byte words from the CIM intenal address space. 8118 */ 8119int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n, 8120 unsigned int *valp) 8121{ 8122 int ret = 0; 8123 8124 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY) 8125 return -EBUSY; 8126 8127 for ( ; !ret && n--; addr += 4) { 8128 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr); 8129 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY, 8130 0, 5, 2); 8131 if (!ret) 8132 *valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA); 8133 } 8134 return ret; 8135} 8136 8137/** 8138 * t4_cim_write - write a block into CIM internal address space 8139 * @adap: the adapter 8140 * @addr: the start address within the CIM address space 8141 * @n: number of words to write 8142 * @valp: set of values to write 8143 * 8144 * Writes a block of 4-byte words into the CIM intenal address space. 
8145 */ 8146int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n, 8147 const unsigned int *valp) 8148{ 8149 int ret = 0; 8150 8151 if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY) 8152 return -EBUSY; 8153 8154 for ( ; !ret && n--; addr += 4) { 8155 t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++); 8156 t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE); 8157 ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY, 8158 0, 5, 2); 8159 } 8160 return ret; 8161} 8162 8163static int t4_cim_write1(struct adapter *adap, unsigned int addr, 8164 unsigned int val) 8165{ 8166 return t4_cim_write(adap, addr, 1, &val); 8167} 8168 8169/** 8170 * t4_cim_ctl_read - read a block from CIM control region 8171 * @adap: the adapter 8172 * @addr: the start address within the CIM control region 8173 * @n: number of words to read 8174 * @valp: where to store the result 8175 * 8176 * Reads a block of 4-byte words from the CIM control region. 8177 */ 8178int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n, 8179 unsigned int *valp) 8180{ 8181 return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp); 8182} 8183 8184/** 8185 * t4_cim_read_la - read CIM LA capture buffer 8186 * @adap: the adapter 8187 * @la_buf: where to store the LA data 8188 * @wrptr: the HW write pointer within the capture buffer 8189 * 8190 * Reads the contents of the CIM LA buffer with the most recent entry at 8191 * the end of the returned data and with the entry at @wrptr first. 8192 * We try to leave the LA in the running state we find it in. 
8193 */ 8194int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr) 8195{ 8196 int i, ret; 8197 unsigned int cfg, val, idx; 8198 8199 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg); 8200 if (ret) 8201 return ret; 8202 8203 if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */ 8204 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0); 8205 if (ret) 8206 return ret; 8207 } 8208 8209 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val); 8210 if (ret) 8211 goto restart; 8212 8213 idx = G_UPDBGLAWRPTR(val); 8214 if (wrptr) 8215 *wrptr = idx; 8216 8217 for (i = 0; i < adap->params.cim_la_size; i++) { 8218 ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 8219 V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN); 8220 if (ret) 8221 break; 8222 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val); 8223 if (ret) 8224 break; 8225 if (val & F_UPDBGLARDEN) { 8226 ret = -ETIMEDOUT; 8227 break; 8228 } 8229 ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]); 8230 if (ret) 8231 break; 8232 8233 /* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */ 8234 idx = (idx + 1) & M_UPDBGLARDPTR; 8235 /* 8236 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to 8237 * identify the 32-bit portion of the full 312-bit data 8238 */ 8239 if (is_t6(adap)) 8240 while ((idx & 0xf) > 9) 8241 idx = (idx + 1) % M_UPDBGLARDPTR; 8242 } 8243restart: 8244 if (cfg & F_UPDBGLAEN) { 8245 int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 8246 cfg & ~F_UPDBGLARDEN); 8247 if (!ret) 8248 ret = r; 8249 } 8250 return ret; 8251} 8252 8253/** 8254 * t4_tp_read_la - read TP LA capture buffer 8255 * @adap: the adapter 8256 * @la_buf: where to store the LA data 8257 * @wrptr: the HW write pointer within the capture buffer 8258 * 8259 * Reads the contents of the TP LA buffer with the most recent entry at 8260 * the end of the returned data and with the entry at @wrptr first. 8261 * We leave the LA in the running state we find it in. 
8262 */ 8263void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr) 8264{ 8265 bool last_incomplete; 8266 unsigned int i, cfg, val, idx; 8267 8268 cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff; 8269 if (cfg & F_DBGLAENABLE) /* freeze LA */ 8270 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, 8271 adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE)); 8272 8273 val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG); 8274 idx = G_DBGLAWPTR(val); 8275 last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0; 8276 if (last_incomplete) 8277 idx = (idx + 1) & M_DBGLARPTR; 8278 if (wrptr) 8279 *wrptr = idx; 8280 8281 val &= 0xffff; 8282 val &= ~V_DBGLARPTR(M_DBGLARPTR); 8283 val |= adap->params.tp.la_mask; 8284 8285 for (i = 0; i < TPLA_SIZE; i++) { 8286 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val); 8287 la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL); 8288 idx = (idx + 1) & M_DBGLARPTR; 8289 } 8290 8291 /* Wipe out last entry if it isn't valid */ 8292 if (last_incomplete) 8293 la_buf[TPLA_SIZE - 1] = ~0ULL; 8294 8295 if (cfg & F_DBGLAENABLE) /* restore running state */ 8296 t4_write_reg(adap, A_TP_DBG_LA_CONFIG, 8297 cfg | adap->params.tp.la_mask); 8298} 8299 8300/* 8301 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in 8302 * seconds). If we find one of the SGE Ingress DMA State Machines in the same 8303 * state for more than the Warning Threshold then we'll issue a warning about 8304 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel 8305 * appears to be hung every Warning Repeat second till the situation clears. 8306 * If the situation clears, we'll note that as well. 8307 */ 8308#define SGE_IDMA_WARN_THRESH 1 8309#define SGE_IDMA_WARN_REPEAT 300 8310 8311/** 8312 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor 8313 * @adapter: the adapter 8314 * @idma: the adapter IDMA Monitor state 8315 * 8316 * Initialize the state of an SGE Ingress DMA Monitor. 
 */
void t4_idma_monitor_init(struct adapter *adapter,
			  struct sge_idma_monitor_state *idma)
{
	/* Initialize the state variables for detecting an SGE Ingress DMA
	 * hang. The SGE has internal counters which count up on each clock
	 * tick whenever the SGE finds its Ingress DMA State Engines in the
	 * same state they were on the previous clock tick. The clock used is
	 * the Core Clock so we have a limit on the maximum "time" they can
	 * record; typically a very small number of seconds. For instance,
	 * with a 600MHz Core Clock, we can only count up to a bit more than
	 * 7s. So we'll synthesize a larger counter in order to not run the
	 * risk of having the "timers" overflow and give us the flexibility to
	 * maintain a Hung SGE State Machine of our own which operates across
	 * a longer time frame.
	 */
	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
	idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
}

/**
 * t4_idma_monitor - monitor SGE Ingress DMA state
 * @adapter: the adapter
 * @idma: the adapter IDMA Monitor state
 * @hz: number of ticks/second
 * @ticks: number of ticks since the last IDMA Monitor call
 */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	/* Read the SGE Debug Ingress DMA Same State Count registers. These
	 * are counters inside the SGE which count up on each clock when the
	 * SGE finds its Ingress DMA State Engines in the same states they
	 * were in the previous clock. The counters will peg out at
	 * 0xffffffff without wrapping around so once they pass the 1s
	 * threshold they'll stay above that till the IDMA state changes.
	 */
	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);

	/* Check each of the two Ingress DMA engines independently. */
	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue. If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
				CH_WARN(adapter, "SGE idma%d, queue %u, "
					"resumed after %d seconds\n",
					i, idma->idma_qid[i],
					idma->idma_stalled[i]/hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain. The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
		} else {
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
			" state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
			i, idma->idma_qid[i], idma->idma_state[i],
			idma->idma_stalled[i]/hz,
			debug0, debug11);
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}

/**
 * t4_read_pace_tbl - read the pace table
 * @adap: the adapter
 * @pace_vals: holds the returned values
 *
 * Returns the values of TP's pace table in microseconds.
 */
void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
{
	unsigned int i, v;

	for (i = 0; i < NTX_SCHED; i++) {
		/* NOTE(review): writing 0xffff0000|i appears to select pace
		 * table entry i for readback -- confirm against the
		 * TP_PACE_TABLE register description.
		 */
		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
		v = t4_read_reg(adap, A_TP_PACE_TABLE);
		pace_vals[i] = dack_ticks_to_usec(adap, v);
	}
}

/**
 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
 * @adap: the adapter
 * @sched: the scheduler index
 * @kbps: the byte rate in Kbps
 * @ipg: the interpacket delay in tenths of nanoseconds
 *
 * Return the current configuration of a HW Tx scheduler.
8453 */ 8454void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps, 8455 unsigned int *ipg) 8456{ 8457 unsigned int v, addr, bpt, cpt; 8458 8459 if (kbps) { 8460 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2; 8461 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr); 8462 v = t4_read_reg(adap, A_TP_TM_PIO_DATA); 8463 if (sched & 1) 8464 v >>= 16; 8465 bpt = (v >> 8) & 0xff; 8466 cpt = v & 0xff; 8467 if (!cpt) 8468 *kbps = 0; /* scheduler disabled */ 8469 else { 8470 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */ 8471 *kbps = (v * bpt) / 125; 8472 } 8473 } 8474 if (ipg) { 8475 addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2; 8476 t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr); 8477 v = t4_read_reg(adap, A_TP_TM_PIO_DATA); 8478 if (sched & 1) 8479 v >>= 16; 8480 v &= 0xffff; 8481 *ipg = (10000 * v) / core_ticks_per_usec(adap); 8482 } 8483} 8484 8485/** 8486 * t4_load_cfg - download config file 8487 * @adap: the adapter 8488 * @cfg_data: the cfg text file to write 8489 * @size: text file size 8490 * 8491 * Write the supplied config text file to the card's serial flash. 
8492 */ 8493int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size) 8494{ 8495 int ret, i, n, cfg_addr; 8496 unsigned int addr; 8497 unsigned int flash_cfg_start_sec; 8498 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; 8499 8500 cfg_addr = t4_flash_cfg_addr(adap); 8501 if (cfg_addr < 0) 8502 return cfg_addr; 8503 8504 addr = cfg_addr; 8505 flash_cfg_start_sec = addr / SF_SEC_SIZE; 8506 8507 if (size > FLASH_CFG_MAX_SIZE) { 8508 CH_ERR(adap, "cfg file too large, max is %u bytes\n", 8509 FLASH_CFG_MAX_SIZE); 8510 return -EFBIG; 8511 } 8512 8513 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */ 8514 sf_sec_size); 8515 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec, 8516 flash_cfg_start_sec + i - 1); 8517 /* 8518 * If size == 0 then we're simply erasing the FLASH sectors associated 8519 * with the on-adapter Firmware Configuration File. 8520 */ 8521 if (ret || size == 0) 8522 goto out; 8523 8524 /* this will write to the flash up to SF_PAGE_SIZE at a time */ 8525 for (i = 0; i< size; i+= SF_PAGE_SIZE) { 8526 if ( (size - i) < SF_PAGE_SIZE) 8527 n = size - i; 8528 else 8529 n = SF_PAGE_SIZE; 8530 ret = t4_write_flash(adap, addr, n, cfg_data, 1); 8531 if (ret) 8532 goto out; 8533 8534 addr += SF_PAGE_SIZE; 8535 cfg_data += SF_PAGE_SIZE; 8536 } 8537 8538out: 8539 if (ret) 8540 CH_ERR(adap, "config file %s failed %d\n", 8541 (size == 0 ? "clear" : "download"), ret); 8542 return ret; 8543} 8544 8545/** 8546 * t5_fw_init_extern_mem - initialize the external memory 8547 * @adap: the adapter 8548 * 8549 * Initializes the external memory on T5. 
 */
int t5_fw_init_extern_mem(struct adapter *adap)
{
	u32 params[1], val[1];
	int ret;

	/* External-memory init is a T5-only firmware operation; a no-op
	 * success on other chips lets callers invoke this unconditionally.
	 */
	if (!is_t5(adap))
		return 0;

	val[0] = 0xff;		/* Initialize all MCs */
	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		     V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
				    FW_CMD_MAX_TIMEOUT);

	return ret;
}

/* BIOS boot headers.
 * All multi-byte fields below are raw little-endian byte arrays (u8[2],
 * u8[4]) rather than integer types, so the structs match the on-flash /
 * PCI Firmware Spec byte layout regardless of host alignment rules.
 */
typedef struct pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature. Should be 0xaa55 */
	u8	reserved[22];	/* Reserved per processor Architecture data */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} pci_exp_rom_header_t;		/* PCI_EXPANSION_ROM_HEADER */

/* Legacy PCI Expansion ROM Header */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2];	/* ROM Signature. Should be 0xaa55 */
	u8	size512;	/* Current Image Size in units of 512 bytes */
	u8	initentry_point[4];
	u8	cksum;		/* Checksum computed on the entire Image */
	u8	reserved[16];	/* Reserved */
	u8	pcir_offset[2];	/* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t;	/* LEGACY_PCI_EXPANSION_ROM_HEADER */

/* EFI PCI Expansion ROM Header */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2];		/* ROM signature. The value 0xaa55 */
	u8	initialization_size[2];	/* Units 512. Includes this header */
	u8	efi_signature[4];	/* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2];	/* Subsystem value for EFI image header */
	u8	efi_machine_type[2];	/* Machine type from EFI image header */
	u8	compression_type[2];	/* Compression type.
					 * 0x0: uncompressed
					 * 0x1: Compressed
					 * 0x2-0xFFFF: Reserved
					 */
	u8	reserved[8];		/* Reserved */
	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
	u8	pcir_offset[2];		/* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t;	/* EFI PCI Expansion ROM Header */

/* PCI Data Structure Format */
typedef struct pcir_data_structure {	/* PCI Data Structure */
	u8	signature[4];	/* Signature. The string "PCIR" */
	u8	vendor_id[2];	/* Vendor Identification */
	u8	device_id[2];	/* Device Identification */
	u8	vital_product[2]; /* Pointer to Vital Product Data */
	u8	length[2];	/* PCIR Data Structure Length */
	u8	revision;	/* PCIR Data Structure Revision */
	u8	class_code[3];	/* Class Code */
	u8	image_length[2]; /* Image Length. Multiple of 512B */
	u8	code_revision[2]; /* Revision Level of Code/Data */
	u8	code_type;	/* Code Type.
				 * PCI Expansion ROM Code Types
				 * 0x00: Intel IA-32, PC-AT compatible. Legacy
				 * 0x01: Open Firmware standard for PCI. FCODE
				 * 0x02: Hewlett-Packard PA RISC. HP reserved
				 * 0x03: EFI Image. EFI
				 * 0x04-0xFF: Reserved.
				 */
	u8	indicator;	/* Indicator. Identifies the last image in the ROM */
	u8	reserved[2];	/* Reserved */
} pcir_data_t;	/* PCI__DATA_STRUCTURE */

/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,	   /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment */
	VENDOR_ID = 0x1425,	   /* Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
};

/*
 * modify_device_id - Modifies the device ID of the Boot BIOS image
 * @device_id: the device ID to write.
 * @boot_data: the boot image to modify.
 *
 * Write the supplied device ID to the boot BIOS image.
 *
 * NOTE(review): the loop below walks the image chain with no upper bound
 * other than the PCIR "last image" indicator bit; the caller is expected
 * to supply a well-formed, size-validated image (t4_load_boot checks
 * signatures and size before calling this).
 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	legacy_pci_exp_rom_header_t *header;
	pcir_data_t *pcir_header;
	u32 cur_header = 0;

	/*
	 * Loop through all chained images and change the device ID's
	 */
	while (1) {
		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
		pcir_header = (pcir_data_t *) &boot_data[cur_header +
			      le16_to_cpu(*(u16*)header->pcir_offset)];

		/*
		 * Only modify the Device ID if code type is Legacy or HP.
		 * 0x00: Okay to modify
		 * 0x01: FCODE. Do not modify
		 * 0x03: Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == 0x00) {
			u8 csum = 0;
			int i;

			/*
			 * Modify Device ID to match current adapter.
			 * NOTE(review): stored without cpu_to_le16() -- this
			 * writes host byte order into a little-endian on-image
			 * field, which is only correct on little-endian hosts;
			 * confirm intended.
			 */
			*(u16*) pcir_header->device_id = device_id;

			/*
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/*
			 * Calculate and update checksum
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += (u8)boot_data[cur_header + i];

			/*
			 * Invert summed value to create the checksum
			 * Writing new checksum value directly to the boot data.
			 * (Offset 7 is the cksum byte of the legacy header:
			 * 2B signature + 1B size512 + 4B initentry_point.)
			 */
			boot_data[cur_header + 7] = -csum;

		} else if (pcir_header->code_type == 0x03) {

			/*
			 * Modify Device ID to match current adapter.
			 * EFI images carry no header checksum to fix up.
			 */
			*(u16*) pcir_header->device_id = device_id;

		}


		/*
		 * Check indicator element to identify if this is the last
		 * image in the ROM.
		 */
		if (pcir_header->indicator & 0x80)
			break;

		/*
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	}
}

/*
 * t4_load_boot - download boot flash
 * @adapter: the adapter
 * @boot_data: the boot image to write
 * @boot_addr: offset in flash to write boot_data
 * @size: image size
 *
 * Write the supplied boot image to the card's serial flash.
 * The boot image has the following sections: a 28-byte header and the
 * boot image.
 */
int t4_load_boot(struct adapter *adap, u8 *boot_data,
		 unsigned int boot_addr, unsigned int size)
{
	pci_exp_rom_header_t *header;
	int pcir_offset ;
	pcir_data_t *pcir_header;
	int ret, addr;
	uint16_t device_id;
	unsigned int i;
	unsigned int boot_sector = (boot_addr * 1024 );	/* boot_addr is in KB */
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/*
	 * Make sure the boot image does not encroach on the firmware region
	 * (">> 16" converts a flash byte address to a 64KB sector index).
	 */
	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
		CH_ERR(adap, "boot image encroaching on firmware region\n");
		return -EFBIG;
	}

	/*
	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
	 * and Boot configuration data sections. These 3 boot sections span
	 * sectors 0 to 7 in flash and live right before the FW image location.
	 */
	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
				     (boot_sector >> 16) + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter option ROM file
	 */
	if (ret || (size == 0))
		goto out;

	/* Get boot header */
	header = (pci_exp_rom_header_t *)boot_data;
	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
	/* PCIR Data Structure */
	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];

	/*
	 * Perform some primitive sanity testing to avoid accidentally
	 * writing garbage over the boot sectors. We ought to check for
	 * more but it's not worth it for now ...
	 *
	 * NOTE(review): these size/signature checks run AFTER the sector
	 * erase above, so a rejected image still leaves the option-ROM
	 * region erased -- confirm that is the intended behavior.
	 */
	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
		CH_ERR(adap, "boot image too small/large\n");
		return -EFBIG;
	}

#ifndef CHELSIO_T4_DIAGS
	/*
	 * Check BOOT ROM header signature
	 */
	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
		CH_ERR(adap, "Boot image missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check PCI header signature
	 */
	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
		CH_ERR(adap, "PCI header missing signature\n");
		return -EINVAL;
	}

	/*
	 * Check Vendor ID matches Chelsio ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
		CH_ERR(adap, "Vendor ID missing signature\n");
		return -EINVAL;
	}
#endif

	/*
	 * Retrieve adapter's device ID
	 */
	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
	/* Want to deal with PF 0 so I strip off PF 4 indicator */
	device_id = device_id & 0xf0ff;

	/*
	 * Check PCIE Device ID
	 */
	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
		/*
		 * Change the device ID in the Boot BIOS image to match
		 * the Device ID of the current adapter.
		 */
		modify_device_id(device_id, boot_data);
	}

	/*
	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
	 * we finish copying the rest of the boot image. This will ensure
	 * that the BIOS boot header will only be written if the boot image
	 * was written in full.
	 *
	 * NOTE(review): the loop below assumes size is a multiple of
	 * SF_PAGE_SIZE (otherwise "size -= SF_PAGE_SIZE" wraps the unsigned
	 * count and the loop never terminates). ROM images are 512-byte
	 * multiples per the PCI Firmware Spec -- confirm SF_PAGE_SIZE divides
	 * that.
	 */
	addr = boot_sector;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		boot_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
			     (const u8 *)header, 0);

out:
	if (ret)
		CH_ERR(adap, "boot image download failed, error %d\n", ret);
	return ret;
}

/*
 * t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
 * @adapter: the adapter
 *
 * Return the address within the flash where the OptionROM Configuration
 * is stored, or an error if the device FLASH is too small to contain
 * a OptionROM Configuration.
 */
static int t4_flash_bootcfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
		return -ENOSPC;

	return FLASH_BOOTCFG_START;
}

/* Write (or, with size == 0, just erase) the OptionROM Configuration
 * region of the serial flash. Mirrors t4_load_cfg but targets the
 * bootcfg flash region.
 */
int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_bootcfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_BOOTCFG_MAX_SIZE) {
		CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
		       FLASH_BOOTCFG_MAX_SIZE);
		return -EFBIG;
	}

	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);

	/*
	 * If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter OptionROM Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
		if ( (size - i) < SF_PAGE_SIZE)
			n = size - i;	/* final, partial page */
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, 0);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		CH_ERR(adap, "boot config data %s failed %d\n",
		       (size == 0 ? "clear" : "download"), ret);
	return ret;
}

/**
 * t4_set_filter_mode - configure the optional components of filter tuples
 * @adap: the adapter
 * @mode_map: a bitmap selecting which optional filter components to enable
 *
 * Sets the filter mode by selecting the optional components to enable
 * in filter tuples. Returns 0 on success and a negative error if the
 * requested mode needs more bits than are available for optional
 * components.
 */
int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
{
	/* Bit widths of each optional tuple component, indexed by its
	 * S_FCOE..S_FRAGMENTATION field-shift position.
	 */
	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };

	int i, nbits = 0;

	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
		if (mode_map & (1 << i))
			nbits += width[i];
	if (nbits > FILTER_OPT_LEN)
		return -EINVAL;
	if (t4_use_ldst(adap))
		t4_fw_tp_pio_rw(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, 0);
	else
		t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map,
				  1, A_TP_VLAN_PRI_MAP);
	/* Refresh the driver's cached copy of the mode we just wrote. */
	read_filter_mode_and_ingress_config(adap);

	return 0;
}

/**
 * t4_clr_port_stats - clear port statistics
 * @adap: the adapter
 * @idx: the port index
 *
 * Clear HW statistics for the given port.
 */
void t4_clr_port_stats(struct adapter *adap, int idx)
{
	unsigned int i;
	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	u32 port_base_addr;

	/* T4 and T5+ place the per-port MPS statistics at different bases. */
	if (is_t4(adap))
		port_base_addr = PORT_BASE(idx);
	else
		port_base_addr = T5_PORT_BASE(idx);

	/* Each statistic is a 64-bit counter split into _L/_H register
	 * pairs, hence the stride of 8 bytes.
	 */
	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
	     i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
		t4_write_reg(adap, port_base_addr + i, 0);
	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
	     i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
		t4_write_reg(adap, port_base_addr + i, 0);
	/* Also clear the buffer-group drop/truncate counters for every
	 * buffer group this port maps to.
	 */
	for (i = 0; i < 4; i++)
		if (bgmap & (1 << i)) {
			t4_write_reg(adap,
				A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
			t4_write_reg(adap,
				A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
		}
}

/**
 * t4_i2c_rd - read I2C data from adapter
 * @adap: the adapter
 * @port: Port number if per-port device; <0 if not
 * @devid: per-port device ID or absolute device ID
 * @offset: byte offset into device I2C space
 * @len: byte length of I2C space data
 * @buf: buffer in which to return I2C data
 *
 * Reads the I2C data from the indicated device and location.
 */
int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd ldst;
	int ret;

	/* Bound the request to what a single LDST command can carry. */
	if (port >= 4 ||
	    devid >= 256 ||
	    offset >= 256 ||
	    len > sizeof ldst.u.i2c.data)
		return -EINVAL;

	memset(&ldst, 0, sizeof ldst);
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
	ldst.op_to_addrspace =
		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
			    F_FW_CMD_REQUEST |
			    F_FW_CMD_READ |
			    ldst_addrspace);
	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
	/* 0xff in the pid field means "not a per-port device". */
	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
	ldst.u.i2c.did = devid;
	ldst.u.i2c.boffset = offset;
	ldst.u.i2c.blen = len;
	ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
	if (!ret)
		memcpy(buf, ldst.u.i2c.data, len);
	return ret;
}

/**
 * t4_i2c_wr - write I2C data to adapter
 * @adap: the adapter
 * @port: Port number if per-port device; <0 if not
 * @devid: per-port device ID or absolute device ID
 * @offset: byte offset into device I2C space
 * @len: byte length of I2C space data
 * @buf: buffer containing new I2C data
 *
 * Write the I2C data to the indicated device and location.
 */
int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
	      int port, unsigned int devid,
	      unsigned int offset, unsigned int len,
	      u8 *buf)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd ldst;

	/* Bound the request to what a single LDST command can carry. */
	if (port >= 4 ||
	    devid >= 256 ||
	    offset >= 256 ||
	    len > sizeof ldst.u.i2c.data)
		return -EINVAL;

	memset(&ldst, 0, sizeof ldst);
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
	ldst.op_to_addrspace =
		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
			    F_FW_CMD_REQUEST |
			    F_FW_CMD_WRITE |
			    ldst_addrspace);
	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
	/* 0xff in the pid field means "not a per-port device". */
	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
	ldst.u.i2c.did = devid;
	ldst.u.i2c.boffset = offset;
	ldst.u.i2c.blen = len;
	memcpy(ldst.u.i2c.data, buf, len);
	return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
}

/**
 * t4_sge_ctxt_rd - read an SGE context through FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @cid: the context id
 * @ctype: the context type
 * @data: where to store the context data
 *
 * Issues a FW command through the given mailbox to read an SGE context.
 */
int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
		   enum ctxt_type ctype, u32 *data)
{
	int ret;
	struct fw_ldst_cmd c;

	/* Note: `ret` does double duty -- it first holds the LDST address
	 * space selector for the context type, then the mailbox result.
	 */
	if (ctype == CTXT_EGRESS)
		ret = FW_LDST_ADDRSPC_SGE_EGRC;
	else if (ctype == CTXT_INGRESS)
		ret = FW_LDST_ADDRSPC_SGE_INGC;
	else if (ctype == CTXT_FLM)
		ret = FW_LDST_ADDRSPC_SGE_FLMC;
	else
		ret = FW_LDST_ADDRSPC_SGE_CONMC;

	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST | F_FW_CMD_READ |
					V_FW_LDST_CMD_ADDRSPACE(ret));
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.idctxt.physid = cpu_to_be32(cid);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		/* An SGE context is six 32-bit words. */
		data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
		data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
		data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
		data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
		data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
		data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
	}
	return ret;
}

/**
 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
 * @adap: the adapter
 * @cid: the context id
 * @ctype: the context type
 * @data: where to store the context data
 *
 * Reads an SGE context directly, bypassing FW. This is only for
 * debugging when FW is unavailable.
 */
int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
		      u32 *data)
{
	int i, ret;

	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
	if (!ret)
		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
			*data++ = t4_read_reg(adap, i);
	return ret;
}

/* Send a FW_SCHED_CMD "config" sub-command enabling/disabling min/max
 * rate enforcement for the given scheduler type.
 */
int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
		    int sleep_ok)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
				      F_FW_CMD_REQUEST |
				      F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
	cmd.u.config.type = type;
	cmd.u.config.minmaxen = minmaxen;

	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
			       NULL, sleep_ok);
}

/* Send a FW_SCHED_CMD "params" sub-command programming one scheduling
 * class (rate, weight, packet size, etc.) on the given channel.
 */
int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
		    int rateunit, int ratemode, int channel, int cl,
		    int minrate, int maxrate, int weight, int pktsize,
		    int sleep_ok)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
				      F_FW_CMD_REQUEST |
				      F_FW_CMD_WRITE);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
	cmd.u.params.type = type;
	cmd.u.params.level = level;
	cmd.u.params.mode = mode;
	cmd.u.params.ch = channel;
	cmd.u.params.cl = cl;
	cmd.u.params.unit = rateunit;
	cmd.u.params.rate = ratemode;
	cmd.u.params.min = cpu_to_be32(minrate);
	cmd.u.params.max = cpu_to_be32(maxrate);
	cmd.u.params.weight = cpu_to_be16(weight);
	cmd.u.params.pktsize = cpu_to_be16(pktsize);

	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
			       NULL, sleep_ok);
}

9188/* 9189 * t4_config_watchdog - configure (enable/disable) a watchdog timer 9190 * @adapter: the adapter 9191 * @mbox: mailbox to use for the FW command 9192 * @pf: the PF owning the queue 9193 * @vf: the VF owning the queue 9194 * @timeout: watchdog timeout in ms 9195 * @action: watchdog timer / action 9196 * 9197 * There are separate watchdog timers for each possible watchdog 9198 * action. Configure one of the watchdog timers by setting a non-zero 9199 * timeout. Disable a watchdog timer by using a timeout of zero. 9200 */ 9201int t4_config_watchdog(struct adapter *adapter, unsigned int mbox, 9202 unsigned int pf, unsigned int vf, 9203 unsigned int timeout, unsigned int action) 9204{ 9205 struct fw_watchdog_cmd wdog; 9206 unsigned int ticks; 9207 9208 /* 9209 * The watchdog command expects a timeout in units of 10ms so we need 9210 * to convert it here (via rounding) and force a minimum of one 10ms 9211 * "tick" if the timeout is non-zero but the convertion results in 0 9212 * ticks. 
9213 */ 9214 ticks = (timeout + 5)/10; 9215 if (timeout && !ticks) 9216 ticks = 1; 9217 9218 memset(&wdog, 0, sizeof wdog); 9219 wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) | 9220 F_FW_CMD_REQUEST | 9221 F_FW_CMD_WRITE | 9222 V_FW_PARAMS_CMD_PFN(pf) | 9223 V_FW_PARAMS_CMD_VFN(vf)); 9224 wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog)); 9225 wdog.timeout = cpu_to_be32(ticks); 9226 wdog.action = cpu_to_be32(action); 9227 9228 return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL); 9229} 9230 9231int t4_get_devlog_level(struct adapter *adapter, unsigned int *level) 9232{ 9233 struct fw_devlog_cmd devlog_cmd; 9234 int ret; 9235 9236 memset(&devlog_cmd, 0, sizeof(devlog_cmd)); 9237 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) | 9238 F_FW_CMD_REQUEST | F_FW_CMD_READ); 9239 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd)); 9240 ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd, 9241 sizeof(devlog_cmd), &devlog_cmd); 9242 if (ret) 9243 return ret; 9244 9245 *level = devlog_cmd.level; 9246 return 0; 9247} 9248 9249int t4_set_devlog_level(struct adapter *adapter, unsigned int level) 9250{ 9251 struct fw_devlog_cmd devlog_cmd; 9252 9253 memset(&devlog_cmd, 0, sizeof(devlog_cmd)); 9254 devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) | 9255 F_FW_CMD_REQUEST | 9256 F_FW_CMD_WRITE); 9257 devlog_cmd.level = level; 9258 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd)); 9259 return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd, 9260 sizeof(devlog_cmd), &devlog_cmd); 9261} 9262