t4_hw.c revision 353418
1/*-
2 * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/common/t4_hw.c 353418 2019-10-10 23:27:02Z np $");
29
30#include "opt_inet.h"
31
32#include "common.h"
33#include "t4_regs.h"
34#include "t4_regs_values.h"
35#include "firmware/t4fw_interface.h"
36
/*
 * Local msleep() replacement that is safe to call early in boot: while
 * the kernel is `cold` we busy-wait with DELAY() (microseconds), and
 * only once the system is up do we actually sleep via pause() for the
 * equivalent number of ticks.
 */
#undef msleep
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)
44
45/**
46 *	t4_wait_op_done_val - wait until an operation is completed
47 *	@adapter: the adapter performing the operation
48 *	@reg: the register to check for completion
49 *	@mask: a single-bit field within @reg that indicates completion
50 *	@polarity: the value of the field when the operation is completed
51 *	@attempts: number of check iterations
52 *	@delay: delay in usecs between iterations
53 *	@valp: where to store the value of the register at completion time
54 *
55 *	Wait until an operation is completed by checking a bit in a register
56 *	up to @attempts times.  If @valp is not NULL the value of the register
57 *	at the time it indicated completion is stored there.  Returns 0 if the
58 *	operation completes and	-EAGAIN	otherwise.
59 */
60static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
61			       int polarity, int attempts, int delay, u32 *valp)
62{
63	while (1) {
64		u32 val = t4_read_reg(adapter, reg);
65
66		if (!!(val & mask) == polarity) {
67			if (valp)
68				*valp = val;
69			return 0;
70		}
71		if (--attempts == 0)
72			return -EAGAIN;
73		if (delay)
74			udelay(delay);
75	}
76}
77
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	/* Same as t4_wait_op_done_val() but discards the final value. */
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
84
85/**
86 *	t4_set_reg_field - set a register field to a value
87 *	@adapter: the adapter to program
88 *	@addr: the register address
89 *	@mask: specifies the portion of the register to modify
90 *	@val: the new value for the register field
91 *
92 *	Sets a register field specified by the supplied mask to the
93 *	given value.
94 */
95void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
96		      u32 val)
97{
98	u32 v = t4_read_reg(adapter, addr) & ~mask;
99
100	t4_write_reg(adapter, addr, v | val);
101	(void) t4_read_reg(adapter, addr);      /* flush */
102}
103
104/**
105 *	t4_read_indirect - read indirectly addressed registers
106 *	@adap: the adapter
107 *	@addr_reg: register holding the indirect address
108 *	@data_reg: register holding the value of the indirect register
109 *	@vals: where the read register values are stored
110 *	@nregs: how many indirect registers to read
111 *	@start_idx: index of first indirect register to read
112 *
113 *	Reads registers that are accessed indirectly through an address/data
114 *	register pair.
115 */
116void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
117			     unsigned int data_reg, u32 *vals,
118			     unsigned int nregs, unsigned int start_idx)
119{
120	while (nregs--) {
121		t4_write_reg(adap, addr_reg, start_idx);
122		*vals++ = t4_read_reg(adap, data_reg);
123		start_idx++;
124	}
125}
126
127/**
128 *	t4_write_indirect - write indirectly addressed registers
129 *	@adap: the adapter
130 *	@addr_reg: register holding the indirect addresses
131 *	@data_reg: register holding the value for the indirect registers
132 *	@vals: values to write
133 *	@nregs: how many indirect registers to write
134 *	@start_idx: address of first indirect register to write
135 *
136 *	Writes a sequential block of registers that are accessed indirectly
137 *	through an address/data register pair.
138 */
139void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
140		       unsigned int data_reg, const u32 *vals,
141		       unsigned int nregs, unsigned int start_idx)
142{
143	while (nregs--) {
144		t4_write_reg(adap, addr_reg, start_idx++);
145		t4_write_reg(adap, data_reg, *vals++);
146	}
147}
148
149/*
150 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
151 * mechanism.  This guarantees that we get the real value even if we're
152 * operating within a Virtual Machine and the Hypervisor is trapping our
153 * Configuration Space accesses.
154 *
155 * N.B. This routine should only be used as a last resort: the firmware uses
156 *      the backdoor registers on a regular basis and we can end up
157 *      conflicting with it's uses!
158 */
u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
{
	/* Target our own PF's config space at offset @reg. */
	u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
	u32 val;

	/* The "go" bit is in a different position on T6 chips. */
	if (chip_id(adap) <= CHELSIO_T5)
		req |= F_ENABLE;
	else
		req |= F_T6_ENABLE;

	/* T4 additionally requires F_LOCALCFG in the request. */
	if (is_t4(adap))
		req |= F_LOCALCFG;

	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
	val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);

	/*
	 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
	 * Configuration Space read.  (None of the other fields matter when
	 * F_ENABLE is 0 so a simple register write is easier than a
	 * read-modify-write via t4_set_reg_field().)
	 */
	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);

	return val;
}
185
186/*
187 * t4_report_fw_error - report firmware error
188 * @adap: the adapter
189 *
190 * The adapter firmware can indicate error conditions to the host.
191 * If the firmware has indicated an error, print out the reason for
192 * the firmware error.
193 */
static void t4_report_fw_error(struct adapter *adap)
{
	/*
	 * Human-readable strings indexed by G_PCIE_FW_EVAL(); the 8
	 * entries presumably cover the field's full value range.
	 */
	static const char *const reason[] = {
		"Crash",			/* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",			/* reserved */
	};
	u32 pcie_fw;

	/* Only log when the firmware has latched an error indication. */
	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR)
		CH_ERR(adap, "Firmware reports adapter error: %s\n",
			reason[G_PCIE_FW_EVAL(pcie_fw)]);
}
213
214/*
215 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
216 */
217static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
218			 u32 mbox_addr)
219{
220	for ( ; nflit; nflit--, mbox_addr += 8)
221		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
222}
223
224/*
225 * Handle a FW assertion reported in a mailbox.
226 */
static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
{
	/*
	 * Log the assertion's source location and the two diagnostic
	 * values from the FW_DEBUG_CMD payload.  %.16s bounds the
	 * filename — presumably the field is not guaranteed to be
	 * NUL-terminated.
	 */
	CH_ALERT(adap,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt->u.assert.filename_0_7,
		  be32_to_cpu(asrt->u.assert.line),
		  be32_to_cpu(asrt->u.assert.x),
		  be32_to_cpu(asrt->u.assert.y));
}
236
237#define X_CIM_PF_NOACCESS 0xeeeeeeee
238/**
239 *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
240 *	@adap: the adapter
241 *	@mbox: index of the mailbox to use
242 *	@cmd: the command to write
243 *	@size: command length in bytes
244 *	@rpl: where to optionally store the reply
245 *	@sleep_ok: if true we may sleep while awaiting command completion
246 *	@timeout: time to wait for command to finish before timing out
247 *		(negative implies @sleep_ok=false)
248 *
249 *	Sends the given command to FW through the selected mailbox and waits
250 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
251 *	store the FW's reply to the command.  The command and its optional
252 *	reply are of the same length.  Some FW commands like RESET and
253 *	INITIALIZE can take a considerable amount of time to execute.
254 *	@sleep_ok determines whether we may sleep while awaiting the response.
255 *	If sleeping is allowed we use progressive backoff otherwise we spin.
256 *	Note that passing in a negative @timeout is an alternate mechanism
257 *	for specifying @sleep_ok=false.  This is useful when a higher level
258 *	interface allows for specification of @timeout but not @sleep_ok ...
259 *
260 *	The return value is 0 on success or a negative errno on failure.  A
261 *	failure can happen either because we are not able to execute the
262 *	command or FW executes it but signals an error.  In the latter case
263 *	the return value is the error code indicated by FW (negated).
264 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};
	u32 v;
	u64 res;
	int i, ms, delay_idx, ret;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	__be64 cmd_rpl[MBOX_LEN/8];
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/* VFs use different mailbox data/control register addresses. */
	if (adap->flags & IS_VF) {
		if (is_t6(adap))
			data_reg = FW_T6VF_MBDATA_BASE_ADDR;
		else
			data_reg = FW_T4VF_MBDATA_BASE_ADDR;
		ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
	}

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

	/*
	 * Attempt to gain access to the mailbox: read the control register
	 * until its owner field is no longer "none", trying up to 4 times.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If ownership did not land with us (the PL), report the error to
	 * our caller: -EBUSY if the firmware owns the mailbox, -ETIMEDOUT
	 * otherwise.
	 */
	if (v != X_MBOWNER_PL) {
		t4_report_fw_error(adap);
		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
		return ret;
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		CH_ERR(adap, "found VALID command in mbox %u: "
		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		       (unsigned long long)t4_read_reg64(adap, data_reg),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	if (adap->flags & IS_VF) {
		/*
		 * For the VFs, the Mailbox Data "registers" are
		 * actually backed by T4's "MA" interface rather than
		 * PL Registers (as is the case for the PFs).  Because
		 * these are in different coherency domains, the write
		 * to the VF's PL-register-backed Mailbox Control can
		 * race in front of the writes to the MA-backed VF
		 * Mailbox Data "registers".  So we need to do a
		 * read-back on at least one byte of the VF Mailbox
		 * Data registers before doing the write to the VF
		 * Mailbox Control register.
		 */
		t4_read_reg(adap, data_reg);
	}

	CH_DUMP_MBOX(adap, mbox, data_reg);

	/* Hand the mailbox (and the command in it) to the firmware. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);	/* flush write */

	delay_idx = 0;
	ms = delay[0];

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.  @i accumulates milliseconds waited so far.
	 */
	pcie_fw = 0;
	for (i = 0; i < timeout; i += ms) {
		if (!(adap->flags & IS_VF)) {
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (pcie_fw & F_PCIE_FW_ERR)
				break;
		}
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}

		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				/*
				 * Ownership came back to us without a valid
				 * reply; release the mailbox and keep waiting.
				 */
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			/*
			 * Retrieve the command reply and release the mailbox.
			 */
			get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));

			CH_DUMP_MBOX(adap, mbox, data_reg);

			res = be64_to_cpu(cmd_rpl[0]);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				/*
				 * The firmware sent an assertion instead of
				 * our reply; log it and report EIO.
				 */
				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl)
				memcpy(rpl, cmd_rpl, size);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
	       *(const u8 *)cmd, mbox);

	/* If DUMP_MBOX is set the mbox has already been dumped */
	if ((adap->debug_flags & DF_DUMP_MBOX) == 0) {
		p = cmd;
		CH_ERR(adap, "mbox: %016llx %016llx %016llx %016llx "
		    "%016llx %016llx %016llx %016llx\n",
		    (unsigned long long)be64_to_cpu(p[0]),
		    (unsigned long long)be64_to_cpu(p[1]),
		    (unsigned long long)be64_to_cpu(p[2]),
		    (unsigned long long)be64_to_cpu(p[3]),
		    (unsigned long long)be64_to_cpu(p[4]),
		    (unsigned long long)be64_to_cpu(p[5]),
		    (unsigned long long)be64_to_cpu(p[6]),
		    (unsigned long long)be64_to_cpu(p[7]));
	}

	t4_report_fw_error(adap);
	t4_fatal_err(adap);
	return ret;
}
451
452int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
453		    void *rpl, bool sleep_ok)
454{
455		return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
456					       sleep_ok, FW_CMD_MAX_TIMEOUT);
457
458}
459
460static int t4_edc_err_read(struct adapter *adap, int idx)
461{
462	u32 edc_ecc_err_addr_reg;
463	u32 edc_bist_status_rdata_reg;
464
465	if (is_t4(adap)) {
466		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
467		return 0;
468	}
469	if (idx != MEM_EDC0 && idx != MEM_EDC1) {
470		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
471		return 0;
472	}
473
474	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
475	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
476
477	CH_WARN(adap,
478		"edc%d err addr 0x%x: 0x%x.\n",
479		idx, edc_ecc_err_addr_reg,
480		t4_read_reg(adap, edc_ecc_err_addr_reg));
481	CH_WARN(adap,
482	 	"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
483		edc_bist_status_rdata_reg,
484		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
485		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
486		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
487		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
488		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
489		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
490		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
491		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
492		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
493
494	return 0;
495}
496
497/**
498 *	t4_mc_read - read from MC through backdoor accesses
499 *	@adap: the adapter
500 *	@idx: which MC to access
501 *	@addr: address of first byte requested
502 *	@data: 64 bytes of data containing the requested address
503 *	@ecc: where to store the corresponding 64-bit ECC word
504 *
505 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
506 *	that covers the requested address @addr.  If @parity is not %NULL it
507 *	is assigned the 64-bit ECC word for the read data.
508 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;

	if (is_t4(adap)) {
		/* T4 has a single MC with one fixed BIST register block. */
		mc_bist_cmd_reg = A_MC_BIST_CMD;
		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
	} else {
		/* T5+ have per-controller register blocks selected by @idx. */
		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
						  idx);
		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
						  idx);
	}

	/* Bail if a BIST command is already in progress. */
	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	/* 64-byte-aligned start address, 64-byte length, pattern 0xc. */
	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
		     F_START_BIST | V_BIST_CMD_GAP(1));
	/* Wait (up to 10 polls, 1us apart) for F_START_BIST to self-clear. */
	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)

	/* Copy out the 16 data words, reading registers in descending order. */
	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}
551
552/**
553 *	t4_edc_read - read from EDC through backdoor accesses
554 *	@adap: the adapter
555 *	@idx: which EDC to access
556 *	@addr: address of first byte requested
557 *	@data: 64 bytes of data containing the requested address
558 *	@ecc: where to store the corresponding 64-bit ECC word
559 *
560 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
561 *	that covers the requested address @addr.  If @parity is not %NULL it
562 *	is assigned the 64-bit ECC word for the read data.
563 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;

	if (is_t4(adap)) {
		/* T4: per-EDC register blocks addressed via EDC_REG(). */
		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
						    idx);
	} else {
/*
 * These macros are missing from t4_regs.h, so they are defined locally
 * here (and undefined below) for the T5+ EDC register layout.
 */
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
						    idx);
#undef EDC_REG_T5
#undef EDC_STRIDE_T5
	}

	/* Bail if a BIST command is already in progress. */
	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
		return -EBUSY;
	/* 64-byte-aligned start address, 64-byte length, pattern 0xc. */
	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd_reg,
		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
	/* Wait (up to 10 polls, 1us apart) for F_START_BIST to self-clear. */
	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)

	/* Copy out the 16 data words, reading registers in descending order. */
	for (i = 15; i >= 0; i--)
		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}
616
617/**
618 *	t4_mem_read - read EDC 0, EDC 1 or MC into buffer
619 *	@adap: the adapter
620 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
621 *	@addr: address within indicated memory type
622 *	@len: amount of memory to read
623 *	@buf: host memory buffer
624 *
625 *	Reads an [almost] arbitrary memory region in the firmware: the
626 *	firmware memory address, length and host buffer must be aligned on
627 *	32-bit boudaries.  The memory is returned as a raw byte sequence from
628 *	the firmware's memory.  If this memory contains data structures which
629 *	contain multi-byte integers, it's the callers responsibility to
630 *	perform appropriate byte order conversions.
631 */
632int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
633		__be32 *buf)
634{
635	u32 pos, start, end, offset;
636	int ret;
637
638	/*
639	 * Argument sanity checks ...
640	 */
641	if ((addr & 0x3) || (len & 0x3))
642		return -EINVAL;
643
644	/*
645	 * The underlaying EDC/MC read routines read 64 bytes at a time so we
646	 * need to round down the start and round up the end.  We'll start
647	 * copying out of the first line at (addr - start) a word at a time.
648	 */
649	start = addr & ~(64-1);
650	end = (addr + len + 64-1) & ~(64-1);
651	offset = (addr - start)/sizeof(__be32);
652
653	for (pos = start; pos < end; pos += 64, offset = 0) {
654		__be32 data[16];
655
656		/*
657		 * Read the chip's memory block and bail if there's an error.
658		 */
659		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
660			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
661		else
662			ret = t4_edc_read(adap, mtype, pos, data, NULL);
663		if (ret)
664			return ret;
665
666		/*
667		 * Copy the data into the caller's memory buffer.
668		 */
669		while (offset < 16 && len > 0) {
670			*buf++ = data[offset++];
671			len -= sizeof(__be32);
672		}
673	}
674
675	return 0;
676}
677
678/*
679 * Return the specified PCI-E Configuration Space register from our Physical
680 * Function.  We try first via a Firmware LDST Command (if fw_attach != 0)
681 * since we prefer to let the firmware own all of these registers, but if that
682 * fails we go for it directly ourselves.
683 */
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
{

	/*
	 * If fw_attach != 0, construct and send the Firmware LDST Command to
	 * retrieve the specified PCI-E Configuration Space register.
	 */
	if (drv_fw_attach != 0) {
		struct fw_ldst_cmd ldst_cmd;
		int ret;

		/* Build a read request for our PF's config space. */
		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
		ldst_cmd.op_to_addrspace =
			cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
				    F_FW_CMD_REQUEST |
				    F_FW_CMD_READ |
				    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
		ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
		ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
		ldst_cmd.u.pcie.ctrl_to_fn =
			(F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
		ldst_cmd.u.pcie.r = reg;

		/*
		 * If the LDST Command succeeds, return the result, otherwise
		 * fall through to reading it directly ourselves ...
		 */
		ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
				 &ldst_cmd);
		if (ret == 0)
			return be32_to_cpu(ldst_cmd.u.pcie.data[0]);

		CH_WARN(adap, "Firmware failed to return "
			"Configuration Space register %d, err = %d\n",
			reg, -ret);
	}

	/*
	 * Read the desired Configuration Space register via the PCI-E
	 * Backdoor mechanism.
	 */
	return t4_hw_pci_read_cfg4(adap, reg);
}
727
728/**
729 *	t4_get_regs_len - return the size of the chips register set
730 *	@adapter: the adapter
731 *
732 *	Returns the size of the chip's BAR0 register space.
733 */
734unsigned int t4_get_regs_len(struct adapter *adapter)
735{
736	unsigned int chip_version = chip_id(adapter);
737
738	switch (chip_version) {
739	case CHELSIO_T4:
740		if (adapter->flags & IS_VF)
741			return FW_T4VF_REGMAP_SIZE;
742		return T4_REGMAP_SIZE;
743
744	case CHELSIO_T5:
745	case CHELSIO_T6:
746		if (adapter->flags & IS_VF)
747			return FW_T4VF_REGMAP_SIZE;
748		return T5_REGMAP_SIZE;
749	}
750
751	CH_ERR(adapter,
752		"Unsupported chip version %d\n", chip_version);
753	return 0;
754}
755
756/**
757 *	t4_get_regs - read chip registers into provided buffer
758 *	@adap: the adapter
759 *	@buf: register buffer
760 *	@buf_size: size (in bytes) of register buffer
761 *
762 *	If the provided register buffer isn't large enough for the chip's
763 *	full register range, the register dump will be truncated to the
764 *	register buffer's size.
765 */
766void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
767{
768	static const unsigned int t4_reg_ranges[] = {
769		0x1008, 0x1108,
770		0x1180, 0x1184,
771		0x1190, 0x1194,
772		0x11a0, 0x11a4,
773		0x11b0, 0x11b4,
774		0x11fc, 0x123c,
775		0x1300, 0x173c,
776		0x1800, 0x18fc,
777		0x3000, 0x30d8,
778		0x30e0, 0x30e4,
779		0x30ec, 0x5910,
780		0x5920, 0x5924,
781		0x5960, 0x5960,
782		0x5968, 0x5968,
783		0x5970, 0x5970,
784		0x5978, 0x5978,
785		0x5980, 0x5980,
786		0x5988, 0x5988,
787		0x5990, 0x5990,
788		0x5998, 0x5998,
789		0x59a0, 0x59d4,
790		0x5a00, 0x5ae0,
791		0x5ae8, 0x5ae8,
792		0x5af0, 0x5af0,
793		0x5af8, 0x5af8,
794		0x6000, 0x6098,
795		0x6100, 0x6150,
796		0x6200, 0x6208,
797		0x6240, 0x6248,
798		0x6280, 0x62b0,
799		0x62c0, 0x6338,
800		0x6370, 0x638c,
801		0x6400, 0x643c,
802		0x6500, 0x6524,
803		0x6a00, 0x6a04,
804		0x6a14, 0x6a38,
805		0x6a60, 0x6a70,
806		0x6a78, 0x6a78,
807		0x6b00, 0x6b0c,
808		0x6b1c, 0x6b84,
809		0x6bf0, 0x6bf8,
810		0x6c00, 0x6c0c,
811		0x6c1c, 0x6c84,
812		0x6cf0, 0x6cf8,
813		0x6d00, 0x6d0c,
814		0x6d1c, 0x6d84,
815		0x6df0, 0x6df8,
816		0x6e00, 0x6e0c,
817		0x6e1c, 0x6e84,
818		0x6ef0, 0x6ef8,
819		0x6f00, 0x6f0c,
820		0x6f1c, 0x6f84,
821		0x6ff0, 0x6ff8,
822		0x7000, 0x700c,
823		0x701c, 0x7084,
824		0x70f0, 0x70f8,
825		0x7100, 0x710c,
826		0x711c, 0x7184,
827		0x71f0, 0x71f8,
828		0x7200, 0x720c,
829		0x721c, 0x7284,
830		0x72f0, 0x72f8,
831		0x7300, 0x730c,
832		0x731c, 0x7384,
833		0x73f0, 0x73f8,
834		0x7400, 0x7450,
835		0x7500, 0x7530,
836		0x7600, 0x760c,
837		0x7614, 0x761c,
838		0x7680, 0x76cc,
839		0x7700, 0x7798,
840		0x77c0, 0x77fc,
841		0x7900, 0x79fc,
842		0x7b00, 0x7b58,
843		0x7b60, 0x7b84,
844		0x7b8c, 0x7c38,
845		0x7d00, 0x7d38,
846		0x7d40, 0x7d80,
847		0x7d8c, 0x7ddc,
848		0x7de4, 0x7e04,
849		0x7e10, 0x7e1c,
850		0x7e24, 0x7e38,
851		0x7e40, 0x7e44,
852		0x7e4c, 0x7e78,
853		0x7e80, 0x7ea4,
854		0x7eac, 0x7edc,
855		0x7ee8, 0x7efc,
856		0x8dc0, 0x8e04,
857		0x8e10, 0x8e1c,
858		0x8e30, 0x8e78,
859		0x8ea0, 0x8eb8,
860		0x8ec0, 0x8f6c,
861		0x8fc0, 0x9008,
862		0x9010, 0x9058,
863		0x9060, 0x9060,
864		0x9068, 0x9074,
865		0x90fc, 0x90fc,
866		0x9400, 0x9408,
867		0x9410, 0x9458,
868		0x9600, 0x9600,
869		0x9608, 0x9638,
870		0x9640, 0x96bc,
871		0x9800, 0x9808,
872		0x9820, 0x983c,
873		0x9850, 0x9864,
874		0x9c00, 0x9c6c,
875		0x9c80, 0x9cec,
876		0x9d00, 0x9d6c,
877		0x9d80, 0x9dec,
878		0x9e00, 0x9e6c,
879		0x9e80, 0x9eec,
880		0x9f00, 0x9f6c,
881		0x9f80, 0x9fec,
882		0xd004, 0xd004,
883		0xd010, 0xd03c,
884		0xdfc0, 0xdfe0,
885		0xe000, 0xea7c,
886		0xf000, 0x11110,
887		0x11118, 0x11190,
888		0x19040, 0x1906c,
889		0x19078, 0x19080,
890		0x1908c, 0x190e4,
891		0x190f0, 0x190f8,
892		0x19100, 0x19110,
893		0x19120, 0x19124,
894		0x19150, 0x19194,
895		0x1919c, 0x191b0,
896		0x191d0, 0x191e8,
897		0x19238, 0x1924c,
898		0x193f8, 0x1943c,
899		0x1944c, 0x19474,
900		0x19490, 0x194e0,
901		0x194f0, 0x194f8,
902		0x19800, 0x19c08,
903		0x19c10, 0x19c90,
904		0x19ca0, 0x19ce4,
905		0x19cf0, 0x19d40,
906		0x19d50, 0x19d94,
907		0x19da0, 0x19de8,
908		0x19df0, 0x19e40,
909		0x19e50, 0x19e90,
910		0x19ea0, 0x19f4c,
911		0x1a000, 0x1a004,
912		0x1a010, 0x1a06c,
913		0x1a0b0, 0x1a0e4,
914		0x1a0ec, 0x1a0f4,
915		0x1a100, 0x1a108,
916		0x1a114, 0x1a120,
917		0x1a128, 0x1a130,
918		0x1a138, 0x1a138,
919		0x1a190, 0x1a1c4,
920		0x1a1fc, 0x1a1fc,
921		0x1e040, 0x1e04c,
922		0x1e284, 0x1e28c,
923		0x1e2c0, 0x1e2c0,
924		0x1e2e0, 0x1e2e0,
925		0x1e300, 0x1e384,
926		0x1e3c0, 0x1e3c8,
927		0x1e440, 0x1e44c,
928		0x1e684, 0x1e68c,
929		0x1e6c0, 0x1e6c0,
930		0x1e6e0, 0x1e6e0,
931		0x1e700, 0x1e784,
932		0x1e7c0, 0x1e7c8,
933		0x1e840, 0x1e84c,
934		0x1ea84, 0x1ea8c,
935		0x1eac0, 0x1eac0,
936		0x1eae0, 0x1eae0,
937		0x1eb00, 0x1eb84,
938		0x1ebc0, 0x1ebc8,
939		0x1ec40, 0x1ec4c,
940		0x1ee84, 0x1ee8c,
941		0x1eec0, 0x1eec0,
942		0x1eee0, 0x1eee0,
943		0x1ef00, 0x1ef84,
944		0x1efc0, 0x1efc8,
945		0x1f040, 0x1f04c,
946		0x1f284, 0x1f28c,
947		0x1f2c0, 0x1f2c0,
948		0x1f2e0, 0x1f2e0,
949		0x1f300, 0x1f384,
950		0x1f3c0, 0x1f3c8,
951		0x1f440, 0x1f44c,
952		0x1f684, 0x1f68c,
953		0x1f6c0, 0x1f6c0,
954		0x1f6e0, 0x1f6e0,
955		0x1f700, 0x1f784,
956		0x1f7c0, 0x1f7c8,
957		0x1f840, 0x1f84c,
958		0x1fa84, 0x1fa8c,
959		0x1fac0, 0x1fac0,
960		0x1fae0, 0x1fae0,
961		0x1fb00, 0x1fb84,
962		0x1fbc0, 0x1fbc8,
963		0x1fc40, 0x1fc4c,
964		0x1fe84, 0x1fe8c,
965		0x1fec0, 0x1fec0,
966		0x1fee0, 0x1fee0,
967		0x1ff00, 0x1ff84,
968		0x1ffc0, 0x1ffc8,
969		0x20000, 0x2002c,
970		0x20100, 0x2013c,
971		0x20190, 0x201a0,
972		0x201a8, 0x201b8,
973		0x201c4, 0x201c8,
974		0x20200, 0x20318,
975		0x20400, 0x204b4,
976		0x204c0, 0x20528,
977		0x20540, 0x20614,
978		0x21000, 0x21040,
979		0x2104c, 0x21060,
980		0x210c0, 0x210ec,
981		0x21200, 0x21268,
982		0x21270, 0x21284,
983		0x212fc, 0x21388,
984		0x21400, 0x21404,
985		0x21500, 0x21500,
986		0x21510, 0x21518,
987		0x2152c, 0x21530,
988		0x2153c, 0x2153c,
989		0x21550, 0x21554,
990		0x21600, 0x21600,
991		0x21608, 0x2161c,
992		0x21624, 0x21628,
993		0x21630, 0x21634,
994		0x2163c, 0x2163c,
995		0x21700, 0x2171c,
996		0x21780, 0x2178c,
997		0x21800, 0x21818,
998		0x21820, 0x21828,
999		0x21830, 0x21848,
1000		0x21850, 0x21854,
1001		0x21860, 0x21868,
1002		0x21870, 0x21870,
1003		0x21878, 0x21898,
1004		0x218a0, 0x218a8,
1005		0x218b0, 0x218c8,
1006		0x218d0, 0x218d4,
1007		0x218e0, 0x218e8,
1008		0x218f0, 0x218f0,
1009		0x218f8, 0x21a18,
1010		0x21a20, 0x21a28,
1011		0x21a30, 0x21a48,
1012		0x21a50, 0x21a54,
1013		0x21a60, 0x21a68,
1014		0x21a70, 0x21a70,
1015		0x21a78, 0x21a98,
1016		0x21aa0, 0x21aa8,
1017		0x21ab0, 0x21ac8,
1018		0x21ad0, 0x21ad4,
1019		0x21ae0, 0x21ae8,
1020		0x21af0, 0x21af0,
1021		0x21af8, 0x21c18,
1022		0x21c20, 0x21c20,
1023		0x21c28, 0x21c30,
1024		0x21c38, 0x21c38,
1025		0x21c80, 0x21c98,
1026		0x21ca0, 0x21ca8,
1027		0x21cb0, 0x21cc8,
1028		0x21cd0, 0x21cd4,
1029		0x21ce0, 0x21ce8,
1030		0x21cf0, 0x21cf0,
1031		0x21cf8, 0x21d7c,
1032		0x21e00, 0x21e04,
1033		0x22000, 0x2202c,
1034		0x22100, 0x2213c,
1035		0x22190, 0x221a0,
1036		0x221a8, 0x221b8,
1037		0x221c4, 0x221c8,
1038		0x22200, 0x22318,
1039		0x22400, 0x224b4,
1040		0x224c0, 0x22528,
1041		0x22540, 0x22614,
1042		0x23000, 0x23040,
1043		0x2304c, 0x23060,
1044		0x230c0, 0x230ec,
1045		0x23200, 0x23268,
1046		0x23270, 0x23284,
1047		0x232fc, 0x23388,
1048		0x23400, 0x23404,
1049		0x23500, 0x23500,
1050		0x23510, 0x23518,
1051		0x2352c, 0x23530,
1052		0x2353c, 0x2353c,
1053		0x23550, 0x23554,
1054		0x23600, 0x23600,
1055		0x23608, 0x2361c,
1056		0x23624, 0x23628,
1057		0x23630, 0x23634,
1058		0x2363c, 0x2363c,
1059		0x23700, 0x2371c,
1060		0x23780, 0x2378c,
1061		0x23800, 0x23818,
1062		0x23820, 0x23828,
1063		0x23830, 0x23848,
1064		0x23850, 0x23854,
1065		0x23860, 0x23868,
1066		0x23870, 0x23870,
1067		0x23878, 0x23898,
1068		0x238a0, 0x238a8,
1069		0x238b0, 0x238c8,
1070		0x238d0, 0x238d4,
1071		0x238e0, 0x238e8,
1072		0x238f0, 0x238f0,
1073		0x238f8, 0x23a18,
1074		0x23a20, 0x23a28,
1075		0x23a30, 0x23a48,
1076		0x23a50, 0x23a54,
1077		0x23a60, 0x23a68,
1078		0x23a70, 0x23a70,
1079		0x23a78, 0x23a98,
1080		0x23aa0, 0x23aa8,
1081		0x23ab0, 0x23ac8,
1082		0x23ad0, 0x23ad4,
1083		0x23ae0, 0x23ae8,
1084		0x23af0, 0x23af0,
1085		0x23af8, 0x23c18,
1086		0x23c20, 0x23c20,
1087		0x23c28, 0x23c30,
1088		0x23c38, 0x23c38,
1089		0x23c80, 0x23c98,
1090		0x23ca0, 0x23ca8,
1091		0x23cb0, 0x23cc8,
1092		0x23cd0, 0x23cd4,
1093		0x23ce0, 0x23ce8,
1094		0x23cf0, 0x23cf0,
1095		0x23cf8, 0x23d7c,
1096		0x23e00, 0x23e04,
1097		0x24000, 0x2402c,
1098		0x24100, 0x2413c,
1099		0x24190, 0x241a0,
1100		0x241a8, 0x241b8,
1101		0x241c4, 0x241c8,
1102		0x24200, 0x24318,
1103		0x24400, 0x244b4,
1104		0x244c0, 0x24528,
1105		0x24540, 0x24614,
1106		0x25000, 0x25040,
1107		0x2504c, 0x25060,
1108		0x250c0, 0x250ec,
1109		0x25200, 0x25268,
1110		0x25270, 0x25284,
1111		0x252fc, 0x25388,
1112		0x25400, 0x25404,
1113		0x25500, 0x25500,
1114		0x25510, 0x25518,
1115		0x2552c, 0x25530,
1116		0x2553c, 0x2553c,
1117		0x25550, 0x25554,
1118		0x25600, 0x25600,
1119		0x25608, 0x2561c,
1120		0x25624, 0x25628,
1121		0x25630, 0x25634,
1122		0x2563c, 0x2563c,
1123		0x25700, 0x2571c,
1124		0x25780, 0x2578c,
1125		0x25800, 0x25818,
1126		0x25820, 0x25828,
1127		0x25830, 0x25848,
1128		0x25850, 0x25854,
1129		0x25860, 0x25868,
1130		0x25870, 0x25870,
1131		0x25878, 0x25898,
1132		0x258a0, 0x258a8,
1133		0x258b0, 0x258c8,
1134		0x258d0, 0x258d4,
1135		0x258e0, 0x258e8,
1136		0x258f0, 0x258f0,
1137		0x258f8, 0x25a18,
1138		0x25a20, 0x25a28,
1139		0x25a30, 0x25a48,
1140		0x25a50, 0x25a54,
1141		0x25a60, 0x25a68,
1142		0x25a70, 0x25a70,
1143		0x25a78, 0x25a98,
1144		0x25aa0, 0x25aa8,
1145		0x25ab0, 0x25ac8,
1146		0x25ad0, 0x25ad4,
1147		0x25ae0, 0x25ae8,
1148		0x25af0, 0x25af0,
1149		0x25af8, 0x25c18,
1150		0x25c20, 0x25c20,
1151		0x25c28, 0x25c30,
1152		0x25c38, 0x25c38,
1153		0x25c80, 0x25c98,
1154		0x25ca0, 0x25ca8,
1155		0x25cb0, 0x25cc8,
1156		0x25cd0, 0x25cd4,
1157		0x25ce0, 0x25ce8,
1158		0x25cf0, 0x25cf0,
1159		0x25cf8, 0x25d7c,
1160		0x25e00, 0x25e04,
1161		0x26000, 0x2602c,
1162		0x26100, 0x2613c,
1163		0x26190, 0x261a0,
1164		0x261a8, 0x261b8,
1165		0x261c4, 0x261c8,
1166		0x26200, 0x26318,
1167		0x26400, 0x264b4,
1168		0x264c0, 0x26528,
1169		0x26540, 0x26614,
1170		0x27000, 0x27040,
1171		0x2704c, 0x27060,
1172		0x270c0, 0x270ec,
1173		0x27200, 0x27268,
1174		0x27270, 0x27284,
1175		0x272fc, 0x27388,
1176		0x27400, 0x27404,
1177		0x27500, 0x27500,
1178		0x27510, 0x27518,
1179		0x2752c, 0x27530,
1180		0x2753c, 0x2753c,
1181		0x27550, 0x27554,
1182		0x27600, 0x27600,
1183		0x27608, 0x2761c,
1184		0x27624, 0x27628,
1185		0x27630, 0x27634,
1186		0x2763c, 0x2763c,
1187		0x27700, 0x2771c,
1188		0x27780, 0x2778c,
1189		0x27800, 0x27818,
1190		0x27820, 0x27828,
1191		0x27830, 0x27848,
1192		0x27850, 0x27854,
1193		0x27860, 0x27868,
1194		0x27870, 0x27870,
1195		0x27878, 0x27898,
1196		0x278a0, 0x278a8,
1197		0x278b0, 0x278c8,
1198		0x278d0, 0x278d4,
1199		0x278e0, 0x278e8,
1200		0x278f0, 0x278f0,
1201		0x278f8, 0x27a18,
1202		0x27a20, 0x27a28,
1203		0x27a30, 0x27a48,
1204		0x27a50, 0x27a54,
1205		0x27a60, 0x27a68,
1206		0x27a70, 0x27a70,
1207		0x27a78, 0x27a98,
1208		0x27aa0, 0x27aa8,
1209		0x27ab0, 0x27ac8,
1210		0x27ad0, 0x27ad4,
1211		0x27ae0, 0x27ae8,
1212		0x27af0, 0x27af0,
1213		0x27af8, 0x27c18,
1214		0x27c20, 0x27c20,
1215		0x27c28, 0x27c30,
1216		0x27c38, 0x27c38,
1217		0x27c80, 0x27c98,
1218		0x27ca0, 0x27ca8,
1219		0x27cb0, 0x27cc8,
1220		0x27cd0, 0x27cd4,
1221		0x27ce0, 0x27ce8,
1222		0x27cf0, 0x27cf0,
1223		0x27cf8, 0x27d7c,
1224		0x27e00, 0x27e04,
1225	};
1226
/*
 * Register ranges visible to a T4 virtual function, expressed as inclusive
 * (first, last) address pairs.  A single-register entry repeats the same
 * address for both ends of its pair.
 * NOTE(review): presumably consumed by the VF register-dump path the same
 * way the PF tables above are — confirm against the caller.
 */
static const unsigned int t4vf_reg_ranges[] = {
	VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
	VF_MPS_REG(A_MPS_VF_CTL),
	VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
	/* On T4 the PL window is the single WHOAMI register. */
	VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI),
	VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
	VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
	/* Mailbox data: base address plus one 4-byte word per instance. */
	FW_T4VF_MBDATA_BASE_ADDR,
	FW_T4VF_MBDATA_BASE_ADDR +
	((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
};
1238
/*
 * T5 register address ranges, as inclusive (first, last) pairs of byte
 * addresses; entries with first == last describe a single register.  The
 * exact addresses are dictated by the T5 hardware register map — do not
 * edit by hand without the chip documentation.
 * NOTE(review): presumably walked pairwise by the register-dump code
 * (cf. the sibling t4/t6 tables in this file) — confirm against the caller.
 */
static const unsigned int t5_reg_ranges[] = {
	0x1008, 0x10c0,
	0x10cc, 0x10f8,
	0x1100, 0x1100,
	0x110c, 0x1148,
	0x1180, 0x1184,
	0x1190, 0x1194,
	0x11a0, 0x11a4,
	0x11b0, 0x11b4,
	0x11fc, 0x123c,
	0x1280, 0x173c,
	0x1800, 0x18fc,
	0x3000, 0x3028,
	0x3060, 0x30b0,
	0x30b8, 0x30d8,
	0x30e0, 0x30fc,
	0x3140, 0x357c,
	0x35a8, 0x35cc,
	0x35ec, 0x35ec,
	0x3600, 0x5624,
	0x56cc, 0x56ec,
	0x56f4, 0x5720,
	0x5728, 0x575c,
	0x580c, 0x5814,
	0x5890, 0x589c,
	0x58a4, 0x58ac,
	0x58b8, 0x58bc,
	0x5940, 0x59c8,
	0x59d0, 0x59dc,
	0x59fc, 0x5a18,
	0x5a60, 0x5a70,
	0x5a80, 0x5a9c,
	0x5b94, 0x5bfc,
	0x6000, 0x6020,
	0x6028, 0x6040,
	0x6058, 0x609c,
	0x60a8, 0x614c,
	0x7700, 0x7798,
	0x77c0, 0x78fc,
	0x7b00, 0x7b58,
	0x7b60, 0x7b84,
	0x7b8c, 0x7c54,
	0x7d00, 0x7d38,
	0x7d40, 0x7d80,
	0x7d8c, 0x7ddc,
	0x7de4, 0x7e04,
	0x7e10, 0x7e1c,
	0x7e24, 0x7e38,
	0x7e40, 0x7e44,
	0x7e4c, 0x7e78,
	0x7e80, 0x7edc,
	0x7ee8, 0x7efc,
	0x8dc0, 0x8de0,
	0x8df8, 0x8e04,
	0x8e10, 0x8e84,
	0x8ea0, 0x8f84,
	0x8fc0, 0x9058,
	0x9060, 0x9060,
	0x9068, 0x90f8,
	0x9400, 0x9408,
	0x9410, 0x9470,
	0x9600, 0x9600,
	0x9608, 0x9638,
	0x9640, 0x96f4,
	0x9800, 0x9808,
	0x9820, 0x983c,
	0x9850, 0x9864,
	0x9c00, 0x9c6c,
	0x9c80, 0x9cec,
	0x9d00, 0x9d6c,
	0x9d80, 0x9dec,
	0x9e00, 0x9e6c,
	0x9e80, 0x9eec,
	0x9f00, 0x9f6c,
	0x9f80, 0xa020,
	0xd004, 0xd004,
	0xd010, 0xd03c,
	0xdfc0, 0xdfe0,
	0xe000, 0x1106c,
	0x11074, 0x11088,
	0x1109c, 0x1117c,
	0x11190, 0x11204,
	0x19040, 0x1906c,
	0x19078, 0x19080,
	0x1908c, 0x190e8,
	0x190f0, 0x190f8,
	0x19100, 0x19110,
	0x19120, 0x19124,
	0x19150, 0x19194,
	0x1919c, 0x191b0,
	0x191d0, 0x191e8,
	0x19238, 0x19290,
	0x193f8, 0x19428,
	0x19430, 0x19444,
	0x1944c, 0x1946c,
	0x19474, 0x19474,
	0x19490, 0x194cc,
	0x194f0, 0x194f8,
	0x19c00, 0x19c08,
	0x19c10, 0x19c60,
	0x19c94, 0x19ce4,
	0x19cf0, 0x19d40,
	0x19d50, 0x19d94,
	0x19da0, 0x19de8,
	0x19df0, 0x19e10,
	0x19e50, 0x19e90,
	0x19ea0, 0x19f24,
	0x19f34, 0x19f34,
	0x19f40, 0x19f50,
	0x19f90, 0x19fb4,
	0x19fc4, 0x19fe4,
	0x1a000, 0x1a004,
	0x1a010, 0x1a06c,
	0x1a0b0, 0x1a0e4,
	0x1a0ec, 0x1a0f8,
	0x1a100, 0x1a108,
	0x1a114, 0x1a120,
	0x1a128, 0x1a130,
	0x1a138, 0x1a138,
	0x1a190, 0x1a1c4,
	0x1a1fc, 0x1a1fc,
	0x1e008, 0x1e00c,
	0x1e040, 0x1e044,
	0x1e04c, 0x1e04c,
	0x1e284, 0x1e290,
	0x1e2c0, 0x1e2c0,
	0x1e2e0, 0x1e2e0,
	0x1e300, 0x1e384,
	0x1e3c0, 0x1e3c8,
	0x1e408, 0x1e40c,
	0x1e440, 0x1e444,
	0x1e44c, 0x1e44c,
	0x1e684, 0x1e690,
	0x1e6c0, 0x1e6c0,
	0x1e6e0, 0x1e6e0,
	0x1e700, 0x1e784,
	0x1e7c0, 0x1e7c8,
	0x1e808, 0x1e80c,
	0x1e840, 0x1e844,
	0x1e84c, 0x1e84c,
	0x1ea84, 0x1ea90,
	0x1eac0, 0x1eac0,
	0x1eae0, 0x1eae0,
	0x1eb00, 0x1eb84,
	0x1ebc0, 0x1ebc8,
	0x1ec08, 0x1ec0c,
	0x1ec40, 0x1ec44,
	0x1ec4c, 0x1ec4c,
	0x1ee84, 0x1ee90,
	0x1eec0, 0x1eec0,
	0x1eee0, 0x1eee0,
	0x1ef00, 0x1ef84,
	0x1efc0, 0x1efc8,
	0x1f008, 0x1f00c,
	0x1f040, 0x1f044,
	0x1f04c, 0x1f04c,
	0x1f284, 0x1f290,
	0x1f2c0, 0x1f2c0,
	0x1f2e0, 0x1f2e0,
	0x1f300, 0x1f384,
	0x1f3c0, 0x1f3c8,
	0x1f408, 0x1f40c,
	0x1f440, 0x1f444,
	0x1f44c, 0x1f44c,
	0x1f684, 0x1f690,
	0x1f6c0, 0x1f6c0,
	0x1f6e0, 0x1f6e0,
	0x1f700, 0x1f784,
	0x1f7c0, 0x1f7c8,
	0x1f808, 0x1f80c,
	0x1f840, 0x1f844,
	0x1f84c, 0x1f84c,
	0x1fa84, 0x1fa90,
	0x1fac0, 0x1fac0,
	0x1fae0, 0x1fae0,
	0x1fb00, 0x1fb84,
	0x1fbc0, 0x1fbc8,
	0x1fc08, 0x1fc0c,
	0x1fc40, 0x1fc44,
	0x1fc4c, 0x1fc4c,
	0x1fe84, 0x1fe90,
	0x1fec0, 0x1fec0,
	0x1fee0, 0x1fee0,
	0x1ff00, 0x1ff84,
	0x1ffc0, 0x1ffc8,
	0x30000, 0x30030,
	0x30100, 0x30144,
	0x30190, 0x301a0,
	0x301a8, 0x301b8,
	0x301c4, 0x301c8,
	0x301d0, 0x301d0,
	0x30200, 0x30318,
	0x30400, 0x304b4,
	0x304c0, 0x3052c,
	0x30540, 0x3061c,
	0x30800, 0x30828,
	0x30834, 0x30834,
	0x308c0, 0x30908,
	0x30910, 0x309ac,
	0x30a00, 0x30a14,
	0x30a1c, 0x30a2c,
	0x30a44, 0x30a50,
	0x30a74, 0x30a74,
	0x30a7c, 0x30afc,
	0x30b08, 0x30c24,
	0x30d00, 0x30d00,
	0x30d08, 0x30d14,
	0x30d1c, 0x30d20,
	0x30d3c, 0x30d3c,
	0x30d48, 0x30d50,
	0x31200, 0x3120c,
	0x31220, 0x31220,
	0x31240, 0x31240,
	0x31600, 0x3160c,
	0x31a00, 0x31a1c,
	0x31e00, 0x31e20,
	0x31e38, 0x31e3c,
	0x31e80, 0x31e80,
	0x31e88, 0x31ea8,
	0x31eb0, 0x31eb4,
	0x31ec8, 0x31ed4,
	0x31fb8, 0x32004,
	0x32200, 0x32200,
	0x32208, 0x32240,
	0x32248, 0x32280,
	0x32288, 0x322c0,
	0x322c8, 0x322fc,
	0x32600, 0x32630,
	0x32a00, 0x32abc,
	0x32b00, 0x32b10,
	0x32b20, 0x32b30,
	0x32b40, 0x32b50,
	0x32b60, 0x32b70,
	0x33000, 0x33028,
	0x33030, 0x33048,
	0x33060, 0x33068,
	0x33070, 0x3309c,
	0x330f0, 0x33128,
	0x33130, 0x33148,
	0x33160, 0x33168,
	0x33170, 0x3319c,
	0x331f0, 0x33238,
	0x33240, 0x33240,
	0x33248, 0x33250,
	0x3325c, 0x33264,
	0x33270, 0x332b8,
	0x332c0, 0x332e4,
	0x332f8, 0x33338,
	0x33340, 0x33340,
	0x33348, 0x33350,
	0x3335c, 0x33364,
	0x33370, 0x333b8,
	0x333c0, 0x333e4,
	0x333f8, 0x33428,
	0x33430, 0x33448,
	0x33460, 0x33468,
	0x33470, 0x3349c,
	0x334f0, 0x33528,
	0x33530, 0x33548,
	0x33560, 0x33568,
	0x33570, 0x3359c,
	0x335f0, 0x33638,
	0x33640, 0x33640,
	0x33648, 0x33650,
	0x3365c, 0x33664,
	0x33670, 0x336b8,
	0x336c0, 0x336e4,
	0x336f8, 0x33738,
	0x33740, 0x33740,
	0x33748, 0x33750,
	0x3375c, 0x33764,
	0x33770, 0x337b8,
	0x337c0, 0x337e4,
	0x337f8, 0x337fc,
	0x33814, 0x33814,
	0x3382c, 0x3382c,
	0x33880, 0x3388c,
	0x338e8, 0x338ec,
	0x33900, 0x33928,
	0x33930, 0x33948,
	0x33960, 0x33968,
	0x33970, 0x3399c,
	0x339f0, 0x33a38,
	0x33a40, 0x33a40,
	0x33a48, 0x33a50,
	0x33a5c, 0x33a64,
	0x33a70, 0x33ab8,
	0x33ac0, 0x33ae4,
	0x33af8, 0x33b10,
	0x33b28, 0x33b28,
	0x33b3c, 0x33b50,
	0x33bf0, 0x33c10,
	0x33c28, 0x33c28,
	0x33c3c, 0x33c50,
	0x33cf0, 0x33cfc,
	0x34000, 0x34030,
	0x34100, 0x34144,
	0x34190, 0x341a0,
	0x341a8, 0x341b8,
	0x341c4, 0x341c8,
	0x341d0, 0x341d0,
	0x34200, 0x34318,
	0x34400, 0x344b4,
	0x344c0, 0x3452c,
	0x34540, 0x3461c,
	0x34800, 0x34828,
	0x34834, 0x34834,
	0x348c0, 0x34908,
	0x34910, 0x349ac,
	0x34a00, 0x34a14,
	0x34a1c, 0x34a2c,
	0x34a44, 0x34a50,
	0x34a74, 0x34a74,
	0x34a7c, 0x34afc,
	0x34b08, 0x34c24,
	0x34d00, 0x34d00,
	0x34d08, 0x34d14,
	0x34d1c, 0x34d20,
	0x34d3c, 0x34d3c,
	0x34d48, 0x34d50,
	0x35200, 0x3520c,
	0x35220, 0x35220,
	0x35240, 0x35240,
	0x35600, 0x3560c,
	0x35a00, 0x35a1c,
	0x35e00, 0x35e20,
	0x35e38, 0x35e3c,
	0x35e80, 0x35e80,
	0x35e88, 0x35ea8,
	0x35eb0, 0x35eb4,
	0x35ec8, 0x35ed4,
	0x35fb8, 0x36004,
	0x36200, 0x36200,
	0x36208, 0x36240,
	0x36248, 0x36280,
	0x36288, 0x362c0,
	0x362c8, 0x362fc,
	0x36600, 0x36630,
	0x36a00, 0x36abc,
	0x36b00, 0x36b10,
	0x36b20, 0x36b30,
	0x36b40, 0x36b50,
	0x36b60, 0x36b70,
	0x37000, 0x37028,
	0x37030, 0x37048,
	0x37060, 0x37068,
	0x37070, 0x3709c,
	0x370f0, 0x37128,
	0x37130, 0x37148,
	0x37160, 0x37168,
	0x37170, 0x3719c,
	0x371f0, 0x37238,
	0x37240, 0x37240,
	0x37248, 0x37250,
	0x3725c, 0x37264,
	0x37270, 0x372b8,
	0x372c0, 0x372e4,
	0x372f8, 0x37338,
	0x37340, 0x37340,
	0x37348, 0x37350,
	0x3735c, 0x37364,
	0x37370, 0x373b8,
	0x373c0, 0x373e4,
	0x373f8, 0x37428,
	0x37430, 0x37448,
	0x37460, 0x37468,
	0x37470, 0x3749c,
	0x374f0, 0x37528,
	0x37530, 0x37548,
	0x37560, 0x37568,
	0x37570, 0x3759c,
	0x375f0, 0x37638,
	0x37640, 0x37640,
	0x37648, 0x37650,
	0x3765c, 0x37664,
	0x37670, 0x376b8,
	0x376c0, 0x376e4,
	0x376f8, 0x37738,
	0x37740, 0x37740,
	0x37748, 0x37750,
	0x3775c, 0x37764,
	0x37770, 0x377b8,
	0x377c0, 0x377e4,
	0x377f8, 0x377fc,
	0x37814, 0x37814,
	0x3782c, 0x3782c,
	0x37880, 0x3788c,
	0x378e8, 0x378ec,
	0x37900, 0x37928,
	0x37930, 0x37948,
	0x37960, 0x37968,
	0x37970, 0x3799c,
	0x379f0, 0x37a38,
	0x37a40, 0x37a40,
	0x37a48, 0x37a50,
	0x37a5c, 0x37a64,
	0x37a70, 0x37ab8,
	0x37ac0, 0x37ae4,
	0x37af8, 0x37b10,
	0x37b28, 0x37b28,
	0x37b3c, 0x37b50,
	0x37bf0, 0x37c10,
	0x37c28, 0x37c28,
	0x37c3c, 0x37c50,
	0x37cf0, 0x37cfc,
	0x38000, 0x38030,
	0x38100, 0x38144,
	0x38190, 0x381a0,
	0x381a8, 0x381b8,
	0x381c4, 0x381c8,
	0x381d0, 0x381d0,
	0x38200, 0x38318,
	0x38400, 0x384b4,
	0x384c0, 0x3852c,
	0x38540, 0x3861c,
	0x38800, 0x38828,
	0x38834, 0x38834,
	0x388c0, 0x38908,
	0x38910, 0x389ac,
	0x38a00, 0x38a14,
	0x38a1c, 0x38a2c,
	0x38a44, 0x38a50,
	0x38a74, 0x38a74,
	0x38a7c, 0x38afc,
	0x38b08, 0x38c24,
	0x38d00, 0x38d00,
	0x38d08, 0x38d14,
	0x38d1c, 0x38d20,
	0x38d3c, 0x38d3c,
	0x38d48, 0x38d50,
	0x39200, 0x3920c,
	0x39220, 0x39220,
	0x39240, 0x39240,
	0x39600, 0x3960c,
	0x39a00, 0x39a1c,
	0x39e00, 0x39e20,
	0x39e38, 0x39e3c,
	0x39e80, 0x39e80,
	0x39e88, 0x39ea8,
	0x39eb0, 0x39eb4,
	0x39ec8, 0x39ed4,
	0x39fb8, 0x3a004,
	0x3a200, 0x3a200,
	0x3a208, 0x3a240,
	0x3a248, 0x3a280,
	0x3a288, 0x3a2c0,
	0x3a2c8, 0x3a2fc,
	0x3a600, 0x3a630,
	0x3aa00, 0x3aabc,
	0x3ab00, 0x3ab10,
	0x3ab20, 0x3ab30,
	0x3ab40, 0x3ab50,
	0x3ab60, 0x3ab70,
	0x3b000, 0x3b028,
	0x3b030, 0x3b048,
	0x3b060, 0x3b068,
	0x3b070, 0x3b09c,
	0x3b0f0, 0x3b128,
	0x3b130, 0x3b148,
	0x3b160, 0x3b168,
	0x3b170, 0x3b19c,
	0x3b1f0, 0x3b238,
	0x3b240, 0x3b240,
	0x3b248, 0x3b250,
	0x3b25c, 0x3b264,
	0x3b270, 0x3b2b8,
	0x3b2c0, 0x3b2e4,
	0x3b2f8, 0x3b338,
	0x3b340, 0x3b340,
	0x3b348, 0x3b350,
	0x3b35c, 0x3b364,
	0x3b370, 0x3b3b8,
	0x3b3c0, 0x3b3e4,
	0x3b3f8, 0x3b428,
	0x3b430, 0x3b448,
	0x3b460, 0x3b468,
	0x3b470, 0x3b49c,
	0x3b4f0, 0x3b528,
	0x3b530, 0x3b548,
	0x3b560, 0x3b568,
	0x3b570, 0x3b59c,
	0x3b5f0, 0x3b638,
	0x3b640, 0x3b640,
	0x3b648, 0x3b650,
	0x3b65c, 0x3b664,
	0x3b670, 0x3b6b8,
	0x3b6c0, 0x3b6e4,
	0x3b6f8, 0x3b738,
	0x3b740, 0x3b740,
	0x3b748, 0x3b750,
	0x3b75c, 0x3b764,
	0x3b770, 0x3b7b8,
	0x3b7c0, 0x3b7e4,
	0x3b7f8, 0x3b7fc,
	0x3b814, 0x3b814,
	0x3b82c, 0x3b82c,
	0x3b880, 0x3b88c,
	0x3b8e8, 0x3b8ec,
	0x3b900, 0x3b928,
	0x3b930, 0x3b948,
	0x3b960, 0x3b968,
	0x3b970, 0x3b99c,
	0x3b9f0, 0x3ba38,
	0x3ba40, 0x3ba40,
	0x3ba48, 0x3ba50,
	0x3ba5c, 0x3ba64,
	0x3ba70, 0x3bab8,
	0x3bac0, 0x3bae4,
	0x3baf8, 0x3bb10,
	0x3bb28, 0x3bb28,
	0x3bb3c, 0x3bb50,
	0x3bbf0, 0x3bc10,
	0x3bc28, 0x3bc28,
	0x3bc3c, 0x3bc50,
	0x3bcf0, 0x3bcfc,
	0x3c000, 0x3c030,
	0x3c100, 0x3c144,
	0x3c190, 0x3c1a0,
	0x3c1a8, 0x3c1b8,
	0x3c1c4, 0x3c1c8,
	0x3c1d0, 0x3c1d0,
	0x3c200, 0x3c318,
	0x3c400, 0x3c4b4,
	0x3c4c0, 0x3c52c,
	0x3c540, 0x3c61c,
	0x3c800, 0x3c828,
	0x3c834, 0x3c834,
	0x3c8c0, 0x3c908,
	0x3c910, 0x3c9ac,
	0x3ca00, 0x3ca14,
	0x3ca1c, 0x3ca2c,
	0x3ca44, 0x3ca50,
	0x3ca74, 0x3ca74,
	0x3ca7c, 0x3cafc,
	0x3cb08, 0x3cc24,
	0x3cd00, 0x3cd00,
	0x3cd08, 0x3cd14,
	0x3cd1c, 0x3cd20,
	0x3cd3c, 0x3cd3c,
	0x3cd48, 0x3cd50,
	0x3d200, 0x3d20c,
	0x3d220, 0x3d220,
	0x3d240, 0x3d240,
	0x3d600, 0x3d60c,
	0x3da00, 0x3da1c,
	0x3de00, 0x3de20,
	0x3de38, 0x3de3c,
	0x3de80, 0x3de80,
	0x3de88, 0x3dea8,
	0x3deb0, 0x3deb4,
	0x3dec8, 0x3ded4,
	0x3dfb8, 0x3e004,
	0x3e200, 0x3e200,
	0x3e208, 0x3e240,
	0x3e248, 0x3e280,
	0x3e288, 0x3e2c0,
	0x3e2c8, 0x3e2fc,
	0x3e600, 0x3e630,
	0x3ea00, 0x3eabc,
	0x3eb00, 0x3eb10,
	0x3eb20, 0x3eb30,
	0x3eb40, 0x3eb50,
	0x3eb60, 0x3eb70,
	0x3f000, 0x3f028,
	0x3f030, 0x3f048,
	0x3f060, 0x3f068,
	0x3f070, 0x3f09c,
	0x3f0f0, 0x3f128,
	0x3f130, 0x3f148,
	0x3f160, 0x3f168,
	0x3f170, 0x3f19c,
	0x3f1f0, 0x3f238,
	0x3f240, 0x3f240,
	0x3f248, 0x3f250,
	0x3f25c, 0x3f264,
	0x3f270, 0x3f2b8,
	0x3f2c0, 0x3f2e4,
	0x3f2f8, 0x3f338,
	0x3f340, 0x3f340,
	0x3f348, 0x3f350,
	0x3f35c, 0x3f364,
	0x3f370, 0x3f3b8,
	0x3f3c0, 0x3f3e4,
	0x3f3f8, 0x3f428,
	0x3f430, 0x3f448,
	0x3f460, 0x3f468,
	0x3f470, 0x3f49c,
	0x3f4f0, 0x3f528,
	0x3f530, 0x3f548,
	0x3f560, 0x3f568,
	0x3f570, 0x3f59c,
	0x3f5f0, 0x3f638,
	0x3f640, 0x3f640,
	0x3f648, 0x3f650,
	0x3f65c, 0x3f664,
	0x3f670, 0x3f6b8,
	0x3f6c0, 0x3f6e4,
	0x3f6f8, 0x3f738,
	0x3f740, 0x3f740,
	0x3f748, 0x3f750,
	0x3f75c, 0x3f764,
	0x3f770, 0x3f7b8,
	0x3f7c0, 0x3f7e4,
	0x3f7f8, 0x3f7fc,
	0x3f814, 0x3f814,
	0x3f82c, 0x3f82c,
	0x3f880, 0x3f88c,
	0x3f8e8, 0x3f8ec,
	0x3f900, 0x3f928,
	0x3f930, 0x3f948,
	0x3f960, 0x3f968,
	0x3f970, 0x3f99c,
	0x3f9f0, 0x3fa38,
	0x3fa40, 0x3fa40,
	0x3fa48, 0x3fa50,
	0x3fa5c, 0x3fa64,
	0x3fa70, 0x3fab8,
	0x3fac0, 0x3fae4,
	0x3faf8, 0x3fb10,
	0x3fb28, 0x3fb28,
	0x3fb3c, 0x3fb50,
	0x3fbf0, 0x3fc10,
	0x3fc28, 0x3fc28,
	0x3fc3c, 0x3fc50,
	0x3fcf0, 0x3fcfc,
	0x40000, 0x4000c,
	0x40040, 0x40050,
	0x40060, 0x40068,
	0x4007c, 0x4008c,
	0x40094, 0x400b0,
	0x400c0, 0x40144,
	0x40180, 0x4018c,
	0x40200, 0x40254,
	0x40260, 0x40264,
	0x40270, 0x40288,
	0x40290, 0x40298,
	0x402ac, 0x402c8,
	0x402d0, 0x402e0,
	0x402f0, 0x402f0,
	0x40300, 0x4033c,
	0x403f8, 0x403fc,
	0x41304, 0x413c4,
	0x41400, 0x4140c,
	0x41414, 0x4141c,
	0x41480, 0x414d0,
	0x44000, 0x44054,
	0x4405c, 0x44078,
	0x440c0, 0x44174,
	0x44180, 0x441ac,
	0x441b4, 0x441b8,
	0x441c0, 0x44254,
	0x4425c, 0x44278,
	0x442c0, 0x44374,
	0x44380, 0x443ac,
	0x443b4, 0x443b8,
	0x443c0, 0x44454,
	0x4445c, 0x44478,
	0x444c0, 0x44574,
	0x44580, 0x445ac,
	0x445b4, 0x445b8,
	0x445c0, 0x44654,
	0x4465c, 0x44678,
	0x446c0, 0x44774,
	0x44780, 0x447ac,
	0x447b4, 0x447b8,
	0x447c0, 0x44854,
	0x4485c, 0x44878,
	0x448c0, 0x44974,
	0x44980, 0x449ac,
	0x449b4, 0x449b8,
	0x449c0, 0x449fc,
	0x45000, 0x45004,
	0x45010, 0x45030,
	0x45040, 0x45060,
	0x45068, 0x45068,
	0x45080, 0x45084,
	0x450a0, 0x450b0,
	0x45200, 0x45204,
	0x45210, 0x45230,
	0x45240, 0x45260,
	0x45268, 0x45268,
	0x45280, 0x45284,
	0x452a0, 0x452b0,
	0x460c0, 0x460e4,
	0x47000, 0x4703c,
	0x47044, 0x4708c,
	0x47200, 0x47250,
	0x47400, 0x47408,
	0x47414, 0x47420,
	0x47600, 0x47618,
	0x47800, 0x47814,
	0x48000, 0x4800c,
	0x48040, 0x48050,
	0x48060, 0x48068,
	0x4807c, 0x4808c,
	0x48094, 0x480b0,
	0x480c0, 0x48144,
	0x48180, 0x4818c,
	0x48200, 0x48254,
	0x48260, 0x48264,
	0x48270, 0x48288,
	0x48290, 0x48298,
	0x482ac, 0x482c8,
	0x482d0, 0x482e0,
	0x482f0, 0x482f0,
	0x48300, 0x4833c,
	0x483f8, 0x483fc,
	0x49304, 0x493c4,
	0x49400, 0x4940c,
	0x49414, 0x4941c,
	0x49480, 0x494d0,
	0x4c000, 0x4c054,
	0x4c05c, 0x4c078,
	0x4c0c0, 0x4c174,
	0x4c180, 0x4c1ac,
	0x4c1b4, 0x4c1b8,
	0x4c1c0, 0x4c254,
	0x4c25c, 0x4c278,
	0x4c2c0, 0x4c374,
	0x4c380, 0x4c3ac,
	0x4c3b4, 0x4c3b8,
	0x4c3c0, 0x4c454,
	0x4c45c, 0x4c478,
	0x4c4c0, 0x4c574,
	0x4c580, 0x4c5ac,
	0x4c5b4, 0x4c5b8,
	0x4c5c0, 0x4c654,
	0x4c65c, 0x4c678,
	0x4c6c0, 0x4c774,
	0x4c780, 0x4c7ac,
	0x4c7b4, 0x4c7b8,
	0x4c7c0, 0x4c854,
	0x4c85c, 0x4c878,
	0x4c8c0, 0x4c974,
	0x4c980, 0x4c9ac,
	0x4c9b4, 0x4c9b8,
	0x4c9c0, 0x4c9fc,
	0x4d000, 0x4d004,
	0x4d010, 0x4d030,
	0x4d040, 0x4d060,
	0x4d068, 0x4d068,
	0x4d080, 0x4d084,
	0x4d0a0, 0x4d0b0,
	0x4d200, 0x4d204,
	0x4d210, 0x4d230,
	0x4d240, 0x4d260,
	0x4d268, 0x4d268,
	0x4d280, 0x4d284,
	0x4d2a0, 0x4d2b0,
	0x4e0c0, 0x4e0e4,
	0x4f000, 0x4f03c,
	0x4f044, 0x4f08c,
	0x4f200, 0x4f250,
	0x4f400, 0x4f408,
	0x4f414, 0x4f420,
	0x4f600, 0x4f618,
	0x4f800, 0x4f814,
	0x50000, 0x50084,
	0x50090, 0x500cc,
	0x50400, 0x50400,
	0x50800, 0x50884,
	0x50890, 0x508cc,
	0x50c00, 0x50c00,
	0x51000, 0x5101c,
	0x51300, 0x51308,
};
2005
/*
 * Register ranges visible to a T5 virtual function, expressed as inclusive
 * (first, last) address pairs.  Unlike the T4 table, the PL window here
 * spans A_PL_VF_WHOAMI through A_PL_VF_REVISION rather than the single
 * WHOAMI register.
 * NOTE(review): the mailbox-data entry reuses FW_T4VF_MBDATA_BASE_ADDR —
 * presumably the VF mailbox lives at the same address on T5; confirm
 * against the firmware interface definitions.
 */
static const unsigned int t5vf_reg_ranges[] = {
	VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
	VF_MPS_REG(A_MPS_VF_CTL),
	VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
	VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
	VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
	VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
	/* Mailbox data: base address plus one 4-byte word per instance. */
	FW_T4VF_MBDATA_BASE_ADDR,
	FW_T4VF_MBDATA_BASE_ADDR +
	((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
};
2017
2018	static const unsigned int t6_reg_ranges[] = {
2019		0x1008, 0x101c,
2020		0x1024, 0x10a8,
2021		0x10b4, 0x10f8,
2022		0x1100, 0x1114,
2023		0x111c, 0x112c,
2024		0x1138, 0x113c,
2025		0x1144, 0x114c,
2026		0x1180, 0x1184,
2027		0x1190, 0x1194,
2028		0x11a0, 0x11a4,
2029		0x11b0, 0x11b4,
2030		0x11fc, 0x1274,
2031		0x1280, 0x133c,
2032		0x1800, 0x18fc,
2033		0x3000, 0x302c,
2034		0x3060, 0x30b0,
2035		0x30b8, 0x30d8,
2036		0x30e0, 0x30fc,
2037		0x3140, 0x357c,
2038		0x35a8, 0x35cc,
2039		0x35ec, 0x35ec,
2040		0x3600, 0x5624,
2041		0x56cc, 0x56ec,
2042		0x56f4, 0x5720,
2043		0x5728, 0x575c,
2044		0x580c, 0x5814,
2045		0x5890, 0x589c,
2046		0x58a4, 0x58ac,
2047		0x58b8, 0x58bc,
2048		0x5940, 0x595c,
2049		0x5980, 0x598c,
2050		0x59b0, 0x59c8,
2051		0x59d0, 0x59dc,
2052		0x59fc, 0x5a18,
2053		0x5a60, 0x5a6c,
2054		0x5a80, 0x5a8c,
2055		0x5a94, 0x5a9c,
2056		0x5b94, 0x5bfc,
2057		0x5c10, 0x5e48,
2058		0x5e50, 0x5e94,
2059		0x5ea0, 0x5eb0,
2060		0x5ec0, 0x5ec0,
2061		0x5ec8, 0x5ed0,
2062		0x5ee0, 0x5ee0,
2063		0x5ef0, 0x5ef0,
2064		0x5f00, 0x5f00,
2065		0x6000, 0x6020,
2066		0x6028, 0x6040,
2067		0x6058, 0x609c,
2068		0x60a8, 0x619c,
2069		0x7700, 0x7798,
2070		0x77c0, 0x7880,
2071		0x78cc, 0x78fc,
2072		0x7b00, 0x7b58,
2073		0x7b60, 0x7b84,
2074		0x7b8c, 0x7c54,
2075		0x7d00, 0x7d38,
2076		0x7d40, 0x7d84,
2077		0x7d8c, 0x7ddc,
2078		0x7de4, 0x7e04,
2079		0x7e10, 0x7e1c,
2080		0x7e24, 0x7e38,
2081		0x7e40, 0x7e44,
2082		0x7e4c, 0x7e78,
2083		0x7e80, 0x7edc,
2084		0x7ee8, 0x7efc,
2085		0x8dc0, 0x8de4,
2086		0x8df8, 0x8e04,
2087		0x8e10, 0x8e84,
2088		0x8ea0, 0x8f88,
2089		0x8fb8, 0x9058,
2090		0x9060, 0x9060,
2091		0x9068, 0x90f8,
2092		0x9100, 0x9124,
2093		0x9400, 0x9470,
2094		0x9600, 0x9600,
2095		0x9608, 0x9638,
2096		0x9640, 0x9704,
2097		0x9710, 0x971c,
2098		0x9800, 0x9808,
2099		0x9820, 0x983c,
2100		0x9850, 0x9864,
2101		0x9c00, 0x9c6c,
2102		0x9c80, 0x9cec,
2103		0x9d00, 0x9d6c,
2104		0x9d80, 0x9dec,
2105		0x9e00, 0x9e6c,
2106		0x9e80, 0x9eec,
2107		0x9f00, 0x9f6c,
2108		0x9f80, 0xa020,
2109		0xd004, 0xd03c,
2110		0xd100, 0xd118,
2111		0xd200, 0xd214,
2112		0xd220, 0xd234,
2113		0xd240, 0xd254,
2114		0xd260, 0xd274,
2115		0xd280, 0xd294,
2116		0xd2a0, 0xd2b4,
2117		0xd2c0, 0xd2d4,
2118		0xd2e0, 0xd2f4,
2119		0xd300, 0xd31c,
2120		0xdfc0, 0xdfe0,
2121		0xe000, 0xf008,
2122		0xf010, 0xf018,
2123		0xf020, 0xf028,
2124		0x11000, 0x11014,
2125		0x11048, 0x1106c,
2126		0x11074, 0x11088,
2127		0x11098, 0x11120,
2128		0x1112c, 0x1117c,
2129		0x11190, 0x112e0,
2130		0x11300, 0x1130c,
2131		0x12000, 0x1206c,
2132		0x19040, 0x1906c,
2133		0x19078, 0x19080,
2134		0x1908c, 0x190e8,
2135		0x190f0, 0x190f8,
2136		0x19100, 0x19110,
2137		0x19120, 0x19124,
2138		0x19150, 0x19194,
2139		0x1919c, 0x191b0,
2140		0x191d0, 0x191e8,
2141		0x19238, 0x19290,
2142		0x192a4, 0x192b0,
2143		0x192bc, 0x192bc,
2144		0x19348, 0x1934c,
2145		0x193f8, 0x19418,
2146		0x19420, 0x19428,
2147		0x19430, 0x19444,
2148		0x1944c, 0x1946c,
2149		0x19474, 0x19474,
2150		0x19490, 0x194cc,
2151		0x194f0, 0x194f8,
2152		0x19c00, 0x19c48,
2153		0x19c50, 0x19c80,
2154		0x19c94, 0x19c98,
2155		0x19ca0, 0x19cbc,
2156		0x19ce4, 0x19ce4,
2157		0x19cf0, 0x19cf8,
2158		0x19d00, 0x19d28,
2159		0x19d50, 0x19d78,
2160		0x19d94, 0x19d98,
2161		0x19da0, 0x19dc8,
2162		0x19df0, 0x19e10,
2163		0x19e50, 0x19e6c,
2164		0x19ea0, 0x19ebc,
2165		0x19ec4, 0x19ef4,
2166		0x19f04, 0x19f2c,
2167		0x19f34, 0x19f34,
2168		0x19f40, 0x19f50,
2169		0x19f90, 0x19fac,
2170		0x19fc4, 0x19fc8,
2171		0x19fd0, 0x19fe4,
2172		0x1a000, 0x1a004,
2173		0x1a010, 0x1a06c,
2174		0x1a0b0, 0x1a0e4,
2175		0x1a0ec, 0x1a0f8,
2176		0x1a100, 0x1a108,
2177		0x1a114, 0x1a120,
2178		0x1a128, 0x1a130,
2179		0x1a138, 0x1a138,
2180		0x1a190, 0x1a1c4,
2181		0x1a1fc, 0x1a1fc,
2182		0x1e008, 0x1e00c,
2183		0x1e040, 0x1e044,
2184		0x1e04c, 0x1e04c,
2185		0x1e284, 0x1e290,
2186		0x1e2c0, 0x1e2c0,
2187		0x1e2e0, 0x1e2e0,
2188		0x1e300, 0x1e384,
2189		0x1e3c0, 0x1e3c8,
2190		0x1e408, 0x1e40c,
2191		0x1e440, 0x1e444,
2192		0x1e44c, 0x1e44c,
2193		0x1e684, 0x1e690,
2194		0x1e6c0, 0x1e6c0,
2195		0x1e6e0, 0x1e6e0,
2196		0x1e700, 0x1e784,
2197		0x1e7c0, 0x1e7c8,
2198		0x1e808, 0x1e80c,
2199		0x1e840, 0x1e844,
2200		0x1e84c, 0x1e84c,
2201		0x1ea84, 0x1ea90,
2202		0x1eac0, 0x1eac0,
2203		0x1eae0, 0x1eae0,
2204		0x1eb00, 0x1eb84,
2205		0x1ebc0, 0x1ebc8,
2206		0x1ec08, 0x1ec0c,
2207		0x1ec40, 0x1ec44,
2208		0x1ec4c, 0x1ec4c,
2209		0x1ee84, 0x1ee90,
2210		0x1eec0, 0x1eec0,
2211		0x1eee0, 0x1eee0,
2212		0x1ef00, 0x1ef84,
2213		0x1efc0, 0x1efc8,
2214		0x1f008, 0x1f00c,
2215		0x1f040, 0x1f044,
2216		0x1f04c, 0x1f04c,
2217		0x1f284, 0x1f290,
2218		0x1f2c0, 0x1f2c0,
2219		0x1f2e0, 0x1f2e0,
2220		0x1f300, 0x1f384,
2221		0x1f3c0, 0x1f3c8,
2222		0x1f408, 0x1f40c,
2223		0x1f440, 0x1f444,
2224		0x1f44c, 0x1f44c,
2225		0x1f684, 0x1f690,
2226		0x1f6c0, 0x1f6c0,
2227		0x1f6e0, 0x1f6e0,
2228		0x1f700, 0x1f784,
2229		0x1f7c0, 0x1f7c8,
2230		0x1f808, 0x1f80c,
2231		0x1f840, 0x1f844,
2232		0x1f84c, 0x1f84c,
2233		0x1fa84, 0x1fa90,
2234		0x1fac0, 0x1fac0,
2235		0x1fae0, 0x1fae0,
2236		0x1fb00, 0x1fb84,
2237		0x1fbc0, 0x1fbc8,
2238		0x1fc08, 0x1fc0c,
2239		0x1fc40, 0x1fc44,
2240		0x1fc4c, 0x1fc4c,
2241		0x1fe84, 0x1fe90,
2242		0x1fec0, 0x1fec0,
2243		0x1fee0, 0x1fee0,
2244		0x1ff00, 0x1ff84,
2245		0x1ffc0, 0x1ffc8,
2246		0x30000, 0x30030,
2247		0x30100, 0x30168,
2248		0x30190, 0x301a0,
2249		0x301a8, 0x301b8,
2250		0x301c4, 0x301c8,
2251		0x301d0, 0x301d0,
2252		0x30200, 0x30320,
2253		0x30400, 0x304b4,
2254		0x304c0, 0x3052c,
2255		0x30540, 0x3061c,
2256		0x30800, 0x308a0,
2257		0x308c0, 0x30908,
2258		0x30910, 0x309b8,
2259		0x30a00, 0x30a04,
2260		0x30a0c, 0x30a14,
2261		0x30a1c, 0x30a2c,
2262		0x30a44, 0x30a50,
2263		0x30a74, 0x30a74,
2264		0x30a7c, 0x30afc,
2265		0x30b08, 0x30c24,
2266		0x30d00, 0x30d14,
2267		0x30d1c, 0x30d3c,
2268		0x30d44, 0x30d4c,
2269		0x30d54, 0x30d74,
2270		0x30d7c, 0x30d7c,
2271		0x30de0, 0x30de0,
2272		0x30e00, 0x30ed4,
2273		0x30f00, 0x30fa4,
2274		0x30fc0, 0x30fc4,
2275		0x31000, 0x31004,
2276		0x31080, 0x310fc,
2277		0x31208, 0x31220,
2278		0x3123c, 0x31254,
2279		0x31300, 0x31300,
2280		0x31308, 0x3131c,
2281		0x31338, 0x3133c,
2282		0x31380, 0x31380,
2283		0x31388, 0x313a8,
2284		0x313b4, 0x313b4,
2285		0x31400, 0x31420,
2286		0x31438, 0x3143c,
2287		0x31480, 0x31480,
2288		0x314a8, 0x314a8,
2289		0x314b0, 0x314b4,
2290		0x314c8, 0x314d4,
2291		0x31a40, 0x31a4c,
2292		0x31af0, 0x31b20,
2293		0x31b38, 0x31b3c,
2294		0x31b80, 0x31b80,
2295		0x31ba8, 0x31ba8,
2296		0x31bb0, 0x31bb4,
2297		0x31bc8, 0x31bd4,
2298		0x32140, 0x3218c,
2299		0x321f0, 0x321f4,
2300		0x32200, 0x32200,
2301		0x32218, 0x32218,
2302		0x32400, 0x32400,
2303		0x32408, 0x3241c,
2304		0x32618, 0x32620,
2305		0x32664, 0x32664,
2306		0x326a8, 0x326a8,
2307		0x326ec, 0x326ec,
2308		0x32a00, 0x32abc,
2309		0x32b00, 0x32b18,
2310		0x32b20, 0x32b38,
2311		0x32b40, 0x32b58,
2312		0x32b60, 0x32b78,
2313		0x32c00, 0x32c00,
2314		0x32c08, 0x32c3c,
2315		0x33000, 0x3302c,
2316		0x33034, 0x33050,
2317		0x33058, 0x33058,
2318		0x33060, 0x3308c,
2319		0x3309c, 0x330ac,
2320		0x330c0, 0x330c0,
2321		0x330c8, 0x330d0,
2322		0x330d8, 0x330e0,
2323		0x330ec, 0x3312c,
2324		0x33134, 0x33150,
2325		0x33158, 0x33158,
2326		0x33160, 0x3318c,
2327		0x3319c, 0x331ac,
2328		0x331c0, 0x331c0,
2329		0x331c8, 0x331d0,
2330		0x331d8, 0x331e0,
2331		0x331ec, 0x33290,
2332		0x33298, 0x332c4,
2333		0x332e4, 0x33390,
2334		0x33398, 0x333c4,
2335		0x333e4, 0x3342c,
2336		0x33434, 0x33450,
2337		0x33458, 0x33458,
2338		0x33460, 0x3348c,
2339		0x3349c, 0x334ac,
2340		0x334c0, 0x334c0,
2341		0x334c8, 0x334d0,
2342		0x334d8, 0x334e0,
2343		0x334ec, 0x3352c,
2344		0x33534, 0x33550,
2345		0x33558, 0x33558,
2346		0x33560, 0x3358c,
2347		0x3359c, 0x335ac,
2348		0x335c0, 0x335c0,
2349		0x335c8, 0x335d0,
2350		0x335d8, 0x335e0,
2351		0x335ec, 0x33690,
2352		0x33698, 0x336c4,
2353		0x336e4, 0x33790,
2354		0x33798, 0x337c4,
2355		0x337e4, 0x337fc,
2356		0x33814, 0x33814,
2357		0x33854, 0x33868,
2358		0x33880, 0x3388c,
2359		0x338c0, 0x338d0,
2360		0x338e8, 0x338ec,
2361		0x33900, 0x3392c,
2362		0x33934, 0x33950,
2363		0x33958, 0x33958,
2364		0x33960, 0x3398c,
2365		0x3399c, 0x339ac,
2366		0x339c0, 0x339c0,
2367		0x339c8, 0x339d0,
2368		0x339d8, 0x339e0,
2369		0x339ec, 0x33a90,
2370		0x33a98, 0x33ac4,
2371		0x33ae4, 0x33b10,
2372		0x33b24, 0x33b28,
2373		0x33b38, 0x33b50,
2374		0x33bf0, 0x33c10,
2375		0x33c24, 0x33c28,
2376		0x33c38, 0x33c50,
2377		0x33cf0, 0x33cfc,
2378		0x34000, 0x34030,
2379		0x34100, 0x34168,
2380		0x34190, 0x341a0,
2381		0x341a8, 0x341b8,
2382		0x341c4, 0x341c8,
2383		0x341d0, 0x341d0,
2384		0x34200, 0x34320,
2385		0x34400, 0x344b4,
2386		0x344c0, 0x3452c,
2387		0x34540, 0x3461c,
2388		0x34800, 0x348a0,
2389		0x348c0, 0x34908,
2390		0x34910, 0x349b8,
2391		0x34a00, 0x34a04,
2392		0x34a0c, 0x34a14,
2393		0x34a1c, 0x34a2c,
2394		0x34a44, 0x34a50,
2395		0x34a74, 0x34a74,
2396		0x34a7c, 0x34afc,
2397		0x34b08, 0x34c24,
2398		0x34d00, 0x34d14,
2399		0x34d1c, 0x34d3c,
2400		0x34d44, 0x34d4c,
2401		0x34d54, 0x34d74,
2402		0x34d7c, 0x34d7c,
2403		0x34de0, 0x34de0,
2404		0x34e00, 0x34ed4,
2405		0x34f00, 0x34fa4,
2406		0x34fc0, 0x34fc4,
2407		0x35000, 0x35004,
2408		0x35080, 0x350fc,
2409		0x35208, 0x35220,
2410		0x3523c, 0x35254,
2411		0x35300, 0x35300,
2412		0x35308, 0x3531c,
2413		0x35338, 0x3533c,
2414		0x35380, 0x35380,
2415		0x35388, 0x353a8,
2416		0x353b4, 0x353b4,
2417		0x35400, 0x35420,
2418		0x35438, 0x3543c,
2419		0x35480, 0x35480,
2420		0x354a8, 0x354a8,
2421		0x354b0, 0x354b4,
2422		0x354c8, 0x354d4,
2423		0x35a40, 0x35a4c,
2424		0x35af0, 0x35b20,
2425		0x35b38, 0x35b3c,
2426		0x35b80, 0x35b80,
2427		0x35ba8, 0x35ba8,
2428		0x35bb0, 0x35bb4,
2429		0x35bc8, 0x35bd4,
2430		0x36140, 0x3618c,
2431		0x361f0, 0x361f4,
2432		0x36200, 0x36200,
2433		0x36218, 0x36218,
2434		0x36400, 0x36400,
2435		0x36408, 0x3641c,
2436		0x36618, 0x36620,
2437		0x36664, 0x36664,
2438		0x366a8, 0x366a8,
2439		0x366ec, 0x366ec,
2440		0x36a00, 0x36abc,
2441		0x36b00, 0x36b18,
2442		0x36b20, 0x36b38,
2443		0x36b40, 0x36b58,
2444		0x36b60, 0x36b78,
2445		0x36c00, 0x36c00,
2446		0x36c08, 0x36c3c,
2447		0x37000, 0x3702c,
2448		0x37034, 0x37050,
2449		0x37058, 0x37058,
2450		0x37060, 0x3708c,
2451		0x3709c, 0x370ac,
2452		0x370c0, 0x370c0,
2453		0x370c8, 0x370d0,
2454		0x370d8, 0x370e0,
2455		0x370ec, 0x3712c,
2456		0x37134, 0x37150,
2457		0x37158, 0x37158,
2458		0x37160, 0x3718c,
2459		0x3719c, 0x371ac,
2460		0x371c0, 0x371c0,
2461		0x371c8, 0x371d0,
2462		0x371d8, 0x371e0,
2463		0x371ec, 0x37290,
2464		0x37298, 0x372c4,
2465		0x372e4, 0x37390,
2466		0x37398, 0x373c4,
2467		0x373e4, 0x3742c,
2468		0x37434, 0x37450,
2469		0x37458, 0x37458,
2470		0x37460, 0x3748c,
2471		0x3749c, 0x374ac,
2472		0x374c0, 0x374c0,
2473		0x374c8, 0x374d0,
2474		0x374d8, 0x374e0,
2475		0x374ec, 0x3752c,
2476		0x37534, 0x37550,
2477		0x37558, 0x37558,
2478		0x37560, 0x3758c,
2479		0x3759c, 0x375ac,
2480		0x375c0, 0x375c0,
2481		0x375c8, 0x375d0,
2482		0x375d8, 0x375e0,
2483		0x375ec, 0x37690,
2484		0x37698, 0x376c4,
2485		0x376e4, 0x37790,
2486		0x37798, 0x377c4,
2487		0x377e4, 0x377fc,
2488		0x37814, 0x37814,
2489		0x37854, 0x37868,
2490		0x37880, 0x3788c,
2491		0x378c0, 0x378d0,
2492		0x378e8, 0x378ec,
2493		0x37900, 0x3792c,
2494		0x37934, 0x37950,
2495		0x37958, 0x37958,
2496		0x37960, 0x3798c,
2497		0x3799c, 0x379ac,
2498		0x379c0, 0x379c0,
2499		0x379c8, 0x379d0,
2500		0x379d8, 0x379e0,
2501		0x379ec, 0x37a90,
2502		0x37a98, 0x37ac4,
2503		0x37ae4, 0x37b10,
2504		0x37b24, 0x37b28,
2505		0x37b38, 0x37b50,
2506		0x37bf0, 0x37c10,
2507		0x37c24, 0x37c28,
2508		0x37c38, 0x37c50,
2509		0x37cf0, 0x37cfc,
2510		0x40040, 0x40040,
2511		0x40080, 0x40084,
2512		0x40100, 0x40100,
2513		0x40140, 0x401bc,
2514		0x40200, 0x40214,
2515		0x40228, 0x40228,
2516		0x40240, 0x40258,
2517		0x40280, 0x40280,
2518		0x40304, 0x40304,
2519		0x40330, 0x4033c,
2520		0x41304, 0x413c8,
2521		0x413d0, 0x413dc,
2522		0x413f0, 0x413f0,
2523		0x41400, 0x4140c,
2524		0x41414, 0x4141c,
2525		0x41480, 0x414d0,
2526		0x44000, 0x4407c,
2527		0x440c0, 0x441ac,
2528		0x441b4, 0x4427c,
2529		0x442c0, 0x443ac,
2530		0x443b4, 0x4447c,
2531		0x444c0, 0x445ac,
2532		0x445b4, 0x4467c,
2533		0x446c0, 0x447ac,
2534		0x447b4, 0x4487c,
2535		0x448c0, 0x449ac,
2536		0x449b4, 0x44a7c,
2537		0x44ac0, 0x44bac,
2538		0x44bb4, 0x44c7c,
2539		0x44cc0, 0x44dac,
2540		0x44db4, 0x44e7c,
2541		0x44ec0, 0x44fac,
2542		0x44fb4, 0x4507c,
2543		0x450c0, 0x451ac,
2544		0x451b4, 0x451fc,
2545		0x45800, 0x45804,
2546		0x45810, 0x45830,
2547		0x45840, 0x45860,
2548		0x45868, 0x45868,
2549		0x45880, 0x45884,
2550		0x458a0, 0x458b0,
2551		0x45a00, 0x45a04,
2552		0x45a10, 0x45a30,
2553		0x45a40, 0x45a60,
2554		0x45a68, 0x45a68,
2555		0x45a80, 0x45a84,
2556		0x45aa0, 0x45ab0,
2557		0x460c0, 0x460e4,
2558		0x47000, 0x4703c,
2559		0x47044, 0x4708c,
2560		0x47200, 0x47250,
2561		0x47400, 0x47408,
2562		0x47414, 0x47420,
2563		0x47600, 0x47618,
2564		0x47800, 0x47814,
2565		0x47820, 0x4782c,
2566		0x50000, 0x50084,
2567		0x50090, 0x500cc,
2568		0x50300, 0x50384,
2569		0x50400, 0x50400,
2570		0x50800, 0x50884,
2571		0x50890, 0x508cc,
2572		0x50b00, 0x50b84,
2573		0x50c00, 0x50c00,
2574		0x51000, 0x51020,
2575		0x51028, 0x510b0,
2576		0x51300, 0x51324,
2577	};
2578
2579	static const unsigned int t6vf_reg_ranges[] = {
2580		VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
2581		VF_MPS_REG(A_MPS_VF_CTL),
2582		VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
2583		VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
2584		VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
2585		VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
2586		FW_T6VF_MBDATA_BASE_ADDR,
2587		FW_T6VF_MBDATA_BASE_ADDR +
2588		((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
2589	};
2590
2591	u32 *buf_end = (u32 *)(buf + buf_size);
2592	const unsigned int *reg_ranges;
2593	int reg_ranges_size, range;
2594	unsigned int chip_version = chip_id(adap);
2595
2596	/*
2597	 * Select the right set of register ranges to dump depending on the
2598	 * adapter chip type.
2599	 */
2600	switch (chip_version) {
2601	case CHELSIO_T4:
2602		if (adap->flags & IS_VF) {
2603			reg_ranges = t4vf_reg_ranges;
2604			reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges);
2605		} else {
2606			reg_ranges = t4_reg_ranges;
2607			reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2608		}
2609		break;
2610
2611	case CHELSIO_T5:
2612		if (adap->flags & IS_VF) {
2613			reg_ranges = t5vf_reg_ranges;
2614			reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges);
2615		} else {
2616			reg_ranges = t5_reg_ranges;
2617			reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2618		}
2619		break;
2620
2621	case CHELSIO_T6:
2622		if (adap->flags & IS_VF) {
2623			reg_ranges = t6vf_reg_ranges;
2624			reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
2625		} else {
2626			reg_ranges = t6_reg_ranges;
2627			reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2628		}
2629		break;
2630
2631	default:
2632		CH_ERR(adap,
2633			"Unsupported chip version %d\n", chip_version);
2634		return;
2635	}
2636
2637	/*
2638	 * Clear the register buffer and insert the appropriate register
2639	 * values selected by the above register ranges.
2640	 */
2641	memset(buf, 0, buf_size);
2642	for (range = 0; range < reg_ranges_size; range += 2) {
2643		unsigned int reg = reg_ranges[range];
2644		unsigned int last_reg = reg_ranges[range + 1];
2645		u32 *bufp = (u32 *)(buf + reg);
2646
2647		/*
2648		 * Iterate across the register range filling in the register
2649		 * buffer but don't write past the end of the register buffer.
2650		 */
2651		while (reg <= last_reg && bufp < buf_end) {
2652			*bufp++ = t4_read_reg(adap, reg);
2653			reg += sizeof(u32);
2654		}
2655	}
2656}
2657
2658/*
2659 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
2660 * VPD-R sections.
2661 */
2662struct t4_vpd_hdr {
2663	u8  id_tag;
2664	u8  id_len[2];
2665	u8  id_data[ID_LEN];
2666	u8  vpdr_tag;
2667	u8  vpdr_len[2];
2668};
2669
2670/*
2671 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
2672 */
2673#define EEPROM_DELAY		10		/* 10us per poll spin */
2674#define EEPROM_MAX_POLL		5000		/* x 5000 == 50ms */
2675
2676#define EEPROM_STAT_ADDR	0x7bfc
2677#define VPD_SIZE		0x800
2678#define VPD_BASE		0x400
2679#define VPD_BASE_OLD		0
2680#define VPD_LEN			1024
2681#define VPD_INFO_FLD_HDR_SIZE	3
2682#define CHELSIO_VPD_UNIQUE_ID	0x82
2683
2684/*
2685 * Small utility function to wait till any outstanding VPD Access is complete.
2686 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
2687 * VPD Access in flight.  This allows us to handle the problem of having a
2688 * previous VPD Access time out and prevent an attempt to inject a new VPD
2689 * Request before any in-flight VPD reguest has completed.
2690 */
2691static int t4_seeprom_wait(struct adapter *adapter)
2692{
2693	unsigned int base = adapter->params.pci.vpd_cap_addr;
2694	int max_poll;
2695
2696	/*
2697	 * If no VPD Access is in flight, we can just return success right
2698	 * away.
2699	 */
2700	if (!adapter->vpd_busy)
2701		return 0;
2702
2703	/*
2704	 * Poll the VPD Capability Address/Flag register waiting for it
2705	 * to indicate that the operation is complete.
2706	 */
2707	max_poll = EEPROM_MAX_POLL;
2708	do {
2709		u16 val;
2710
2711		udelay(EEPROM_DELAY);
2712		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
2713
2714		/*
2715		 * If the operation is complete, mark the VPD as no longer
2716		 * busy and return success.
2717		 */
2718		if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
2719			adapter->vpd_busy = 0;
2720			return 0;
2721		}
2722	} while (--max_poll);
2723
2724	/*
2725	 * Failure!  Note that we leave the VPD Busy status set in order to
2726	 * avoid pushing a new VPD Access request into the VPD Capability till
2727	 * the current operation eventually succeeds.  It's a bug to issue a
2728	 * new request when an existing request is in flight and will result
2729	 * in corrupt hardware state.
2730	 */
2731	return -ETIMEDOUT;
2732}
2733
2734/**
2735 *	t4_seeprom_read - read a serial EEPROM location
2736 *	@adapter: adapter to read
2737 *	@addr: EEPROM virtual address
2738 *	@data: where to store the read data
2739 *
2740 *	Read a 32-bit word from a location in serial EEPROM using the card's PCI
2741 *	VPD capability.  Note that this function must be called with a virtual
2742 *	address.
2743 */
2744int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
2745{
2746	unsigned int base = adapter->params.pci.vpd_cap_addr;
2747	int ret;
2748
2749	/*
2750	 * VPD Accesses must alway be 4-byte aligned!
2751	 */
2752	if (addr >= EEPROMVSIZE || (addr & 3))
2753		return -EINVAL;
2754
2755	/*
2756	 * Wait for any previous operation which may still be in flight to
2757	 * complete.
2758	 */
2759	ret = t4_seeprom_wait(adapter);
2760	if (ret) {
2761		CH_ERR(adapter, "VPD still busy from previous operation\n");
2762		return ret;
2763	}
2764
2765	/*
2766	 * Issue our new VPD Read request, mark the VPD as being busy and wait
2767	 * for our request to complete.  If it doesn't complete, note the
2768	 * error and return it to our caller.  Note that we do not reset the
2769	 * VPD Busy status!
2770	 */
2771	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
2772	adapter->vpd_busy = 1;
2773	adapter->vpd_flag = PCI_VPD_ADDR_F;
2774	ret = t4_seeprom_wait(adapter);
2775	if (ret) {
2776		CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
2777		return ret;
2778	}
2779
2780	/*
2781	 * Grab the returned data, swizzle it into our endianess and
2782	 * return success.
2783	 */
2784	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
2785	*data = le32_to_cpu(*data);
2786	return 0;
2787}
2788
2789/**
2790 *	t4_seeprom_write - write a serial EEPROM location
2791 *	@adapter: adapter to write
2792 *	@addr: virtual EEPROM address
2793 *	@data: value to write
2794 *
2795 *	Write a 32-bit word to a location in serial EEPROM using the card's PCI
2796 *	VPD capability.  Note that this function must be called with a virtual
2797 *	address.
2798 */
2799int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
2800{
2801	unsigned int base = adapter->params.pci.vpd_cap_addr;
2802	int ret;
2803	u32 stats_reg;
2804	int max_poll;
2805
2806	/*
2807	 * VPD Accesses must alway be 4-byte aligned!
2808	 */
2809	if (addr >= EEPROMVSIZE || (addr & 3))
2810		return -EINVAL;
2811
2812	/*
2813	 * Wait for any previous operation which may still be in flight to
2814	 * complete.
2815	 */
2816	ret = t4_seeprom_wait(adapter);
2817	if (ret) {
2818		CH_ERR(adapter, "VPD still busy from previous operation\n");
2819		return ret;
2820	}
2821
2822	/*
2823	 * Issue our new VPD Read request, mark the VPD as being busy and wait
2824	 * for our request to complete.  If it doesn't complete, note the
2825	 * error and return it to our caller.  Note that we do not reset the
2826	 * VPD Busy status!
2827	 */
2828	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
2829				 cpu_to_le32(data));
2830	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
2831				 (u16)addr | PCI_VPD_ADDR_F);
2832	adapter->vpd_busy = 1;
2833	adapter->vpd_flag = 0;
2834	ret = t4_seeprom_wait(adapter);
2835	if (ret) {
2836		CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
2837		return ret;
2838	}
2839
2840	/*
2841	 * Reset PCI_VPD_DATA register after a transaction and wait for our
2842	 * request to complete. If it doesn't complete, return error.
2843	 */
2844	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
2845	max_poll = EEPROM_MAX_POLL;
2846	do {
2847		udelay(EEPROM_DELAY);
2848		t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
2849	} while ((stats_reg & 0x1) && --max_poll);
2850	if (!max_poll)
2851		return -ETIMEDOUT;
2852
2853	/* Return success! */
2854	return 0;
2855}
2856
2857/**
2858 *	t4_eeprom_ptov - translate a physical EEPROM address to virtual
2859 *	@phys_addr: the physical EEPROM address
2860 *	@fn: the PCI function number
2861 *	@sz: size of function-specific area
2862 *
2863 *	Translate a physical EEPROM address to virtual.  The first 1K is
2864 *	accessed through virtual addresses starting at 31K, the rest is
2865 *	accessed through virtual addresses starting at 0.
2866 *
2867 *	The mapping is as follows:
2868 *	[0..1K) -> [31K..32K)
2869 *	[1K..1K+A) -> [ES-A..ES)
2870 *	[1K+A..ES) -> [0..ES-A-1K)
2871 *
2872 *	where A = @fn * @sz, and ES = EEPROM size.
2873 */
2874int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2875{
2876	fn *= sz;
2877	if (phys_addr < 1024)
2878		return phys_addr + (31 << 10);
2879	if (phys_addr < 1024 + fn)
2880		return EEPROMSIZE - fn + phys_addr - 1024;
2881	if (phys_addr < EEPROMSIZE)
2882		return phys_addr - 1024 - fn;
2883	return -EINVAL;
2884}
2885
2886/**
2887 *	t4_seeprom_wp - enable/disable EEPROM write protection
2888 *	@adapter: the adapter
2889 *	@enable: whether to enable or disable write protection
2890 *
2891 *	Enables or disables write protection on the serial EEPROM.
2892 */
2893int t4_seeprom_wp(struct adapter *adapter, int enable)
2894{
2895	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
2896}
2897
2898/**
2899 *	get_vpd_keyword_val - Locates an information field keyword in the VPD
2900 *	@v: Pointer to buffered vpd data structure
2901 *	@kw: The keyword to search for
2902 *
2903 *	Returns the value of the information field keyword or
2904 *	-ENOENT otherwise.
2905 */
2906static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
2907{
2908	int i;
2909	unsigned int offset , len;
2910	const u8 *buf = (const u8 *)v;
2911	const u8 *vpdr_len = &v->vpdr_len[0];
2912	offset = sizeof(struct t4_vpd_hdr);
2913	len =  (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);
2914
2915	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
2916		return -ENOENT;
2917	}
2918
2919	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
2920		if(memcmp(buf + i , kw , 2) == 0){
2921			i += VPD_INFO_FLD_HDR_SIZE;
2922			return i;
2923		}
2924
2925		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
2926	}
2927
2928	return -ENOENT;
2929}
2930
2931
2932/**
2933 *	get_vpd_params - read VPD parameters from VPD EEPROM
2934 *	@adapter: adapter to read
2935 *	@p: where to store the parameters
2936 *	@vpd: caller provided temporary space to read the VPD into
2937 *
2938 *	Reads card parameters stored in VPD EEPROM.
2939 */
2940static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
2941    u8 *vpd)
2942{
2943	int i, ret, addr;
2944	int ec, sn, pn, na;
2945	u8 csum;
2946	const struct t4_vpd_hdr *v;
2947
2948	/*
2949	 * Card information normally starts at VPD_BASE but early cards had
2950	 * it at 0.
2951	 */
2952	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
2953	if (ret)
2954		return (ret);
2955
2956	/*
2957	 * The VPD shall have a unique identifier specified by the PCI SIG.
2958	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
2959	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
2960	 * is expected to automatically put this entry at the
2961	 * beginning of the VPD.
2962	 */
2963	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
2964
2965	for (i = 0; i < VPD_LEN; i += 4) {
2966		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
2967		if (ret)
2968			return ret;
2969	}
2970 	v = (const struct t4_vpd_hdr *)vpd;
2971
2972#define FIND_VPD_KW(var,name) do { \
2973	var = get_vpd_keyword_val(v , name); \
2974	if (var < 0) { \
2975		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
2976		return -EINVAL; \
2977	} \
2978} while (0)
2979
2980	FIND_VPD_KW(i, "RV");
2981	for (csum = 0; i >= 0; i--)
2982		csum += vpd[i];
2983
2984	if (csum) {
2985		CH_ERR(adapter,
2986			"corrupted VPD EEPROM, actual csum %u\n", csum);
2987		return -EINVAL;
2988	}
2989
2990	FIND_VPD_KW(ec, "EC");
2991	FIND_VPD_KW(sn, "SN");
2992	FIND_VPD_KW(pn, "PN");
2993	FIND_VPD_KW(na, "NA");
2994#undef FIND_VPD_KW
2995
2996	memcpy(p->id, v->id_data, ID_LEN);
2997	strstrip(p->id);
2998	memcpy(p->ec, vpd + ec, EC_LEN);
2999	strstrip(p->ec);
3000	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
3001	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
3002	strstrip(p->sn);
3003	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
3004	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
3005	strstrip((char *)p->pn);
3006	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
3007	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
3008	strstrip((char *)p->na);
3009
3010	return 0;
3011}
3012
/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,	/* max retries for SF operations */

	/* flash command opcodes (sent via sf1_write before data phases) */
	SF_PROG_PAGE    = 2,	/* program 256B page */
	SF_WR_DISABLE   = 4,	/* disable writes */
	SF_RD_STATUS    = 5,	/* read status register */
	SF_WR_ENABLE    = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_RD_ID	= 0x9f,	/* read ID */
	SF_ERASE_SECTOR = 0xd8,	/* erase 64KB sector */
};
3026
3027/**
3028 *	sf1_read - read data from the serial flash
3029 *	@adapter: the adapter
3030 *	@byte_cnt: number of bytes to read
3031 *	@cont: whether another operation will be chained
3032 *	@lock: whether to lock SF for PL access only
3033 *	@valp: where to store the read data
3034 *
3035 *	Reads up to 4 bytes of data from the serial flash.  The location of
3036 *	the read needs to be specified prior to calling this by issuing the
3037 *	appropriate commands to the serial flash.
3038 */
3039static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
3040		    int lock, u32 *valp)
3041{
3042	int ret;
3043
3044	if (!byte_cnt || byte_cnt > 4)
3045		return -EINVAL;
3046	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3047		return -EBUSY;
3048	t4_write_reg(adapter, A_SF_OP,
3049		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
3050	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3051	if (!ret)
3052		*valp = t4_read_reg(adapter, A_SF_DATA);
3053	return ret;
3054}
3055
3056/**
3057 *	sf1_write - write data to the serial flash
3058 *	@adapter: the adapter
3059 *	@byte_cnt: number of bytes to write
3060 *	@cont: whether another operation will be chained
3061 *	@lock: whether to lock SF for PL access only
3062 *	@val: value to write
3063 *
3064 *	Writes up to 4 bytes of data to the serial flash.  The location of
3065 *	the write needs to be specified prior to calling this by issuing the
3066 *	appropriate commands to the serial flash.
3067 */
3068static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
3069		     int lock, u32 val)
3070{
3071	if (!byte_cnt || byte_cnt > 4)
3072		return -EINVAL;
3073	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
3074		return -EBUSY;
3075	t4_write_reg(adapter, A_SF_DATA, val);
3076	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
3077		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
3078	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
3079}
3080
3081/**
3082 *	flash_wait_op - wait for a flash operation to complete
3083 *	@adapter: the adapter
3084 *	@attempts: max number of polls of the status register
3085 *	@delay: delay between polls in ms
3086 *
3087 *	Wait for a flash operation to complete by polling the status register.
3088 */
3089static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
3090{
3091	int ret;
3092	u32 status;
3093
3094	while (1) {
3095		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3096		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
3097			return ret;
3098		if (!(status & 1))
3099			return 0;
3100		if (--attempts == 0)
3101			return -EAGAIN;
3102		if (delay)
3103			msleep(delay);
3104	}
3105}
3106
3107/**
3108 *	t4_read_flash - read words from serial flash
3109 *	@adapter: the adapter
3110 *	@addr: the start address for the read
3111 *	@nwords: how many 32-bit words to read
3112 *	@data: where to store the read data
3113 *	@byte_oriented: whether to store data as bytes or as words
3114 *
3115 *	Read the specified number of 32-bit words from the serial flash.
3116 *	If @byte_oriented is set the read data is stored as a byte array
3117 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
3118 *	natural endianness.
3119 */
3120int t4_read_flash(struct adapter *adapter, unsigned int addr,
3121		  unsigned int nwords, u32 *data, int byte_oriented)
3122{
3123	int ret;
3124
3125	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
3126		return -EINVAL;
3127
3128	addr = swab32(addr) | SF_RD_DATA_FAST;
3129
3130	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3131	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
3132		return ret;
3133
3134	for ( ; nwords; nwords--, data++) {
3135		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3136		if (nwords == 1)
3137			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
3138		if (ret)
3139			return ret;
3140		if (byte_oriented)
3141			*data = (__force __u32)(cpu_to_be32(*data));
3142	}
3143	return 0;
3144}
3145
3146/**
3147 *	t4_write_flash - write up to a page of data to the serial flash
3148 *	@adapter: the adapter
3149 *	@addr: the start address to write
3150 *	@n: length of data to write in bytes
3151 *	@data: the data to write
3152 *	@byte_oriented: whether to store data as bytes or as words
3153 *
3154 *	Writes up to a page of data (256 bytes) to the serial flash starting
3155 *	at the given address.  All the data must be written to the same page.
3156 *	If @byte_oriented is set the write data is stored as byte stream
3157 *	(i.e. matches what on disk), otherwise in big-endian.
3158 */
3159int t4_write_flash(struct adapter *adapter, unsigned int addr,
3160			  unsigned int n, const u8 *data, int byte_oriented)
3161{
3162	int ret;
3163	u32 buf[SF_PAGE_SIZE / 4];
3164	unsigned int i, c, left, val, offset = addr & 0xff;
3165
3166	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
3167		return -EINVAL;
3168
3169	val = swab32(addr) | SF_PROG_PAGE;
3170
3171	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3172	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
3173		goto unlock;
3174
3175	for (left = n; left; left -= c) {
3176		c = min(left, 4U);
3177		for (val = 0, i = 0; i < c; ++i)
3178			val = (val << 8) + *data++;
3179
3180		if (!byte_oriented)
3181			val = cpu_to_be32(val);
3182
3183		ret = sf1_write(adapter, c, c != left, 1, val);
3184		if (ret)
3185			goto unlock;
3186	}
3187	ret = flash_wait_op(adapter, 8, 1);
3188	if (ret)
3189		goto unlock;
3190
3191	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
3192
3193	/* Read the page to verify the write succeeded */
3194	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
3195			    byte_oriented);
3196	if (ret)
3197		return ret;
3198
3199	if (memcmp(data - n, (u8 *)buf + offset, n)) {
3200		CH_ERR(adapter,
3201			"failed to correctly write the flash page at %#x\n",
3202			addr);
3203		return -EIO;
3204	}
3205	return 0;
3206
3207unlock:
3208	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
3209	return ret;
3210}
3211
3212/**
3213 *	t4_get_fw_version - read the firmware version
3214 *	@adapter: the adapter
3215 *	@vers: where to place the version
3216 *
3217 *	Reads the FW version from flash.
3218 */
3219int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3220{
3221	return t4_read_flash(adapter, FLASH_FW_START +
3222			     offsetof(struct fw_hdr, fw_ver), 1,
3223			     vers, 0);
3224}
3225
3226/**
3227 *	t4_get_bs_version - read the firmware bootstrap version
3228 *	@adapter: the adapter
3229 *	@vers: where to place the version
3230 *
3231 *	Reads the FW Bootstrap version from flash.
3232 */
3233int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3234{
3235	return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3236			     offsetof(struct fw_hdr, fw_ver), 1,
3237			     vers, 0);
3238}
3239
3240/**
3241 *	t4_get_tp_version - read the TP microcode version
3242 *	@adapter: the adapter
3243 *	@vers: where to place the version
3244 *
3245 *	Reads the TP microcode version from flash.
3246 */
3247int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3248{
3249	return t4_read_flash(adapter, FLASH_FW_START +
3250			     offsetof(struct fw_hdr, tp_microcode_ver),
3251			     1, vers, 0);
3252}
3253
3254/**
3255 *	t4_get_exprom_version - return the Expansion ROM version (if any)
3256 *	@adapter: the adapter
3257 *	@vers: where to place the version
3258 *
3259 *	Reads the Expansion ROM header from FLASH and returns the version
3260 *	number (if present) through the @vers return value pointer.  We return
3261 *	this in the Firmware Version Format since it's convenient.  Return
3262 *	0 on success, -ENOENT if no Expansion ROM is present.
3263 */
3264int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3265{
3266	struct exprom_header {
3267		unsigned char hdr_arr[16];	/* must start with 0x55aa */
3268		unsigned char hdr_ver[4];	/* Expansion ROM version */
3269	} *hdr;
3270	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3271					   sizeof(u32))];
3272	int ret;
3273
3274	ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3275			    ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3276			    0);
3277	if (ret)
3278		return ret;
3279
3280	hdr = (struct exprom_header *)exprom_header_buf;
3281	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3282		return -ENOENT;
3283
3284	*vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
3285		 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
3286		 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
3287		 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
3288	return 0;
3289}
3290
3291/**
3292 *	t4_get_scfg_version - return the Serial Configuration version
3293 *	@adapter: the adapter
3294 *	@vers: where to place the version
3295 *
3296 *	Reads the Serial Configuration Version via the Firmware interface
3297 *	(thus this can only be called once we're ready to issue Firmware
3298 *	commands).  The format of the Serial Configuration version is
3299 *	adapter specific.  Returns 0 on success, an error on failure.
3300 *
3301 *	Note that early versions of the Firmware didn't include the ability
3302 *	to retrieve the Serial Configuration version, so we zero-out the
3303 *	return-value parameter in that case to avoid leaving it with
3304 *	garbage in it.
3305 *
3306 *	Also note that the Firmware will return its cached copy of the Serial
3307 *	Initialization Revision ID, not the actual Revision ID as written in
3308 *	the Serial EEPROM.  This is only an issue if a new VPD has been written
3309 *	and the Firmware/Chip haven't yet gone through a RESET sequence.  So
3310 *	it's best to defer calling this routine till after a FW_RESET_CMD has
3311 *	been issued if the Host Driver will be performing a full adapter
3312 *	initialization.
3313 */
3314int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3315{
3316	u32 scfgrev_param;
3317	int ret;
3318
3319	scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3320			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
3321	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3322			      1, &scfgrev_param, vers);
3323	if (ret)
3324		*vers = 0;
3325	return ret;
3326}
3327
3328/**
3329 *	t4_get_vpd_version - return the VPD version
3330 *	@adapter: the adapter
3331 *	@vers: where to place the version
3332 *
3333 *	Reads the VPD via the Firmware interface (thus this can only be called
3334 *	once we're ready to issue Firmware commands).  The format of the
3335 *	VPD version is adapter specific.  Returns 0 on success, an error on
3336 *	failure.
3337 *
3338 *	Note that early versions of the Firmware didn't include the ability
3339 *	to retrieve the VPD version, so we zero-out the return-value parameter
3340 *	in that case to avoid leaving it with garbage in it.
3341 *
3342 *	Also note that the Firmware will return its cached copy of the VPD
3343 *	Revision ID, not the actual Revision ID as written in the Serial
3344 *	EEPROM.  This is only an issue if a new VPD has been written and the
3345 *	Firmware/Chip haven't yet gone through a RESET sequence.  So it's best
3346 *	to defer calling this routine till after a FW_RESET_CMD has been issued
3347 *	if the Host Driver will be performing a full adapter initialization.
3348 */
3349int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3350{
3351	u32 vpdrev_param;
3352	int ret;
3353
3354	vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3355			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
3356	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3357			      1, &vpdrev_param, vers);
3358	if (ret)
3359		*vers = 0;
3360	return ret;
3361}
3362
3363/**
3364 *	t4_get_version_info - extract various chip/firmware version information
3365 *	@adapter: the adapter
3366 *
3367 *	Reads various chip/firmware version numbers and stores them into the
3368 *	adapter Adapter Parameters structure.  If any of the efforts fails
3369 *	the first failure will be returned, but all of the version numbers
3370 *	will be read.
3371 */
3372int t4_get_version_info(struct adapter *adapter)
3373{
3374	int ret = 0;
3375
3376	#define FIRST_RET(__getvinfo) \
3377	do { \
3378		int __ret = __getvinfo; \
3379		if (__ret && !ret) \
3380			ret = __ret; \
3381	} while (0)
3382
3383	FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3384	FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3385	FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3386	FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3387	FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3388	FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3389
3390	#undef FIRST_RET
3391
3392	return ret;
3393}
3394
3395/**
3396 *	t4_flash_erase_sectors - erase a range of flash sectors
3397 *	@adapter: the adapter
3398 *	@start: the first sector to erase
3399 *	@end: the last sector to erase
3400 *
3401 *	Erases the sectors in the given inclusive range.
3402 */
3403int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3404{
3405	int ret = 0;
3406
3407	if (end >= adapter->params.sf_nsec)
3408		return -EINVAL;
3409
3410	while (start <= end) {
3411		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3412		    (ret = sf1_write(adapter, 4, 0, 1,
3413				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
3414		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3415			CH_ERR(adapter,
3416				"erase of flash sector %d failed, error %d\n",
3417				start, ret);
3418			break;
3419		}
3420		start++;
3421	}
3422	t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
3423	return ret;
3424}
3425
3426/**
3427 *	t4_flash_cfg_addr - return the address of the flash configuration file
3428 *	@adapter: the adapter
3429 *
3430 *	Return the address within the flash where the Firmware Configuration
3431 *	File is stored, or an error if the device FLASH is too small to contain
3432 *	a Firmware Configuration File.
3433 */
3434int t4_flash_cfg_addr(struct adapter *adapter)
3435{
3436	/*
3437	 * If the device FLASH isn't large enough to hold a Firmware
3438	 * Configuration File, return an error.
3439	 */
3440	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
3441		return -ENOSPC;
3442
3443	return FLASH_CFG_START;
3444}
3445
3446/*
3447 * Return TRUE if the specified firmware matches the adapter.  I.e. T4
3448 * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
3449 * and emit an error message for mismatched firmware to save our caller the
3450 * effort ...
3451 */
3452static int t4_fw_matches_chip(struct adapter *adap,
3453			      const struct fw_hdr *hdr)
3454{
3455	/*
3456	 * The expression below will return FALSE for any unsupported adapter
3457	 * which will keep us "honest" in the future ...
3458	 */
3459	if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
3460	    (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
3461	    (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
3462		return 1;
3463
3464	CH_ERR(adap,
3465		"FW image (%d) is not suitable for this adapter (%d)\n",
3466		hdr->chip, chip_id(adap));
3467	return 0;
3468}
3469
3470/**
3471 *	t4_load_fw - download firmware
3472 *	@adap: the adapter
3473 *	@fw_data: the firmware image to write
3474 *	@size: image size
3475 *
3476 *	Write the supplied firmware image to the card's serial flash.
3477 */
3478int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3479{
3480	u32 csum;
3481	int ret, addr;
3482	unsigned int i;
3483	u8 first_page[SF_PAGE_SIZE];
3484	const u32 *p = (const u32 *)fw_data;
3485	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3486	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3487	unsigned int fw_start_sec;
3488	unsigned int fw_start;
3489	unsigned int fw_size;
3490
3491	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
3492		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
3493		fw_start = FLASH_FWBOOTSTRAP_START;
3494		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
3495	} else {
3496		fw_start_sec = FLASH_FW_START_SEC;
3497 		fw_start = FLASH_FW_START;
3498		fw_size = FLASH_FW_MAX_SIZE;
3499	}
3500
3501	if (!size) {
3502		CH_ERR(adap, "FW image has no data\n");
3503		return -EINVAL;
3504	}
3505	if (size & 511) {
3506		CH_ERR(adap,
3507			"FW image size not multiple of 512 bytes\n");
3508		return -EINVAL;
3509	}
3510	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
3511		CH_ERR(adap,
3512			"FW image size differs from size in FW header\n");
3513		return -EINVAL;
3514	}
3515	if (size > fw_size) {
3516		CH_ERR(adap, "FW image too large, max is %u bytes\n",
3517			fw_size);
3518		return -EFBIG;
3519	}
3520	if (!t4_fw_matches_chip(adap, hdr))
3521		return -EINVAL;
3522
3523	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3524		csum += be32_to_cpu(p[i]);
3525
3526	if (csum != 0xffffffff) {
3527		CH_ERR(adap,
3528			"corrupted firmware image, checksum %#x\n", csum);
3529		return -EINVAL;
3530	}
3531
3532	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
3533	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3534	if (ret)
3535		goto out;
3536
3537	/*
3538	 * We write the correct version at the end so the driver can see a bad
3539	 * version if the FW write fails.  Start by writing a copy of the
3540	 * first page with a bad version.
3541	 */
3542	memcpy(first_page, fw_data, SF_PAGE_SIZE);
3543	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3544	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
3545	if (ret)
3546		goto out;
3547
3548	addr = fw_start;
3549	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3550		addr += SF_PAGE_SIZE;
3551		fw_data += SF_PAGE_SIZE;
3552		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, 1);
3553		if (ret)
3554			goto out;
3555	}
3556
3557	ret = t4_write_flash(adap,
3558			     fw_start + offsetof(struct fw_hdr, fw_ver),
3559			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
3560out:
3561	if (ret)
3562		CH_ERR(adap, "firmware download failed, error %d\n",
3563			ret);
3564	return ret;
3565}
3566
3567/**
3568 *	t4_fwcache - firmware cache operation
3569 *	@adap: the adapter
3570 *	@op  : the operation (flush or flush and invalidate)
3571 */
3572int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3573{
3574	struct fw_params_cmd c;
3575
3576	memset(&c, 0, sizeof(c));
3577	c.op_to_vfn =
3578	    cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3579			    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3580				V_FW_PARAMS_CMD_PFN(adap->pf) |
3581				V_FW_PARAMS_CMD_VFN(0));
3582	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3583	c.param[0].mnem =
3584	    cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3585			    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
3586	c.param[0].val = (__force __be32)op;
3587
3588	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3589}
3590
/*
 * Dump the CIM PIF logic analyzers: CIM_PIFLA_SIZE entries of 6 words each
 * from both the outgoing (PO -> pif_req) and incoming (PI -> pif_rsp)
 * capture buffers.  The current write pointers are optionally returned via
 * pif_req_wrptr/pif_rsp_wrptr.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	/* Disable LA capture while reading it out; restored at the end. */
	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	/* Start the dump at the current write pointers (presumably so it
	 * begins with the oldest captured data). */
	val = t4_read_reg(adap, A_CIM_DEBUGSTS);
	req = G_POLADBGWRPTR(val);
	rsp = G_PILADBGWRPTR(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			/* Select the word via the read pointers, then pull
			 * one word from each analyzer. */
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(req) |
				     V_PILADBGRDPTR(rsp));
			*pif_req++ = t4_read_reg(adap, A_CIM_PO_LA_DEBUGDATA);
			*pif_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_DEBUGDATA);
			req++;
			rsp++;
		}
		/* Skip 2 and wrap: entries appear to be 8 words apart with
		 * 6 words of payload each -- TODO(review): confirm against
		 * the CIM LA layout documentation. */
		req = (req + 2) & M_POLADBGRDPTR;
		rsp = (rsp + 2) & M_PILADBGRDPTR;
	}
	/* Restore the original debug config (re-enables capture if it was
	 * on when we entered). */
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}
3624
/*
 * Dump the CIM MA logic analyzers: CIM_MALA_SIZE entries of 5 words each
 * from both the outgoing (PO -> ma_req) and incoming (PI -> ma_rsp)
 * capture buffers.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	/* Disable LA capture while reading it out; restored at the end. */
	cfg = t4_read_reg(adap, A_CIM_DEBUGCFG);
	if (cfg & F_LADBGEN)
		t4_write_reg(adap, A_CIM_DEBUGCFG, cfg ^ F_LADBGEN);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			/* Entries are 8 words apart with 5 words of payload
			 * each (idx = 8*i + j); select then read one word
			 * from each analyzer. */
			idx = 8 * i + j;
			t4_write_reg(adap, A_CIM_DEBUGCFG, V_POLADBGRDPTR(idx) |
				     V_PILADBGRDPTR(idx));
			*ma_req++ = t4_read_reg(adap, A_CIM_PO_LA_MADEBUGDATA);
			*ma_rsp++ = t4_read_reg(adap, A_CIM_PI_LA_MADEBUGDATA);
		}
	}
	/* Restore the original debug config. */
	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
}
3645
/*
 * Dump the ULP RX logic analyzer into la_buf.  Each of the 8 LA "columns"
 * is read in full and stored interleaved: word i of entry j lands at
 * la_buf[j * 8 + i], so la_buf must hold 8 * ULPRX_LA_SIZE words.
 */
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		/* Select column i and rewind the read pointer to the
		 * current write pointer. */
		t4_write_reg(adap, A_ULP_RX_LA_CTL, i);
		j = t4_read_reg(adap, A_ULP_RX_LA_WRPTR);
		t4_write_reg(adap, A_ULP_RX_LA_RDPTR, j);
		/* Each RDDATA read presumably auto-advances the read
		 * pointer -- TODO(review): confirm. */
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, A_ULP_RX_LA_RDDATA);
	}
}
3660
3661/**
3662 *	t4_link_l1cfg - apply link configuration to MAC/PHY
3663 *	@phy: the PHY to setup
3664 *	@mac: the MAC to setup
3665 *	@lc: the requested link configuration
3666 *
3667 *	Set up a port's MAC and PHY according to a desired link configuration.
3668 *	- If the PHY can auto-negotiate first decide what to advertise, then
3669 *	  enable/disable auto-negotiation as desired, and reset.
3670 *	- If the PHY does not auto-negotiate just reset it.
3671 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
3672 *	  otherwise do it later based on the outcome of auto-negotiation.
3673 */
3674int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
3675		  struct link_config *lc)
3676{
3677	struct fw_port_cmd c;
3678	unsigned int mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
3679	unsigned int aneg, fc, fec, speed, rcap;
3680
3681	fc = 0;
3682	if (lc->requested_fc & PAUSE_RX)
3683		fc |= FW_PORT_CAP_FC_RX;
3684	if (lc->requested_fc & PAUSE_TX)
3685		fc |= FW_PORT_CAP_FC_TX;
3686
3687	fec = 0;
3688	if (lc->requested_fec & FEC_RS)
3689		fec = FW_PORT_CAP_FEC_RS;
3690	else if (lc->requested_fec & FEC_BASER_RS)
3691		fec = FW_PORT_CAP_FEC_BASER_RS;
3692
3693	if (!(lc->supported & FW_PORT_CAP_ANEG) ||
3694	    lc->requested_aneg == AUTONEG_DISABLE) {
3695		aneg = 0;
3696		switch (lc->requested_speed) {
3697		case 100000:
3698			speed = FW_PORT_CAP_SPEED_100G;
3699			break;
3700		case 40000:
3701			speed = FW_PORT_CAP_SPEED_40G;
3702			break;
3703		case 25000:
3704			speed = FW_PORT_CAP_SPEED_25G;
3705			break;
3706		case 10000:
3707			speed = FW_PORT_CAP_SPEED_10G;
3708			break;
3709		case 1000:
3710			speed = FW_PORT_CAP_SPEED_1G;
3711			break;
3712		case 100:
3713			speed = FW_PORT_CAP_SPEED_100M;
3714			break;
3715		default:
3716			return -EINVAL;
3717			break;
3718		}
3719	} else {
3720		aneg = FW_PORT_CAP_ANEG;
3721		speed = lc->supported &
3722		    V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED);
3723	}
3724
3725	rcap = aneg | speed | fc | fec | mdi;
3726	if ((rcap | lc->supported) != lc->supported) {
3727#ifdef INVARIANTS
3728		CH_WARN(adap, "rcap 0x%08x, pcap 0x%08x\n", rcap,
3729		    lc->supported);
3730#endif
3731		rcap &= lc->supported;
3732	}
3733
3734	memset(&c, 0, sizeof(c));
3735	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3736				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3737				     V_FW_PORT_CMD_PORTID(port));
3738	c.action_to_len16 =
3739		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3740			    FW_LEN16(c));
3741	c.u.l1cfg.rcap = cpu_to_be32(rcap);
3742
3743	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3744}
3745
3746/**
3747 *	t4_restart_aneg - restart autonegotiation
3748 *	@adap: the adapter
3749 *	@mbox: mbox to use for the FW command
3750 *	@port: the port id
3751 *
3752 *	Restarts autonegotiation for the selected port.
3753 */
3754int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
3755{
3756	struct fw_port_cmd c;
3757
3758	memset(&c, 0, sizeof(c));
3759	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3760				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3761				     V_FW_PORT_CMD_PORTID(port));
3762	c.action_to_len16 =
3763		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
3764			    FW_LEN16(c));
3765	c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
3766	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3767}
3768
/* Platform-specific handler invoked when a matched interrupt cause fires. */
typedef void (*int_handler_t)(struct adapter *adap);

/*
 * One row of a table-driven interrupt-cause dispatch table (consumed by
 * t4_handle_intr_status).  A row with mask == 0 terminates the table.
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler;	/* platform-specific int handler */
};
3778
3779/**
3780 *	t4_handle_intr_status - table driven interrupt handler
3781 *	@adapter: the adapter that generated the interrupt
3782 *	@reg: the interrupt status register to process
3783 *	@acts: table of interrupt actions
3784 *
3785 *	A table driven interrupt handler that applies a set of masks to an
3786 *	interrupt status word and performs the corresponding actions if the
3787 *	interrupts described by the mask have occurred.  The actions include
3788 *	optionally emitting a warning or alert message.  The table is terminated
3789 *	by an entry specifying mask 0.  Returns the number of fatal interrupt
3790 *	conditions.
3791 */
3792static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
3793				 const struct intr_info *acts)
3794{
3795	int fatal = 0;
3796	unsigned int mask = 0;
3797	unsigned int status = t4_read_reg(adapter, reg);
3798
3799	for ( ; acts->mask; ++acts) {
3800		if (!(status & acts->mask))
3801			continue;
3802		if (acts->fatal) {
3803			fatal++;
3804			CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
3805				  status & acts->mask);
3806		} else if (acts->msg)
3807			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
3808				 status & acts->mask);
3809		if (acts->int_handler)
3810			acts->int_handler(adapter);
3811		mask |= acts->mask;
3812	}
3813	status &= mask;
3814	if (status)	/* clear processed interrupts */
3815		t4_write_reg(adapter, reg, status);
3816	return fatal;
3817}
3818
3819/*
3820 * Interrupt handler for the PCIE module.
3821 */
3822static void pcie_intr_handler(struct adapter *adapter)
3823{
3824	static const struct intr_info sysbus_intr_info[] = {
3825		{ F_RNPP, "RXNP array parity error", -1, 1 },
3826		{ F_RPCP, "RXPC array parity error", -1, 1 },
3827		{ F_RCIP, "RXCIF array parity error", -1, 1 },
3828		{ F_RCCP, "Rx completions control array parity error", -1, 1 },
3829		{ F_RFTP, "RXFT array parity error", -1, 1 },
3830		{ 0 }
3831	};
3832	static const struct intr_info pcie_port_intr_info[] = {
3833		{ F_TPCP, "TXPC array parity error", -1, 1 },
3834		{ F_TNPP, "TXNP array parity error", -1, 1 },
3835		{ F_TFTP, "TXFT array parity error", -1, 1 },
3836		{ F_TCAP, "TXCA array parity error", -1, 1 },
3837		{ F_TCIP, "TXCIF array parity error", -1, 1 },
3838		{ F_RCAP, "RXCA array parity error", -1, 1 },
3839		{ F_OTDD, "outbound request TLP discarded", -1, 1 },
3840		{ F_RDPE, "Rx data parity error", -1, 1 },
3841		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
3842		{ 0 }
3843	};
3844	static const struct intr_info pcie_intr_info[] = {
3845		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
3846		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
3847		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
3848		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
3849		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
3850		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
3851		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
3852		{ F_PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
3853		{ F_PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
3854		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
3855		{ F_CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
3856		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
3857		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
3858		{ F_DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
3859		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
3860		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
3861		{ F_HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
3862		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
3863		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
3864		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
3865		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
3866		{ F_INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
3867		{ F_MATAGPERR, "PCI MA tag parity error", -1, 1 },
3868		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
3869		{ F_RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
3870		{ F_RXWRPERR, "PCI Rx write parity error", -1, 1 },
3871		{ F_RPLPERR, "PCI replay buffer parity error", -1, 1 },
3872		{ F_PCIESINT, "PCI core secondary fault", -1, 1 },
3873		{ F_PCIEPINT, "PCI core primary fault", -1, 1 },
3874		{ F_UNXSPLCPLERR, "PCI unexpected split completion error", -1,
3875		  0 },
3876		{ 0 }
3877	};
3878
3879	static const struct intr_info t5_pcie_intr_info[] = {
3880		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
3881		  -1, 1 },
3882		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
3883		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
3884		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
3885		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
3886		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
3887		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
3888		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
3889		  -1, 1 },
3890		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
3891		  -1, 1 },
3892		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
3893		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
3894		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
3895		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
3896		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
3897		  -1, 1 },
3898		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
3899		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
3900		{ F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
3901		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
3902		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
3903		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
3904		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
3905		{ F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
3906		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
3907		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
3908		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
3909		  -1, 1 },
3910		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
3911		  -1, 1 },
3912		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
3913		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
3914		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
3915		{ F_READRSPERR, "Outbound read error", -1,
3916		  0 },
3917		{ 0 }
3918	};
3919
3920	int fat;
3921
3922	if (is_t4(adapter))
3923		fat = t4_handle_intr_status(adapter,
3924				A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
3925				sysbus_intr_info) +
3926			t4_handle_intr_status(adapter,
3927					A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
3928					pcie_port_intr_info) +
3929			t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
3930					      pcie_intr_info);
3931	else
3932		fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
3933					    t5_pcie_intr_info);
3934	if (fat)
3935		t4_fatal_err(adapter);
3936}
3937
3938/*
3939 * TP interrupt handler.
3940 */
3941static void tp_intr_handler(struct adapter *adapter)
3942{
3943	static const struct intr_info tp_intr_info[] = {
3944		{ 0x3fffffff, "TP parity error", -1, 1 },
3945		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
3946		{ 0 }
3947	};
3948
3949	if (t4_handle_intr_status(adapter, A_TP_INT_CAUSE, tp_intr_info))
3950		t4_fatal_err(adapter);
3951}
3952
3953/*
3954 * SGE interrupt handler.
3955 */
3956static void sge_intr_handler(struct adapter *adapter)
3957{
3958	u64 v;
3959	u32 err;
3960
3961	static const struct intr_info sge_intr_info[] = {
3962		{ F_ERR_CPL_EXCEED_IQE_SIZE,
3963		  "SGE received CPL exceeding IQE size", -1, 1 },
3964		{ F_ERR_INVALID_CIDX_INC,
3965		  "SGE GTS CIDX increment too large", -1, 0 },
3966		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
3967		{ F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
3968		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
3969		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
3970		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
3971		  0 },
3972		{ F_ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
3973		  0 },
3974		{ F_ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
3975		  0 },
3976		{ F_ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
3977		  0 },
3978		{ F_ERR_ING_CTXT_PRIO,
3979		  "SGE too many priority ingress contexts", -1, 0 },
3980		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
3981		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
3982		{ F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 |
3983		  F_ERR_PCIE_ERROR2 | F_ERR_PCIE_ERROR3,
3984		  "SGE PCIe error for a DBP thread", -1, 0 },
3985		{ 0 }
3986	};
3987
3988	static const struct intr_info t4t5_sge_intr_info[] = {
3989		{ F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
3990		{ F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
3991		{ F_ERR_EGR_CTXT_PRIO,
3992		  "SGE too many priority egress contexts", -1, 0 },
3993		{ 0 }
3994	};
3995
3996	/*
3997 	* For now, treat below interrupts as fatal so that we disable SGE and
3998 	* get better debug */
3999	static const struct intr_info t6_sge_intr_info[] = {
4000		{ F_FATAL_WRE_LEN,
4001		  "SGE Actual WRE packet is less than advertized length",
4002		  -1, 1 },
4003		{ 0 }
4004	};
4005
4006	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
4007		((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
4008	if (v) {
4009		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
4010				(unsigned long long)v);
4011		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
4012		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
4013	}
4014
4015	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
4016	if (chip_id(adapter) <= CHELSIO_T5)
4017		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
4018					   t4t5_sge_intr_info);
4019	else
4020		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
4021					   t6_sge_intr_info);
4022
4023	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
4024	if (err & F_ERROR_QID_VALID) {
4025		CH_ERR(adapter, "SGE error for queue %u\n", G_ERROR_QID(err));
4026		if (err & F_UNCAPTURED_ERROR)
4027			CH_ERR(adapter, "SGE UNCAPTURED_ERROR set (clearing)\n");
4028		t4_write_reg(adapter, A_SGE_ERROR_STATS, F_ERROR_QID_VALID |
4029			     F_UNCAPTURED_ERROR);
4030	}
4031
4032	if (v != 0)
4033		t4_fatal_err(adapter);
4034}
4035
/*
 * Aggregate parity-error masks covering every CIM outbound (OBQ) and
 * inbound (IBQ) queue; used in cim_intr_handler's cause table.
 */
#define CIM_OBQ_INTR (F_OBQULP0PARERR | F_OBQULP1PARERR | F_OBQULP2PARERR |\
		      F_OBQULP3PARERR | F_OBQSGEPARERR | F_OBQNCSIPARERR)
#define CIM_IBQ_INTR (F_IBQTP0PARERR | F_IBQTP1PARERR | F_IBQULPPARERR |\
		      F_IBQSGEHIPARERR | F_IBQSGELOPARERR | F_IBQNCSIPARERR)
4040
4041/*
4042 * CIM interrupt handler.
4043 */
4044static void cim_intr_handler(struct adapter *adapter)
4045{
4046	static const struct intr_info cim_intr_info[] = {
4047		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
4048		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
4049		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
4050		{ F_MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
4051		{ F_MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
4052		{ F_TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
4053		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
4054		{ F_TIMER0INT, "CIM TIMER0 interrupt", -1, 1 },
4055		{ 0 }
4056	};
4057	static const struct intr_info cim_upintr_info[] = {
4058		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
4059		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
4060		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
4061		{ F_ILLRDINT, "CIM illegal read", -1, 1 },
4062		{ F_ILLRDBEINT, "CIM illegal read BE", -1, 1 },
4063		{ F_ILLWRBEINT, "CIM illegal write BE", -1, 1 },
4064		{ F_SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
4065		{ F_SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
4066		{ F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
4067		{ F_SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
4068		{ F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
4069		{ F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
4070		{ F_SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
4071		{ F_SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
4072		{ F_BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
4073		{ F_BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
4074		{ F_SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
4075		{ F_SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
4076		{ F_BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
4077		{ F_BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
4078		{ F_SGLRDPLINT , "CIM single read from PL space", -1, 1 },
4079		{ F_SGLWRPLINT , "CIM single write to PL space", -1, 1 },
4080		{ F_BLKRDPLINT , "CIM block read from PL space", -1, 1 },
4081		{ F_BLKWRPLINT , "CIM block write to PL space", -1, 1 },
4082		{ F_REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
4083		{ F_RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
4084		{ F_TIMEOUTINT , "CIM PIF timeout", -1, 1 },
4085		{ F_TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
4086		{ 0 }
4087	};
4088	u32 val, fw_err;
4089	int fat;
4090
4091	fw_err = t4_read_reg(adapter, A_PCIE_FW);
4092	if (fw_err & F_PCIE_FW_ERR)
4093		t4_report_fw_error(adapter);
4094
4095	/* When the Firmware detects an internal error which normally wouldn't
4096	 * raise a Host Interrupt, it forces a CIM Timer0 interrupt in order
4097	 * to make sure the Host sees the Firmware Crash.  So if we have a
4098	 * Timer0 interrupt and don't see a Firmware Crash, ignore the Timer0
4099	 * interrupt.
4100	 */
4101	val = t4_read_reg(adapter, A_CIM_HOST_INT_CAUSE);
4102	if (val & F_TIMER0INT)
4103		if (!(fw_err & F_PCIE_FW_ERR) ||
4104		    (G_PCIE_FW_EVAL(fw_err) != PCIE_FW_EVAL_CRASH))
4105			t4_write_reg(adapter, A_CIM_HOST_INT_CAUSE,
4106				     F_TIMER0INT);
4107
4108	fat = t4_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE,
4109				    cim_intr_info) +
4110	      t4_handle_intr_status(adapter, A_CIM_HOST_UPACC_INT_CAUSE,
4111				    cim_upintr_info);
4112	if (fat)
4113		t4_fatal_err(adapter);
4114}
4115
4116/*
4117 * ULP RX interrupt handler.
4118 */
4119static void ulprx_intr_handler(struct adapter *adapter)
4120{
4121	static const struct intr_info ulprx_intr_info[] = {
4122		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
4123		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
4124		{ 0x7fffff, "ULPRX parity error", -1, 1 },
4125		{ 0 }
4126	};
4127
4128	if (t4_handle_intr_status(adapter, A_ULP_RX_INT_CAUSE, ulprx_intr_info))
4129		t4_fatal_err(adapter);
4130}
4131
4132/*
4133 * ULP TX interrupt handler.
4134 */
4135static void ulptx_intr_handler(struct adapter *adapter)
4136{
4137	static const struct intr_info ulptx_intr_info[] = {
4138		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
4139		  0 },
4140		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
4141		  0 },
4142		{ F_PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
4143		  0 },
4144		{ F_PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
4145		  0 },
4146		{ 0xfffffff, "ULPTX parity error", -1, 1 },
4147		{ 0 }
4148	};
4149
4150	if (t4_handle_intr_status(adapter, A_ULP_TX_INT_CAUSE, ulptx_intr_info))
4151		t4_fatal_err(adapter);
4152}
4153
4154/*
4155 * PM TX interrupt handler.
4156 */
4157static void pmtx_intr_handler(struct adapter *adapter)
4158{
4159	static const struct intr_info pmtx_intr_info[] = {
4160		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
4161		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
4162		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
4163		{ F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
4164		{ 0xffffff0, "PMTX framing error", -1, 1 },
4165		{ F_OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
4166		{ F_DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
4167		  1 },
4168		{ F_ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
4169		{ F_C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
4170		{ 0 }
4171	};
4172
4173	if (t4_handle_intr_status(adapter, A_PM_TX_INT_CAUSE, pmtx_intr_info))
4174		t4_fatal_err(adapter);
4175}
4176
4177/*
4178 * PM RX interrupt handler.
4179 */
4180static void pmrx_intr_handler(struct adapter *adapter)
4181{
4182	static const struct intr_info pmrx_intr_info[] = {
4183		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
4184		{ 0x3ffff0, "PMRX framing error", -1, 1 },
4185		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
4186		{ F_DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
4187		  1 },
4188		{ F_IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
4189		{ F_E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
4190		{ 0 }
4191	};
4192
4193	if (t4_handle_intr_status(adapter, A_PM_RX_INT_CAUSE, pmrx_intr_info))
4194		t4_fatal_err(adapter);
4195}
4196
4197/*
4198 * CPL switch interrupt handler.
4199 */
4200static void cplsw_intr_handler(struct adapter *adapter)
4201{
4202	static const struct intr_info cplsw_intr_info[] = {
4203		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
4204		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
4205		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
4206		{ F_SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
4207		{ F_CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
4208		{ F_ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
4209		{ 0 }
4210	};
4211
4212	if (t4_handle_intr_status(adapter, A_CPL_INTR_CAUSE, cplsw_intr_info))
4213		t4_fatal_err(adapter);
4214}
4215
4216/*
4217 * LE interrupt handler.
4218 */
4219static void le_intr_handler(struct adapter *adap)
4220{
4221	unsigned int chip_ver = chip_id(adap);
4222	static const struct intr_info le_intr_info[] = {
4223		{ F_LIPMISS, "LE LIP miss", -1, 0 },
4224		{ F_LIP0, "LE 0 LIP error", -1, 0 },
4225		{ F_PARITYERR, "LE parity error", -1, 1 },
4226		{ F_UNKNOWNCMD, "LE unknown command", -1, 1 },
4227		{ F_REQQPARERR, "LE request queue parity error", -1, 1 },
4228		{ 0 }
4229	};
4230
4231	static const struct intr_info t6_le_intr_info[] = {
4232		{ F_T6_LIPMISS, "LE LIP miss", -1, 0 },
4233		{ F_T6_LIP0, "LE 0 LIP error", -1, 0 },
4234		{ F_TCAMINTPERR, "LE parity error", -1, 1 },
4235		{ F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
4236		{ F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
4237		{ 0 }
4238	};
4239
4240	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
4241				  (chip_ver <= CHELSIO_T5) ?
4242				  le_intr_info : t6_le_intr_info))
4243		t4_fatal_err(adap);
4244}
4245
4246/*
4247 * MPS interrupt handler.
4248 */
4249static void mps_intr_handler(struct adapter *adapter)
4250{
4251	static const struct intr_info mps_rx_intr_info[] = {
4252		{ 0xffffff, "MPS Rx parity error", -1, 1 },
4253		{ 0 }
4254	};
4255	static const struct intr_info mps_tx_intr_info[] = {
4256		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
4257		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4258		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
4259		  -1, 1 },
4260		{ V_TXDESCFIFO(M_TXDESCFIFO), "MPS Tx desc FIFO parity error",
4261		  -1, 1 },
4262		{ F_BUBBLE, "MPS Tx underflow", -1, 1 },
4263		{ F_SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
4264		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
4265		{ 0 }
4266	};
4267	static const struct intr_info mps_trc_intr_info[] = {
4268		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
4269		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
4270		  1 },
4271		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
4272		{ 0 }
4273	};
4274	static const struct intr_info mps_stat_sram_intr_info[] = {
4275		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4276		{ 0 }
4277	};
4278	static const struct intr_info mps_stat_tx_intr_info[] = {
4279		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4280		{ 0 }
4281	};
4282	static const struct intr_info mps_stat_rx_intr_info[] = {
4283		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4284		{ 0 }
4285	};
4286	static const struct intr_info mps_cls_intr_info[] = {
4287		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
4288		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
4289		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
4290		{ 0 }
4291	};
4292
4293	int fat;
4294
4295	fat = t4_handle_intr_status(adapter, A_MPS_RX_PERR_INT_CAUSE,
4296				    mps_rx_intr_info) +
4297	      t4_handle_intr_status(adapter, A_MPS_TX_INT_CAUSE,
4298				    mps_tx_intr_info) +
4299	      t4_handle_intr_status(adapter, A_MPS_TRC_INT_CAUSE,
4300				    mps_trc_intr_info) +
4301	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_SRAM,
4302				    mps_stat_sram_intr_info) +
4303	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
4304				    mps_stat_tx_intr_info) +
4305	      t4_handle_intr_status(adapter, A_MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
4306				    mps_stat_rx_intr_info) +
4307	      t4_handle_intr_status(adapter, A_MPS_CLS_INT_CAUSE,
4308				    mps_cls_intr_info);
4309
4310	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
4311	t4_read_reg(adapter, A_MPS_INT_CAUSE);	/* flush */
4312	if (fat)
4313		t4_fatal_err(adapter);
4314}
4315
/*
 * EDC/MC interrupt causes handled by mem_intr_handler(): FIFO parity plus
 * correctable and uncorrectable ECC errors.
 */
#define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
		      F_ECC_UE_INT_CAUSE)
4318
4319/*
4320 * EDC/MC interrupt handler.
4321 */
4322static void mem_intr_handler(struct adapter *adapter, int idx)
4323{
4324	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4325
4326	unsigned int addr, cnt_addr, v;
4327
4328	if (idx <= MEM_EDC1) {
4329		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
4330		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
4331	} else if (idx == MEM_MC) {
4332		if (is_t4(adapter)) {
4333			addr = A_MC_INT_CAUSE;
4334			cnt_addr = A_MC_ECC_STATUS;
4335		} else {
4336			addr = A_MC_P_INT_CAUSE;
4337			cnt_addr = A_MC_P_ECC_STATUS;
4338		}
4339	} else {
4340		addr = MC_REG(A_MC_P_INT_CAUSE, 1);
4341		cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
4342	}
4343
4344	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
4345	if (v & F_PERR_INT_CAUSE)
4346		CH_ALERT(adapter, "%s FIFO parity error\n",
4347			  name[idx]);
4348	if (v & F_ECC_CE_INT_CAUSE) {
4349		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
4350
4351		if (idx <= MEM_EDC1)
4352			t4_edc_err_read(adapter, idx);
4353
4354		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
4355		CH_WARN_RATELIMIT(adapter,
4356				  "%u %s correctable ECC data error%s\n",
4357				  cnt, name[idx], cnt > 1 ? "s" : "");
4358	}
4359	if (v & F_ECC_UE_INT_CAUSE)
4360		CH_ALERT(adapter,
4361			 "%s uncorrectable ECC data error\n", name[idx]);
4362
4363	t4_write_reg(adapter, addr, v);
4364	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
4365		t4_fatal_err(adapter);
4366}
4367
4368/*
4369 * MA interrupt handler.
4370 */
4371static void ma_intr_handler(struct adapter *adapter)
4372{
4373	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
4374
4375	if (status & F_MEM_PERR_INT_CAUSE) {
4376		CH_ALERT(adapter,
4377			  "MA parity error, parity status %#x\n",
4378			  t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
4379		if (is_t5(adapter))
4380			CH_ALERT(adapter,
4381				  "MA parity error, parity status %#x\n",
4382				  t4_read_reg(adapter,
4383					      A_MA_PARITY_ERROR_STATUS2));
4384	}
4385	if (status & F_MEM_WRAP_INT_CAUSE) {
4386		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
4387		CH_ALERT(adapter, "MA address wrap-around error by "
4388			  "client %u to address %#x\n",
4389			  G_MEM_WRAP_CLIENT_NUM(v),
4390			  G_MEM_WRAP_ADDRESS(v) << 4);
4391	}
4392	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
4393	t4_fatal_err(adapter);
4394}
4395
4396/*
4397 * SMB interrupt handler.
4398 */
4399static void smb_intr_handler(struct adapter *adap)
4400{
4401	static const struct intr_info smb_intr_info[] = {
4402		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
4403		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
4404		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
4405		{ 0 }
4406	};
4407
4408	if (t4_handle_intr_status(adap, A_SMB_INT_CAUSE, smb_intr_info))
4409		t4_fatal_err(adap);
4410}
4411
4412/*
4413 * NC-SI interrupt handler.
4414 */
4415static void ncsi_intr_handler(struct adapter *adap)
4416{
4417	static const struct intr_info ncsi_intr_info[] = {
4418		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
4419		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
4420		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
4421		{ F_RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
4422		{ 0 }
4423	};
4424
4425	if (t4_handle_intr_status(adap, A_NCSI_INT_CAUSE, ncsi_intr_info))
4426		t4_fatal_err(adap);
4427}
4428
4429/*
4430 * XGMAC interrupt handler.
4431 */
4432static void xgmac_intr_handler(struct adapter *adap, int port)
4433{
4434	u32 v, int_cause_reg;
4435
4436	if (is_t4(adap))
4437		int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
4438	else
4439		int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
4440
4441	v = t4_read_reg(adap, int_cause_reg);
4442
4443	v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
4444	if (!v)
4445		return;
4446
4447	if (v & F_TXFIFO_PRTY_ERR)
4448		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
4449			  port);
4450	if (v & F_RXFIFO_PRTY_ERR)
4451		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
4452			  port);
4453	t4_write_reg(adap, int_cause_reg, v);
4454	t4_fatal_err(adap);
4455}
4456
4457/*
4458 * PL interrupt handler.
4459 */
4460static void pl_intr_handler(struct adapter *adap)
4461{
4462	static const struct intr_info pl_intr_info[] = {
4463		{ F_FATALPERR, "Fatal parity error", -1, 1 },
4464		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
4465		{ 0 }
4466	};
4467
4468	static const struct intr_info t5_pl_intr_info[] = {
4469		{ F_FATALPERR, "Fatal parity error", -1, 1 },
4470		{ 0 }
4471	};
4472
4473	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
4474				  is_t4(adap) ?
4475				  pl_intr_info : t5_pl_intr_info))
4476		t4_fatal_err(adap);
4477}
4478
/* PF-local interrupt sources enabled in PL_PF_INT_ENABLE (SW + CIM). */
#define PF_INTR_MASK (F_PFSW | F_PFCIM)
4480
4481/**
4482 *	t4_slow_intr_handler - control path interrupt handler
4483 *	@adapter: the adapter
4484 *
4485 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
4486 *	The designation 'slow' is because it involves register reads, while
4487 *	data interrupts typically don't involve any MMIOs.
4488 */
4489int t4_slow_intr_handler(struct adapter *adapter)
4490{
4491	u32 cause = t4_read_reg(adapter, A_PL_INT_CAUSE);
4492
4493	if (!(cause & GLBL_INTR_MASK))
4494		return 0;
4495	if (cause & F_CIM)
4496		cim_intr_handler(adapter);
4497	if (cause & F_MPS)
4498		mps_intr_handler(adapter);
4499	if (cause & F_NCSI)
4500		ncsi_intr_handler(adapter);
4501	if (cause & F_PL)
4502		pl_intr_handler(adapter);
4503	if (cause & F_SMB)
4504		smb_intr_handler(adapter);
4505	if (cause & F_MAC0)
4506		xgmac_intr_handler(adapter, 0);
4507	if (cause & F_MAC1)
4508		xgmac_intr_handler(adapter, 1);
4509	if (cause & F_MAC2)
4510		xgmac_intr_handler(adapter, 2);
4511	if (cause & F_MAC3)
4512		xgmac_intr_handler(adapter, 3);
4513	if (cause & F_PCIE)
4514		pcie_intr_handler(adapter);
4515	if (cause & F_MC0)
4516		mem_intr_handler(adapter, MEM_MC);
4517	if (is_t5(adapter) && (cause & F_MC1))
4518		mem_intr_handler(adapter, MEM_MC1);
4519	if (cause & F_EDC0)
4520		mem_intr_handler(adapter, MEM_EDC0);
4521	if (cause & F_EDC1)
4522		mem_intr_handler(adapter, MEM_EDC1);
4523	if (cause & F_LE)
4524		le_intr_handler(adapter);
4525	if (cause & F_TP)
4526		tp_intr_handler(adapter);
4527	if (cause & F_MA)
4528		ma_intr_handler(adapter);
4529	if (cause & F_PM_TX)
4530		pmtx_intr_handler(adapter);
4531	if (cause & F_PM_RX)
4532		pmrx_intr_handler(adapter);
4533	if (cause & F_ULP_RX)
4534		ulprx_intr_handler(adapter);
4535	if (cause & F_CPL_SWITCH)
4536		cplsw_intr_handler(adapter);
4537	if (cause & F_SGE)
4538		sge_intr_handler(adapter);
4539	if (cause & F_ULP_TX)
4540		ulptx_intr_handler(adapter);
4541
4542	/* Clear the interrupts just processed for which we are the master. */
4543	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
4544	(void)t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
4545	return 1;
4546}
4547
4548/**
4549 *	t4_intr_enable - enable interrupts
4550 *	@adapter: the adapter whose interrupts should be enabled
4551 *
4552 *	Enable PF-specific interrupts for the calling function and the top-level
4553 *	interrupt concentrator for global interrupts.  Interrupts are already
4554 *	enabled at each module,	here we just enable the roots of the interrupt
4555 *	hierarchies.
4556 *
4557 *	Note: this function should be called only when the driver manages
4558 *	non PF-specific interrupts from the various HW modules.  Only one PCI
4559 *	function at a time should be doing this.
4560 */
4561void t4_intr_enable(struct adapter *adapter)
4562{
4563	u32 val = 0;
4564	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4565	u32 pf = (chip_id(adapter) <= CHELSIO_T5
4566		  ? G_SOURCEPF(whoami)
4567		  : G_T6_SOURCEPF(whoami));
4568
4569	if (chip_id(adapter) <= CHELSIO_T5)
4570		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
4571	else
4572		val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
4573	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
4574		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
4575		     F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
4576		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
4577		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
4578		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
4579		     F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
4580	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
4581	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
4582}
4583
4584/**
4585 *	t4_intr_disable - disable interrupts
4586 *	@adapter: the adapter whose interrupts should be disabled
4587 *
4588 *	Disable interrupts.  We only disable the top-level interrupt
4589 *	concentrators.  The caller must be a PCI function managing global
4590 *	interrupts.
4591 */
4592void t4_intr_disable(struct adapter *adapter)
4593{
4594	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4595	u32 pf = (chip_id(adapter) <= CHELSIO_T5
4596		  ? G_SOURCEPF(whoami)
4597		  : G_T6_SOURCEPF(whoami));
4598
4599	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
4600	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
4601}
4602
4603/**
4604 *	t4_intr_clear - clear all interrupts
4605 *	@adapter: the adapter whose interrupts should be cleared
4606 *
4607 *	Clears all interrupts.  The caller must be a PCI function managing
4608 *	global interrupts.
4609 */
4610void t4_intr_clear(struct adapter *adapter)
4611{
4612	static const unsigned int cause_reg[] = {
4613		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
4614		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
4615		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
4616		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
4617		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
4618		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
4619		A_TP_INT_CAUSE,
4620		A_ULP_RX_INT_CAUSE, A_ULP_TX_INT_CAUSE,
4621		A_PM_RX_INT_CAUSE, A_PM_TX_INT_CAUSE,
4622		A_MPS_RX_PERR_INT_CAUSE,
4623		A_CPL_INTR_CAUSE,
4624		MYPF_REG(A_PL_PF_INT_CAUSE),
4625		A_PL_PL_INT_CAUSE,
4626		A_LE_DB_INT_CAUSE,
4627	};
4628
4629	unsigned int i;
4630
4631	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
4632		t4_write_reg(adapter, cause_reg[i], 0xffffffff);
4633
4634	t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
4635				A_MC_P_INT_CAUSE, 0xffffffff);
4636
4637	if (is_t4(adapter)) {
4638		t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
4639				0xffffffff);
4640		t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
4641				0xffffffff);
4642	} else
4643		t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
4644
4645	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
4646	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
4647}
4648
4649/**
4650 *	hash_mac_addr - return the hash value of a MAC address
4651 *	@addr: the 48-bit Ethernet MAC address
4652 *
4653 *	Hashes a MAC address according to the hash function used by HW inexact
4654 *	(hash) address matching.
4655 */
4656static int hash_mac_addr(const u8 *addr)
4657{
4658	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
4659	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
4660	a ^= b;
4661	a ^= (a >> 12);
4662	a ^= (a >> 6);
4663	return a & 0x3f;
4664}
4665
4666/**
4667 *	t4_config_rss_range - configure a portion of the RSS mapping table
4668 *	@adapter: the adapter
4669 *	@mbox: mbox to use for the FW command
4670 *	@viid: virtual interface whose RSS subtable is to be written
4671 *	@start: start entry in the table to write
4672 *	@n: how many table entries to write
4673 *	@rspq: values for the "response queue" (Ingress Queue) lookup table
4674 *	@nrspq: number of values in @rspq
4675 *
4676 *	Programs the selected part of the VI's RSS mapping table with the
4677 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
4678 *	until the full table range is populated.
4679 *
4680 *	The caller must ensure the values in @rspq are in the range allowed for
4681 *	@viid.
4682 */
4683int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
4684			int start, int n, const u16 *rspq, unsigned int nrspq)
4685{
4686	int ret;
4687	const u16 *rsp = rspq;
4688	const u16 *rsp_end = rspq + nrspq;
4689	struct fw_rss_ind_tbl_cmd cmd;
4690
4691	memset(&cmd, 0, sizeof(cmd));
4692	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
4693				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4694				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
4695	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
4696
4697	/*
4698	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
4699	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
4700	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
4701	 * reserved.
4702	 */
4703	while (n > 0) {
4704		int nq = min(n, 32);
4705		int nq_packed = 0;
4706		__be32 *qp = &cmd.iq0_to_iq2;
4707
4708		/*
4709		 * Set up the firmware RSS command header to send the next
4710		 * "nq" Ingress Queue IDs to the firmware.
4711		 */
4712		cmd.niqid = cpu_to_be16(nq);
4713		cmd.startidx = cpu_to_be16(start);
4714
4715		/*
4716		 * "nq" more done for the start of the next loop.
4717		 */
4718		start += nq;
4719		n -= nq;
4720
4721		/*
4722		 * While there are still Ingress Queue IDs to stuff into the
4723		 * current firmware RSS command, retrieve them from the
4724		 * Ingress Queue ID array and insert them into the command.
4725		 */
4726		while (nq > 0) {
4727			/*
4728			 * Grab up to the next 3 Ingress Queue IDs (wrapping
4729			 * around the Ingress Queue ID array if necessary) and
4730			 * insert them into the firmware RSS command at the
4731			 * current 3-tuple position within the commad.
4732			 */
4733			u16 qbuf[3];
4734			u16 *qbp = qbuf;
4735			int nqbuf = min(3, nq);
4736
4737			nq -= nqbuf;
4738			qbuf[0] = qbuf[1] = qbuf[2] = 0;
4739			while (nqbuf && nq_packed < 32) {
4740				nqbuf--;
4741				nq_packed++;
4742				*qbp++ = *rsp++;
4743				if (rsp >= rsp_end)
4744					rsp = rspq;
4745			}
4746			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
4747					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
4748					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
4749		}
4750
4751		/*
4752		 * Send this portion of the RRS table update to the firmware;
4753		 * bail out on any errors.
4754		 */
4755		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
4756		if (ret)
4757			return ret;
4758	}
4759	return 0;
4760}
4761
4762/**
4763 *	t4_config_glbl_rss - configure the global RSS mode
4764 *	@adapter: the adapter
4765 *	@mbox: mbox to use for the FW command
4766 *	@mode: global RSS mode
4767 *	@flags: mode-specific flags
4768 *
4769 *	Sets the global RSS mode.
4770 */
4771int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
4772		       unsigned int flags)
4773{
4774	struct fw_rss_glb_config_cmd c;
4775
4776	memset(&c, 0, sizeof(c));
4777	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
4778				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
4779	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4780	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
4781		c.u.manual.mode_pkd =
4782			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4783	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
4784		c.u.basicvirtual.mode_keymode =
4785			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
4786		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
4787	} else
4788		return -EINVAL;
4789	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4790}
4791
4792/**
4793 *	t4_config_vi_rss - configure per VI RSS settings
4794 *	@adapter: the adapter
4795 *	@mbox: mbox to use for the FW command
4796 *	@viid: the VI id
4797 *	@flags: RSS flags
4798 *	@defq: id of the default RSS queue for the VI.
4799 *	@skeyidx: RSS secret key table index for non-global mode
4800 *	@skey: RSS vf_scramble key for VI.
4801 *
4802 *	Configures VI-specific RSS properties.
4803 */
4804int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
4805		     unsigned int flags, unsigned int defq, unsigned int skeyidx,
4806		     unsigned int skey)
4807{
4808	struct fw_rss_vi_config_cmd c;
4809
4810	memset(&c, 0, sizeof(c));
4811	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
4812				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4813				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
4814	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4815	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
4816					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
4817	c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
4818					V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
4819	c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
4820
4821	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4822}
4823
/* Read an RSS table row.  Returns 0 on success with the row contents in
 * *val, or a negative error if the row-valid bit never asserted.
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	/* NOTE(review): the 0xfff00000 upper bits presumably encode the
	 * row-read request alongside the row index — confirm against the
	 * TP_RSS_LKP_TABLE register documentation.
	 */
	t4_write_reg(adap, A_TP_RSS_LKP_TABLE, 0xfff00000 | row);
	/* Poll (up to 5 attempts) for LKPTBLROWVLD, then read back the row. */
	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
				   5, 0, val);
}
4831
4832/**
4833 *	t4_read_rss - read the contents of the RSS mapping table
4834 *	@adapter: the adapter
4835 *	@map: holds the contents of the RSS mapping table
4836 *
4837 *	Reads the contents of the RSS hash->queue mapping table.
4838 */
4839int t4_read_rss(struct adapter *adapter, u16 *map)
4840{
4841	u32 val;
4842	int i, ret;
4843
4844	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
4845		ret = rd_rss_row(adapter, i, &val);
4846		if (ret)
4847			return ret;
4848		*map++ = G_LKPTBLQUEUE0(val);
4849		*map++ = G_LKPTBLQUEUE1(val);
4850	}
4851	return 0;
4852}
4853
4854/**
4855 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
4856 * @adap: the adapter
4857 * @cmd: TP fw ldst address space type
4858 * @vals: where the indirect register values are stored/written
4859 * @nregs: how many indirect registers to read/write
4860 * @start_idx: index of first indirect register to read/write
4861 * @rw: Read (1) or Write (0)
4862 * @sleep_ok: if true we may sleep while awaiting command completion
4863 *
4864 * Access TP indirect registers through LDST
4865 **/
4866static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
4867			    unsigned int nregs, unsigned int start_index,
4868			    unsigned int rw, bool sleep_ok)
4869{
4870	int ret = 0;
4871	unsigned int i;
4872	struct fw_ldst_cmd c;
4873
4874	for (i = 0; i < nregs; i++) {
4875		memset(&c, 0, sizeof(c));
4876		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
4877						F_FW_CMD_REQUEST |
4878						(rw ? F_FW_CMD_READ :
4879						      F_FW_CMD_WRITE) |
4880						V_FW_LDST_CMD_ADDRSPACE(cmd));
4881		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4882
4883		c.u.addrval.addr = cpu_to_be32(start_index + i);
4884		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
4885		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
4886				      sleep_ok);
4887		if (ret)
4888			return ret;
4889
4890		if (rw)
4891			vals[i] = be32_to_cpu(c.u.addrval.val);
4892	}
4893	return 0;
4894}
4895
4896/**
4897 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
4898 * @adap: the adapter
4899 * @reg_addr: Address Register
4900 * @reg_data: Data register
4901 * @buff: where the indirect register values are stored/written
4902 * @nregs: how many indirect registers to read/write
4903 * @start_index: index of first indirect register to read/write
4904 * @rw: READ(1) or WRITE(0)
4905 * @sleep_ok: if true we may sleep while awaiting command completion
4906 *
4907 * Read/Write TP indirect registers through LDST if possible.
4908 * Else, use backdoor access
4909 **/
4910static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
4911			      u32 *buff, u32 nregs, u32 start_index, int rw,
4912			      bool sleep_ok)
4913{
4914	int rc = -EINVAL;
4915	int cmd;
4916
4917	switch (reg_addr) {
4918	case A_TP_PIO_ADDR:
4919		cmd = FW_LDST_ADDRSPC_TP_PIO;
4920		break;
4921	case A_TP_TM_PIO_ADDR:
4922		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
4923		break;
4924	case A_TP_MIB_INDEX:
4925		cmd = FW_LDST_ADDRSPC_TP_MIB;
4926		break;
4927	default:
4928		goto indirect_access;
4929	}
4930
4931	if (t4_use_ldst(adap))
4932		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
4933				      sleep_ok);
4934
4935indirect_access:
4936
4937	if (rc) {
4938		if (rw)
4939			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
4940					 start_index);
4941		else
4942			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
4943					  start_index);
4944	}
4945}
4946
4947/**
4948 * t4_tp_pio_read - Read TP PIO registers
4949 * @adap: the adapter
4950 * @buff: where the indirect register values are written
4951 * @nregs: how many indirect registers to read
4952 * @start_index: index of first indirect register to read
4953 * @sleep_ok: if true we may sleep while awaiting command completion
4954 *
4955 * Read TP PIO Registers
4956 **/
4957void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
4958		    u32 start_index, bool sleep_ok)
4959{
4960	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, buff, nregs,
4961			  start_index, 1, sleep_ok);
4962}
4963
4964/**
4965 * t4_tp_pio_write - Write TP PIO registers
4966 * @adap: the adapter
4967 * @buff: where the indirect register values are stored
4968 * @nregs: how many indirect registers to write
4969 * @start_index: index of first indirect register to write
4970 * @sleep_ok: if true we may sleep while awaiting command completion
4971 *
4972 * Write TP PIO Registers
4973 **/
4974void t4_tp_pio_write(struct adapter *adap, const u32 *buff, u32 nregs,
4975		     u32 start_index, bool sleep_ok)
4976{
4977	t4_tp_indirect_rw(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4978	    __DECONST(u32 *, buff), nregs, start_index, 0, sleep_ok);
4979}
4980
4981/**
4982 * t4_tp_tm_pio_read - Read TP TM PIO registers
4983 * @adap: the adapter
4984 * @buff: where the indirect register values are written
4985 * @nregs: how many indirect registers to read
4986 * @start_index: index of first indirect register to read
4987 * @sleep_ok: if true we may sleep while awaiting command completion
4988 *
4989 * Read TP TM PIO Registers
4990 **/
4991void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
4992		       u32 start_index, bool sleep_ok)
4993{
4994	t4_tp_indirect_rw(adap, A_TP_TM_PIO_ADDR, A_TP_TM_PIO_DATA, buff,
4995			  nregs, start_index, 1, sleep_ok);
4996}
4997
4998/**
4999 * t4_tp_mib_read - Read TP MIB registers
5000 * @adap: the adapter
5001 * @buff: where the indirect register values are written
5002 * @nregs: how many indirect registers to read
5003 * @start_index: index of first indirect register to read
5004 * @sleep_ok: if true we may sleep while awaiting command completion
5005 *
5006 * Read TP MIB Registers
5007 **/
5008void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5009		    bool sleep_ok)
5010{
5011	t4_tp_indirect_rw(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, buff, nregs,
5012			  start_index, 1, sleep_ok);
5013}
5014
5015/**
5016 *	t4_read_rss_key - read the global RSS key
5017 *	@adap: the adapter
5018 *	@key: 10-entry array holding the 320-bit RSS key
5019 * 	@sleep_ok: if true we may sleep while awaiting command completion
5020 *
5021 *	Reads the global 320-bit RSS key.
5022 */
5023void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5024{
5025	t4_tp_pio_read(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5026}
5027
5028/**
5029 *	t4_write_rss_key - program one of the RSS keys
5030 *	@adap: the adapter
5031 *	@key: 10-entry array holding the 320-bit RSS key
5032 *	@idx: which RSS key to write
5033 * 	@sleep_ok: if true we may sleep while awaiting command completion
5034 *
5035 *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
5036 *	0..15 the corresponding entry in the RSS key table is written,
5037 *	otherwise the global RSS key is written.
5038 */
5039void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5040		      bool sleep_ok)
5041{
5042	u8 rss_key_addr_cnt = 16;
5043	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
5044
5045	/*
5046	 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5047	 * allows access to key addresses 16-63 by using KeyWrAddrX
5048	 * as index[5:4](upper 2) into key table
5049	 */
5050	if ((chip_id(adap) > CHELSIO_T5) &&
5051	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
5052		rss_key_addr_cnt = 32;
5053
5054	t4_tp_pio_write(adap, key, 10, A_TP_RSS_SECRET_KEY0, sleep_ok);
5055
5056	if (idx >= 0 && idx < rss_key_addr_cnt) {
5057		if (rss_key_addr_cnt > 16)
5058			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5059				     vrt | V_KEYWRADDRX(idx >> 4) |
5060				     V_T6_VFWRADDR(idx) | F_KEYWREN);
5061		else
5062			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
5063				     vrt| V_KEYWRADDR(idx) | F_KEYWREN);
5064	}
5065}
5066
5067/**
5068 *	t4_read_rss_pf_config - read PF RSS Configuration Table
5069 *	@adapter: the adapter
5070 *	@index: the entry in the PF RSS table to read
5071 *	@valp: where to store the returned value
5072 * 	@sleep_ok: if true we may sleep while awaiting command completion
5073 *
5074 *	Reads the PF RSS Configuration Table at the specified index and returns
5075 *	the value found there.
5076 */
5077void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5078			   u32 *valp, bool sleep_ok)
5079{
5080	t4_tp_pio_read(adapter, valp, 1, A_TP_RSS_PF0_CONFIG + index, sleep_ok);
5081}
5082
5083/**
5084 *	t4_write_rss_pf_config - write PF RSS Configuration Table
5085 *	@adapter: the adapter
5086 *	@index: the entry in the VF RSS table to read
5087 *	@val: the value to store
5088 * 	@sleep_ok: if true we may sleep while awaiting command completion
5089 *
5090 *	Writes the PF RSS Configuration Table at the specified index with the
5091 *	specified value.
5092 */
5093void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
5094			    u32 val, bool sleep_ok)
5095{
5096	t4_tp_pio_write(adapter, &val, 1, A_TP_RSS_PF0_CONFIG + index,
5097			sleep_ok);
5098}
5099
5100/**
5101 *	t4_read_rss_vf_config - read VF RSS Configuration Table
5102 *	@adapter: the adapter
5103 *	@index: the entry in the VF RSS table to read
5104 *	@vfl: where to store the returned VFL
5105 *	@vfh: where to store the returned VFH
5106 * 	@sleep_ok: if true we may sleep while awaiting command completion
5107 *
5108 *	Reads the VF RSS Configuration Table at the specified index and returns
5109 *	the (VFL, VFH) values found there.
5110 */
5111void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5112			   u32 *vfl, u32 *vfh, bool sleep_ok)
5113{
5114	u32 vrt, mask, data;
5115
5116	if (chip_id(adapter) <= CHELSIO_T5) {
5117		mask = V_VFWRADDR(M_VFWRADDR);
5118		data = V_VFWRADDR(index);
5119	} else {
5120		 mask =  V_T6_VFWRADDR(M_T6_VFWRADDR);
5121		 data = V_T6_VFWRADDR(index);
5122	}
5123	/*
5124	 * Request that the index'th VF Table values be read into VFL/VFH.
5125	 */
5126	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5127	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5128	vrt |= data | F_VFRDEN;
5129	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5130
5131	/*
5132	 * Grab the VFL/VFH values ...
5133	 */
5134	t4_tp_pio_read(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5135	t4_tp_pio_read(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5136}
5137
5138/**
5139 *	t4_write_rss_vf_config - write VF RSS Configuration Table
5140 *
5141 *	@adapter: the adapter
5142 *	@index: the entry in the VF RSS table to write
5143 *	@vfl: the VFL to store
5144 *	@vfh: the VFH to store
5145 *
5146 *	Writes the VF RSS Configuration Table at the specified index with the
5147 *	specified (VFL, VFH) values.
5148 */
5149void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
5150			    u32 vfl, u32 vfh, bool sleep_ok)
5151{
5152	u32 vrt, mask, data;
5153
5154	if (chip_id(adapter) <= CHELSIO_T5) {
5155		mask = V_VFWRADDR(M_VFWRADDR);
5156		data = V_VFWRADDR(index);
5157	} else {
5158		mask =  V_T6_VFWRADDR(M_T6_VFWRADDR);
5159		data = V_T6_VFWRADDR(index);
5160	}
5161
5162	/*
5163	 * Load up VFL/VFH with the values to be written ...
5164	 */
5165	t4_tp_pio_write(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, sleep_ok);
5166	t4_tp_pio_write(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, sleep_ok);
5167
5168	/*
5169	 * Write the VFL/VFH into the VF Table at index'th location.
5170	 */
5171	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
5172	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
5173	vrt |= data | F_VFRDEN;
5174	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
5175}
5176
5177/**
5178 *	t4_read_rss_pf_map - read PF RSS Map
5179 *	@adapter: the adapter
5180 * 	@sleep_ok: if true we may sleep while awaiting command completion
5181 *
5182 *	Reads the PF RSS Map register and returns its value.
5183 */
5184u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5185{
5186	u32 pfmap;
5187
5188	t4_tp_pio_read(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
5189
5190	return pfmap;
5191}
5192
5193/**
5194 *	t4_write_rss_pf_map - write PF RSS Map
5195 *	@adapter: the adapter
5196 *	@pfmap: PF RSS Map value
5197 *
5198 *	Writes the specified value to the PF RSS Map register.
5199 */
5200void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap, bool sleep_ok)
5201{
5202	t4_tp_pio_write(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, sleep_ok);
5203}
5204
5205/**
5206 *	t4_read_rss_pf_mask - read PF RSS Mask
5207 *	@adapter: the adapter
5208 * 	@sleep_ok: if true we may sleep while awaiting command completion
5209 *
5210 *	Reads the PF RSS Mask register and returns its value.
5211 */
5212u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
5213{
5214	u32 pfmask;
5215
5216	t4_tp_pio_read(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
5217
5218	return pfmask;
5219}
5220
5221/**
5222 *	t4_write_rss_pf_mask - write PF RSS Mask
5223 *	@adapter: the adapter
5224 *	@pfmask: PF RSS Mask value
5225 *
5226 *	Writes the specified value to the PF RSS Mask register.
5227 */
5228void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok)
5229{
5230	t4_tp_pio_write(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, sleep_ok);
5231}
5232
5233/**
5234 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
5235 *	@adap: the adapter
5236 *	@v4: holds the TCP/IP counter values
5237 *	@v6: holds the TCP/IPv6 counter values
5238 * 	@sleep_ok: if true we may sleep while awaiting command completion
5239 *
5240 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5241 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5242 */
5243void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5244			 struct tp_tcp_stats *v6, bool sleep_ok)
5245{
5246	u32 val[A_TP_MIB_TCP_RXT_SEG_LO - A_TP_MIB_TCP_OUT_RST + 1];
5247
5248#define STAT_IDX(x) ((A_TP_MIB_TCP_##x) - A_TP_MIB_TCP_OUT_RST)
5249#define STAT(x)     val[STAT_IDX(x)]
5250#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
5251
5252	if (v4) {
5253		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5254			       A_TP_MIB_TCP_OUT_RST, sleep_ok);
5255		v4->tcp_out_rsts = STAT(OUT_RST);
5256		v4->tcp_in_segs  = STAT64(IN_SEG);
5257		v4->tcp_out_segs = STAT64(OUT_SEG);
5258		v4->tcp_retrans_segs = STAT64(RXT_SEG);
5259	}
5260	if (v6) {
5261		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5262			       A_TP_MIB_TCP_V6OUT_RST, sleep_ok);
5263		v6->tcp_out_rsts = STAT(OUT_RST);
5264		v6->tcp_in_segs  = STAT64(IN_SEG);
5265		v6->tcp_out_segs = STAT64(OUT_SEG);
5266		v6->tcp_retrans_segs = STAT64(RXT_SEG);
5267	}
5268#undef STAT64
5269#undef STAT
5270#undef STAT_IDX
5271}
5272
5273/**
5274 *	t4_tp_get_err_stats - read TP's error MIB counters
5275 *	@adap: the adapter
5276 *	@st: holds the counter values
5277 * 	@sleep_ok: if true we may sleep while awaiting command completion
5278 *
5279 *	Returns the values of TP's error counters.
5280 */
5281void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
5282			 bool sleep_ok)
5283{
5284	int nchan = adap->chip_params->nchan;
5285
5286	t4_tp_mib_read(adap, st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0,
5287		       sleep_ok);
5288
5289	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0,
5290		       sleep_ok);
5291
5292	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0,
5293		       sleep_ok);
5294
5295	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
5296		       A_TP_MIB_TNL_CNG_DROP_0, sleep_ok);
5297
5298	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
5299		       A_TP_MIB_OFD_CHN_DROP_0, sleep_ok);
5300
5301	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0,
5302		       sleep_ok);
5303
5304	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
5305		       A_TP_MIB_OFD_VLN_DROP_0, sleep_ok);
5306
5307	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
5308		       A_TP_MIB_TCP_V6IN_ERR_0, sleep_ok);
5309
5310	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP,
5311		       sleep_ok);
5312}
5313
5314/**
5315 *	t4_tp_get_proxy_stats - read TP's proxy MIB counters
5316 *	@adap: the adapter
5317 *	@st: holds the counter values
5318 *
5319 *	Returns the values of TP's proxy counters.
5320 */
5321void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st,
5322    bool sleep_ok)
5323{
5324	int nchan = adap->chip_params->nchan;
5325
5326	t4_tp_mib_read(adap, st->proxy, nchan, A_TP_MIB_TNL_LPBK_0, sleep_ok);
5327}
5328
5329/**
5330 *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
5331 *	@adap: the adapter
5332 *	@st: holds the counter values
5333 * 	@sleep_ok: if true we may sleep while awaiting command completion
5334 *
5335 *	Returns the values of TP's CPL counters.
5336 */
5337void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
5338			 bool sleep_ok)
5339{
5340	int nchan = adap->chip_params->nchan;
5341
5342	t4_tp_mib_read(adap, st->req, nchan, A_TP_MIB_CPL_IN_REQ_0, sleep_ok);
5343
5344	t4_tp_mib_read(adap, st->rsp, nchan, A_TP_MIB_CPL_OUT_RSP_0, sleep_ok);
5345}
5346
5347/**
5348 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5349 *	@adap: the adapter
5350 *	@st: holds the counter values
5351 *
5352 *	Returns the values of TP's RDMA counters.
5353 */
5354void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
5355			  bool sleep_ok)
5356{
5357	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
5358		       sleep_ok);
5359}
5360
5361/**
5362 *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5363 *	@adap: the adapter
5364 *	@idx: the port index
5365 *	@st: holds the counter values
5366 * 	@sleep_ok: if true we may sleep while awaiting command completion
5367 *
5368 *	Returns the values of TP's FCoE counters for the selected port.
5369 */
5370void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5371		       struct tp_fcoe_stats *st, bool sleep_ok)
5372{
5373	u32 val[2];
5374
5375	t4_tp_mib_read(adap, &st->frames_ddp, 1, A_TP_MIB_FCOE_DDP_0 + idx,
5376		       sleep_ok);
5377
5378	t4_tp_mib_read(adap, &st->frames_drop, 1,
5379		       A_TP_MIB_FCOE_DROP_0 + idx, sleep_ok);
5380
5381	t4_tp_mib_read(adap, val, 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx,
5382		       sleep_ok);
5383
5384	st->octets_ddp = ((u64)val[0] << 32) | val[1];
5385}
5386
5387/**
5388 *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5389 *	@adap: the adapter
5390 *	@st: holds the counter values
5391 * 	@sleep_ok: if true we may sleep while awaiting command completion
5392 *
5393 *	Returns the values of TP's counters for non-TCP directly-placed packets.
5394 */
5395void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
5396		      bool sleep_ok)
5397{
5398	u32 val[4];
5399
5400	t4_tp_mib_read(adap, val, 4, A_TP_MIB_USM_PKTS, sleep_ok);
5401
5402	st->frames = val[0];
5403	st->drops = val[1];
5404	st->octets = ((u64)val[2] << 32) | val[3];
5405}
5406
5407/**
5408 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
5409 *	@adap: the adapter
5410 *	@mtus: where to store the MTU values
5411 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
5412 *
5413 *	Reads the HW path MTU table.
5414 */
5415void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5416{
5417	u32 v;
5418	int i;
5419
5420	for (i = 0; i < NMTUS; ++i) {
5421		t4_write_reg(adap, A_TP_MTU_TABLE,
5422			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
5423		v = t4_read_reg(adap, A_TP_MTU_TABLE);
5424		mtus[i] = G_MTUVALUE(v);
5425		if (mtu_log)
5426			mtu_log[i] = G_MTUWIDTH(v);
5427	}
5428}
5429
5430/**
5431 *	t4_read_cong_tbl - reads the congestion control table
5432 *	@adap: the adapter
5433 *	@incr: where to store the alpha values
5434 *
5435 *	Reads the additive increments programmed into the HW congestion
5436 *	control table.
5437 */
5438void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5439{
5440	unsigned int mtu, w;
5441
5442	for (mtu = 0; mtu < NMTUS; ++mtu)
5443		for (w = 0; w < NCCTRL_WIN; ++w) {
5444			t4_write_reg(adap, A_TP_CCTRL_TABLE,
5445				     V_ROWINDEX(0xffff) | (mtu << 5) | w);
5446			incr[mtu][w] = (u16)t4_read_reg(adap,
5447						A_TP_CCTRL_TABLE) & 0x1fff;
5448		}
5449}
5450
5451/**
5452 *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5453 *	@adap: the adapter
5454 *	@addr: the indirect TP register address
5455 *	@mask: specifies the field within the register to modify
5456 *	@val: new value for the field
5457 *
5458 *	Sets a field of an indirect TP register to the given value.
5459 */
5460void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5461			    unsigned int mask, unsigned int val)
5462{
5463	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
5464	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
5465	t4_write_reg(adap, A_TP_PIO_DATA, val);
5466}
5467
5468/**
5469 *	init_cong_ctrl - initialize congestion control parameters
5470 *	@a: the alpha values for congestion control
5471 *	@b: the beta values for congestion control
5472 *
5473 *	Initialize the congestion control parameters.
5474 */
5475static void init_cong_ctrl(unsigned short *a, unsigned short *b)
5476{
5477	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5478	a[9] = 2;
5479	a[10] = 3;
5480	a[11] = 4;
5481	a[12] = 5;
5482	a[13] = 6;
5483	a[14] = 7;
5484	a[15] = 8;
5485	a[16] = 9;
5486	a[17] = 10;
5487	a[18] = 14;
5488	a[19] = 17;
5489	a[20] = 21;
5490	a[21] = 25;
5491	a[22] = 30;
5492	a[23] = 35;
5493	a[24] = 45;
5494	a[25] = 60;
5495	a[26] = 80;
5496	a[27] = 100;
5497	a[28] = 200;
5498	a[29] = 300;
5499	a[30] = 400;
5500	a[31] = 500;
5501
5502	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5503	b[9] = b[10] = 1;
5504	b[11] = b[12] = 2;
5505	b[13] = b[14] = b[15] = b[16] = 3;
5506	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5507	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5508	b[28] = b[29] = 6;
5509	b[30] = b[31] = 7;
5510}
5511
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 *	t4_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *
 *	Write the HW MTU table with the supplied MTUs and the high-speed
 *	congestion control table with the supplied alpha, beta, and MTUs.
 *	We write the two tables together because the additive increments
 *	depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/*
	 * Nominal packet counts per congestion control window, used to
	 * scale alpha[w] into a per-window additive increment.
	 */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		/* fls() gives the 1-based position of the MSB of mtu. */
		unsigned int log2 = fls(mtu);

		/* Round down if mtu is closer to the lower power of 2. */
		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/*
			 * Additive increment, floored at CC_MIN_INCR.
			 * mtu - 40 presumably discounts TCP/IP header
			 * bytes -- confirm against the data book.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			/* Packed row: mtu index, window, beta, increment. */
			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
5558
5559/**
5560 *	t4_set_pace_tbl - set the pace table
5561 *	@adap: the adapter
5562 *	@pace_vals: the pace values in microseconds
5563 *	@start: index of the first entry in the HW pace table to set
5564 *	@n: how many entries to set
5565 *
5566 *	Sets (a subset of the) HW pace table.
5567 */
5568int t4_set_pace_tbl(struct adapter *adap, const unsigned int *pace_vals,
5569		     unsigned int start, unsigned int n)
5570{
5571	unsigned int vals[NTX_SCHED], i;
5572	unsigned int tick_ns = dack_ticks_to_usec(adap, 1000);
5573
5574	if (n > NTX_SCHED)
5575	    return -ERANGE;
5576
5577	/* convert values from us to dack ticks, rounding to closest value */
5578	for (i = 0; i < n; i++, pace_vals++) {
5579		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
5580		if (vals[i] > 0x7ff)
5581			return -ERANGE;
5582		if (*pace_vals && vals[i] == 0)
5583			return -ERANGE;
5584	}
5585	for (i = 0; i < n; i++, start++)
5586		t4_write_reg(adap, A_TP_PACE_TABLE, (start << 16) | vals[i]);
5587	return 0;
5588}
5589
5590/**
5591 *	t4_set_sched_bps - set the bit rate for a HW traffic scheduler
5592 *	@adap: the adapter
5593 *	@kbps: target rate in Kbps
5594 *	@sched: the scheduler index
5595 *
5596 *	Configure a Tx HW scheduler for the target rate.
5597 */
5598int t4_set_sched_bps(struct adapter *adap, int sched, unsigned int kbps)
5599{
5600	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
5601	unsigned int clk = adap->params.vpd.cclk * 1000;
5602	unsigned int selected_cpt = 0, selected_bpt = 0;
5603
5604	if (kbps > 0) {
5605		kbps *= 125;     /* -> bytes */
5606		for (cpt = 1; cpt <= 255; cpt++) {
5607			tps = clk / cpt;
5608			bpt = (kbps + tps / 2) / tps;
5609			if (bpt > 0 && bpt <= 255) {
5610				v = bpt * tps;
5611				delta = v >= kbps ? v - kbps : kbps - v;
5612				if (delta < mindelta) {
5613					mindelta = delta;
5614					selected_cpt = cpt;
5615					selected_bpt = bpt;
5616				}
5617			} else if (selected_cpt)
5618				break;
5619		}
5620		if (!selected_cpt)
5621			return -EINVAL;
5622	}
5623	t4_write_reg(adap, A_TP_TM_PIO_ADDR,
5624		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
5625	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5626	if (sched & 1)
5627		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
5628	else
5629		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
5630	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5631	return 0;
5632}
5633
5634/**
5635 *	t4_set_sched_ipg - set the IPG for a Tx HW packet rate scheduler
5636 *	@adap: the adapter
5637 *	@sched: the scheduler index
5638 *	@ipg: the interpacket delay in tenths of nanoseconds
5639 *
5640 *	Set the interpacket delay for a HW packet rate scheduler.
5641 */
5642int t4_set_sched_ipg(struct adapter *adap, int sched, unsigned int ipg)
5643{
5644	unsigned int v, addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
5645
5646	/* convert ipg to nearest number of core clocks */
5647	ipg *= core_ticks_per_usec(adap);
5648	ipg = (ipg + 5000) / 10000;
5649	if (ipg > M_TXTIMERSEPQ0)
5650		return -EINVAL;
5651
5652	t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
5653	v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
5654	if (sched & 1)
5655		v = (v & V_TXTIMERSEPQ0(M_TXTIMERSEPQ0)) | V_TXTIMERSEPQ1(ipg);
5656	else
5657		v = (v & V_TXTIMERSEPQ1(M_TXTIMERSEPQ1)) | V_TXTIMERSEPQ0(ipg);
5658	t4_write_reg(adap, A_TP_TM_PIO_DATA, v);
5659	t4_read_reg(adap, A_TP_TM_PIO_DATA);
5660	return 0;
5661}
5662
5663/*
5664 * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5665 * clocks.  The formula is
5666 *
5667 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5668 *
5669 * which is equivalent to
5670 *
5671 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5672 */
5673static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5674{
5675	u64 v = bytes256 * adap->params.vpd.cclk;
5676
5677	return v * 62 + v / 2;
5678}
5679
5680/**
5681 *	t4_get_chan_txrate - get the current per channel Tx rates
5682 *	@adap: the adapter
5683 *	@nic_rate: rates for NIC traffic
5684 *	@ofld_rate: rates for offloaded traffic
5685 *
5686 *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
5687 *	for each channel.
5688 */
5689void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5690{
5691	u32 v;
5692
5693	v = t4_read_reg(adap, A_TP_TX_TRATE);
5694	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
5695	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
5696	if (adap->chip_params->nchan > 2) {
5697		nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
5698		nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
5699	}
5700
5701	v = t4_read_reg(adap, A_TP_TX_ORATE);
5702	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
5703	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
5704	if (adap->chip_params->nchan > 2) {
5705		ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
5706		ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
5707	}
5708}
5709
5710/**
5711 *	t4_set_trace_filter - configure one of the tracing filters
5712 *	@adap: the adapter
5713 *	@tp: the desired trace filter parameters
5714 *	@idx: which filter to configure
5715 *	@enable: whether to enable or disable the filter
5716 *
5717 *	Configures one of the tracing filters available in HW.  If @tp is %NULL
5718 *	it indicates that the filter is already written in the register and it
5719 *	just needs to be enabled or disabled.
5720 */
5721int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5722    int idx, int enable)
5723{
5724	int i, ofst = idx * 4;
5725	u32 data_reg, mask_reg, cfg;
5726	u32 multitrc = F_TRCMULTIFILTER;
5727	u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
5728
5729	if (idx < 0 || idx >= NTRACE)
5730		return -EINVAL;
5731
5732	if (tp == NULL || !enable) {
5733		t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
5734		    enable ? en : 0);
5735		return 0;
5736	}
5737
5738	/*
5739	 * TODO - After T4 data book is updated, specify the exact
5740	 * section below.
5741	 *
5742	 * See T4 data book - MPS section for a complete description
5743	 * of the below if..else handling of A_MPS_TRC_CFG register
5744	 * value.
5745	 */
5746	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
5747	if (cfg & F_TRCMULTIFILTER) {
5748		/*
5749		 * If multiple tracers are enabled, then maximum
5750		 * capture size is 2.5KB (FIFO size of a single channel)
5751		 * minus 2 flits for CPL_TRACE_PKT header.
5752		 */
5753		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5754			return -EINVAL;
5755	} else {
5756		/*
5757		 * If multiple tracers are disabled, to avoid deadlocks
5758		 * maximum packet capture size of 9600 bytes is recommended.
5759		 * Also in this mode, only trace0 can be enabled and running.
5760		 */
5761		multitrc = 0;
5762		if (tp->snap_len > 9600 || idx)
5763			return -EINVAL;
5764	}
5765
5766	if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
5767	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
5768	    tp->min_len > M_TFMINPKTSIZE)
5769		return -EINVAL;
5770
5771	/* stop the tracer we'll be changing */
5772	t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
5773
5774	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
5775	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
5776	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
5777
5778	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5779		t4_write_reg(adap, data_reg, tp->data[i]);
5780		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5781	}
5782	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
5783		     V_TFCAPTUREMAX(tp->snap_len) |
5784		     V_TFMINPKTSIZE(tp->min_len));
5785	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
5786		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
5787		     (is_t4(adap) ?
5788		     V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
5789		     V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
5790
5791	return 0;
5792}
5793
5794/**
5795 *	t4_get_trace_filter - query one of the tracing filters
5796 *	@adap: the adapter
5797 *	@tp: the current trace filter parameters
5798 *	@idx: which trace filter to query
5799 *	@enabled: non-zero if the filter is enabled
5800 *
5801 *	Returns the current settings of one of the HW tracing filters.
5802 */
5803void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
5804			 int *enabled)
5805{
5806	u32 ctla, ctlb;
5807	int i, ofst = idx * 4;
5808	u32 data_reg, mask_reg;
5809
5810	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
5811	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
5812
5813	if (is_t4(adap)) {
5814		*enabled = !!(ctla & F_TFEN);
5815		tp->port =  G_TFPORT(ctla);
5816		tp->invert = !!(ctla & F_TFINVERTMATCH);
5817	} else {
5818		*enabled = !!(ctla & F_T5_TFEN);
5819		tp->port = G_T5_TFPORT(ctla);
5820		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
5821	}
5822	tp->snap_len = G_TFCAPTUREMAX(ctlb);
5823	tp->min_len = G_TFMINPKTSIZE(ctlb);
5824	tp->skip_ofst = G_TFOFFSET(ctla);
5825	tp->skip_len = G_TFLENGTH(ctla);
5826
5827	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
5828	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
5829	mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
5830
5831	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5832		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
5833		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
5834	}
5835}
5836
5837/**
5838 *	t4_pmtx_get_stats - returns the HW stats from PMTX
5839 *	@adap: the adapter
5840 *	@cnt: where to store the count statistics
5841 *	@cycles: where to store the cycle statistics
5842 *
5843 *	Returns performance statistics from PMTX.
5844 */
5845void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5846{
5847	int i;
5848	u32 data[2];
5849
5850	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5851		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
5852		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
5853		if (is_t4(adap))
5854			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
5855		else {
5856			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
5857					 A_PM_TX_DBG_DATA, data, 2,
5858					 A_PM_TX_DBG_STAT_MSB);
5859			cycles[i] = (((u64)data[0] << 32) | data[1]);
5860		}
5861	}
5862}
5863
5864/**
5865 *	t4_pmrx_get_stats - returns the HW stats from PMRX
5866 *	@adap: the adapter
5867 *	@cnt: where to store the count statistics
5868 *	@cycles: where to store the cycle statistics
5869 *
5870 *	Returns performance statistics from PMRX.
5871 */
5872void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5873{
5874	int i;
5875	u32 data[2];
5876
5877	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
5878		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
5879		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
5880		if (is_t4(adap)) {
5881			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
5882		} else {
5883			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
5884					 A_PM_RX_DBG_DATA, data, 2,
5885					 A_PM_RX_DBG_STAT_MSB);
5886			cycles[i] = (((u64)data[0] << 32) | data[1]);
5887		}
5888	}
5889}
5890
5891/**
5892 *	t4_get_mps_bg_map - return the buffer groups associated with a port
5893 *	@adap: the adapter
5894 *	@idx: the port index
5895 *
5896 *	Returns a bitmap indicating which MPS buffer groups are associated
5897 *	with the given port.  Bit i is set if buffer group i is used by the
5898 *	port.
5899 */
5900static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
5901{
5902	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
5903
5904	if (n == 0)
5905		return idx == 0 ? 0xf : 0;
5906	if (n == 1 && chip_id(adap) <= CHELSIO_T5)
5907		return idx < 2 ? (3 << (2 * idx)) : 0;
5908	return 1 << idx;
5909}
5910
5911/**
5912 *      t4_get_port_type_description - return Port Type string description
5913 *      @port_type: firmware Port Type enumeration
5914 */
5915const char *t4_get_port_type_description(enum fw_port_type port_type)
5916{
5917	static const char *const port_type_description[] = {
5918		"Fiber_XFI",
5919		"Fiber_XAUI",
5920		"BT_SGMII",
5921		"BT_XFI",
5922		"BT_XAUI",
5923		"KX4",
5924		"CX4",
5925		"KX",
5926		"KR",
5927		"SFP",
5928		"BP_AP",
5929		"BP4_AP",
5930		"QSFP_10G",
5931		"QSA",
5932		"QSFP",
5933		"BP40_BA",
5934		"KR4_100G",
5935		"CR4_QSFP",
5936		"CR_QSFP",
5937		"CR2_QSFP",
5938		"SFP28",
5939		"KR_SFP28",
5940	};
5941
5942	if (port_type < ARRAY_SIZE(port_type_description))
5943		return port_type_description[port_type];
5944	return "UNKNOWN";
5945}
5946
5947/**
5948 *      t4_get_port_stats_offset - collect port stats relative to a previous
5949 *				   snapshot
5950 *      @adap: The adapter
5951 *      @idx: The port
5952 *      @stats: Current stats to fill
5953 *      @offset: Previous stats snapshot
5954 */
5955void t4_get_port_stats_offset(struct adapter *adap, int idx,
5956		struct port_stats *stats,
5957		struct port_stats *offset)
5958{
5959	u64 *s, *o;
5960	int i;
5961
5962	t4_get_port_stats(adap, idx, stats);
5963	for (i = 0, s = (u64 *)stats, o = (u64 *)offset ;
5964			i < (sizeof(struct port_stats)/sizeof(u64)) ;
5965			i++, s++, o++)
5966		*s -= *o;
5967}
5968
5969/**
5970 *	t4_get_port_stats - collect port statistics
5971 *	@adap: the adapter
5972 *	@idx: the port index
5973 *	@p: the stats structure to fill
5974 *
5975 *	Collect statistics related to the given port from HW.
5976 */
5977void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
5978{
5979	u32 bgmap = t4_get_mps_bg_map(adap, idx);
5980	u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
5981
5982#define GET_STAT(name) \
5983	t4_read_reg64(adap, \
5984	(is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
5985	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
5986#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
5987
5988	p->tx_pause		= GET_STAT(TX_PORT_PAUSE);
5989	p->tx_octets		= GET_STAT(TX_PORT_BYTES);
5990	p->tx_frames		= GET_STAT(TX_PORT_FRAMES);
5991	p->tx_bcast_frames	= GET_STAT(TX_PORT_BCAST);
5992	p->tx_mcast_frames	= GET_STAT(TX_PORT_MCAST);
5993	p->tx_ucast_frames	= GET_STAT(TX_PORT_UCAST);
5994	p->tx_error_frames	= GET_STAT(TX_PORT_ERROR);
5995	p->tx_frames_64		= GET_STAT(TX_PORT_64B);
5996	p->tx_frames_65_127	= GET_STAT(TX_PORT_65B_127B);
5997	p->tx_frames_128_255	= GET_STAT(TX_PORT_128B_255B);
5998	p->tx_frames_256_511	= GET_STAT(TX_PORT_256B_511B);
5999	p->tx_frames_512_1023	= GET_STAT(TX_PORT_512B_1023B);
6000	p->tx_frames_1024_1518	= GET_STAT(TX_PORT_1024B_1518B);
6001	p->tx_frames_1519_max	= GET_STAT(TX_PORT_1519B_MAX);
6002	p->tx_drop		= GET_STAT(TX_PORT_DROP);
6003	p->tx_ppp0		= GET_STAT(TX_PORT_PPP0);
6004	p->tx_ppp1		= GET_STAT(TX_PORT_PPP1);
6005	p->tx_ppp2		= GET_STAT(TX_PORT_PPP2);
6006	p->tx_ppp3		= GET_STAT(TX_PORT_PPP3);
6007	p->tx_ppp4		= GET_STAT(TX_PORT_PPP4);
6008	p->tx_ppp5		= GET_STAT(TX_PORT_PPP5);
6009	p->tx_ppp6		= GET_STAT(TX_PORT_PPP6);
6010	p->tx_ppp7		= GET_STAT(TX_PORT_PPP7);
6011
6012	if (chip_id(adap) >= CHELSIO_T5) {
6013		if (stat_ctl & F_COUNTPAUSESTATTX) {
6014			p->tx_frames -= p->tx_pause;
6015			p->tx_octets -= p->tx_pause * 64;
6016		}
6017		if (stat_ctl & F_COUNTPAUSEMCTX)
6018			p->tx_mcast_frames -= p->tx_pause;
6019	}
6020
6021	p->rx_pause		= GET_STAT(RX_PORT_PAUSE);
6022	p->rx_octets		= GET_STAT(RX_PORT_BYTES);
6023	p->rx_frames		= GET_STAT(RX_PORT_FRAMES);
6024	p->rx_bcast_frames	= GET_STAT(RX_PORT_BCAST);
6025	p->rx_mcast_frames	= GET_STAT(RX_PORT_MCAST);
6026	p->rx_ucast_frames	= GET_STAT(RX_PORT_UCAST);
6027	p->rx_too_long		= GET_STAT(RX_PORT_MTU_ERROR);
6028	p->rx_jabber		= GET_STAT(RX_PORT_MTU_CRC_ERROR);
6029	p->rx_fcs_err		= GET_STAT(RX_PORT_CRC_ERROR);
6030	p->rx_len_err		= GET_STAT(RX_PORT_LEN_ERROR);
6031	p->rx_symbol_err	= GET_STAT(RX_PORT_SYM_ERROR);
6032	p->rx_runt		= GET_STAT(RX_PORT_LESS_64B);
6033	p->rx_frames_64		= GET_STAT(RX_PORT_64B);
6034	p->rx_frames_65_127	= GET_STAT(RX_PORT_65B_127B);
6035	p->rx_frames_128_255	= GET_STAT(RX_PORT_128B_255B);
6036	p->rx_frames_256_511	= GET_STAT(RX_PORT_256B_511B);
6037	p->rx_frames_512_1023	= GET_STAT(RX_PORT_512B_1023B);
6038	p->rx_frames_1024_1518	= GET_STAT(RX_PORT_1024B_1518B);
6039	p->rx_frames_1519_max	= GET_STAT(RX_PORT_1519B_MAX);
6040	p->rx_ppp0		= GET_STAT(RX_PORT_PPP0);
6041	p->rx_ppp1		= GET_STAT(RX_PORT_PPP1);
6042	p->rx_ppp2		= GET_STAT(RX_PORT_PPP2);
6043	p->rx_ppp3		= GET_STAT(RX_PORT_PPP3);
6044	p->rx_ppp4		= GET_STAT(RX_PORT_PPP4);
6045	p->rx_ppp5		= GET_STAT(RX_PORT_PPP5);
6046	p->rx_ppp6		= GET_STAT(RX_PORT_PPP6);
6047	p->rx_ppp7		= GET_STAT(RX_PORT_PPP7);
6048
6049	if (chip_id(adap) >= CHELSIO_T5) {
6050		if (stat_ctl & F_COUNTPAUSESTATRX) {
6051			p->rx_frames -= p->rx_pause;
6052			p->rx_octets -= p->rx_pause * 64;
6053		}
6054		if (stat_ctl & F_COUNTPAUSEMCRX)
6055			p->rx_mcast_frames -= p->rx_pause;
6056	}
6057
6058	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
6059	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
6060	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
6061	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
6062	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
6063	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
6064	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
6065	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
6066
6067#undef GET_STAT
6068#undef GET_STAT_COM
6069}
6070
6071/**
6072 *	t4_get_lb_stats - collect loopback port statistics
6073 *	@adap: the adapter
6074 *	@idx: the loopback port index
6075 *	@p: the stats structure to fill
6076 *
6077 *	Return HW statistics for the given loopback port.
6078 */
6079void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
6080{
6081	u32 bgmap = t4_get_mps_bg_map(adap, idx);
6082
6083#define GET_STAT(name) \
6084	t4_read_reg64(adap, \
6085	(is_t4(adap) ? \
6086	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
6087	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
6088#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
6089
6090	p->octets	= GET_STAT(BYTES);
6091	p->frames	= GET_STAT(FRAMES);
6092	p->bcast_frames	= GET_STAT(BCAST);
6093	p->mcast_frames	= GET_STAT(MCAST);
6094	p->ucast_frames	= GET_STAT(UCAST);
6095	p->error_frames	= GET_STAT(ERROR);
6096
6097	p->frames_64		= GET_STAT(64B);
6098	p->frames_65_127	= GET_STAT(65B_127B);
6099	p->frames_128_255	= GET_STAT(128B_255B);
6100	p->frames_256_511	= GET_STAT(256B_511B);
6101	p->frames_512_1023	= GET_STAT(512B_1023B);
6102	p->frames_1024_1518	= GET_STAT(1024B_1518B);
6103	p->frames_1519_max	= GET_STAT(1519B_MAX);
6104	p->drop			= GET_STAT(DROP_FRAMES);
6105
6106	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
6107	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
6108	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
6109	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
6110	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
6111	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
6112	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
6113	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
6114
6115#undef GET_STAT
6116#undef GET_STAT_COM
6117}
6118
6119/**
6120 *	t4_wol_magic_enable - enable/disable magic packet WoL
6121 *	@adap: the adapter
6122 *	@port: the physical port index
6123 *	@addr: MAC address expected in magic packets, %NULL to disable
6124 *
6125 *	Enables/disables magic packet wake-on-LAN for the selected port.
6126 */
6127void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
6128			 const u8 *addr)
6129{
6130	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
6131
6132	if (is_t4(adap)) {
6133		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
6134		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
6135		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
6136	} else {
6137		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
6138		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
6139		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
6140	}
6141
6142	if (addr) {
6143		t4_write_reg(adap, mag_id_reg_l,
6144			     (addr[2] << 24) | (addr[3] << 16) |
6145			     (addr[4] << 8) | addr[5]);
6146		t4_write_reg(adap, mag_id_reg_h,
6147			     (addr[0] << 8) | addr[1]);
6148	}
6149	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
6150			 V_MAGICEN(addr != NULL));
6151}
6152
6153/**
6154 *	t4_wol_pat_enable - enable/disable pattern-based WoL
6155 *	@adap: the adapter
6156 *	@port: the physical port index
6157 *	@map: bitmap of which HW pattern filters to set
6158 *	@mask0: byte mask for bytes 0-63 of a packet
6159 *	@mask1: byte mask for bytes 64-127 of a packet
6160 *	@crc: Ethernet CRC for selected bytes
6161 *	@enable: enable/disable switch
6162 *
6163 *	Sets the pattern filters indicated in @map to mask out the bytes
6164 *	specified in @mask0/@mask1 in received packets and compare the CRC of
6165 *	the resulting packet against @crc.  If @enable is %true pattern-based
6166 *	WoL is enabled, otherwise disabled.
6167 */
6168int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
6169		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
6170{
6171	int i;
6172	u32 port_cfg_reg;
6173
6174	if (is_t4(adap))
6175		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
6176	else
6177		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
6178
6179	if (!enable) {
6180		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
6181		return 0;
6182	}
6183	if (map > 0xff)
6184		return -EINVAL;
6185
6186#define EPIO_REG(name) \
6187	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
6188	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
6189
6190	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
6191	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
6192	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
6193
6194	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
6195		if (!(map & 1))
6196			continue;
6197
6198		/* write byte masks */
6199		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
6200		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i) | F_EPIOWR);
6201		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
6202		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
6203			return -ETIMEDOUT;
6204
6205		/* write CRC */
6206		t4_write_reg(adap, EPIO_REG(DATA0), crc);
6207		t4_write_reg(adap, EPIO_REG(OP), V_ADDRESS(i + 32) | F_EPIOWR);
6208		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
6209		if (t4_read_reg(adap, EPIO_REG(OP)) & F_BUSY)
6210			return -ETIMEDOUT;
6211	}
6212#undef EPIO_REG
6213
6214	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
6215	return 0;
6216}
6217
6218/*     t4_mk_filtdelwr - create a delete filter WR
6219 *     @ftid: the filter ID
6220 *     @wr: the filter work request to populate
6221 *     @qid: ingress queue to receive the delete notification
6222 *
6223 *     Creates a filter work request to delete the supplied filter.  If @qid is
6224 *     negative the delete notification is suppressed.
6225 */
6226void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
6227{
6228	memset(wr, 0, sizeof(*wr));
6229	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
6230	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
6231	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
6232				    V_FW_FILTER_WR_NOREPLY(qid < 0));
6233	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
6234	if (qid >= 0)
6235		wr->rx_chan_rx_rpl_iq =
6236				cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
6237}
6238
/*
 * Fill in the common header of a FW command: the opcode (FW_<cmd>_CMD),
 * the REQUEST flag, the read/write direction flag, and the command length
 * in 16-byte units derived from sizeof(var) via FW_LEN16().
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
6245
/**
 *	t4_fwaddrspace_write - write into the firmware's address space
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: the address to write
 *	@val: the value to write
 *
 *	Issues a FW_LDST command through the given mailbox to write @val at
 *	@addr within the FW_LDST_ADDRSPC_FIRMWARE address space.
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			  u32 addr, u32 val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
					F_FW_CMD_REQUEST |
					F_FW_CMD_WRITE |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.addrval.addr = cpu_to_be32(addr);
	c.u.addrval.val = cpu_to_be32(val);

	/* No response payload is expected; status comes back as the result. */
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
6264
6265/**
6266 *	t4_mdio_rd - read a PHY register through MDIO
6267 *	@adap: the adapter
6268 *	@mbox: mailbox to use for the FW command
6269 *	@phy_addr: the PHY address
6270 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
6271 *	@reg: the register to read
6272 *	@valp: where to store the value
6273 *
6274 *	Issues a FW command through the given mailbox to read a PHY register.
6275 */
6276int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6277	       unsigned int mmd, unsigned int reg, unsigned int *valp)
6278{
6279	int ret;
6280	u32 ldst_addrspace;
6281	struct fw_ldst_cmd c;
6282
6283	memset(&c, 0, sizeof(c));
6284	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
6285	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6286					F_FW_CMD_REQUEST | F_FW_CMD_READ |
6287					ldst_addrspace);
6288	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6289	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
6290					 V_FW_LDST_CMD_MMD(mmd));
6291	c.u.mdio.raddr = cpu_to_be16(reg);
6292
6293	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6294	if (ret == 0)
6295		*valp = be16_to_cpu(c.u.mdio.rval);
6296	return ret;
6297}
6298
6299/**
6300 *	t4_mdio_wr - write a PHY register through MDIO
6301 *	@adap: the adapter
6302 *	@mbox: mailbox to use for the FW command
6303 *	@phy_addr: the PHY address
6304 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
6305 *	@reg: the register to write
6306 *	@valp: value to write
6307 *
6308 *	Issues a FW command through the given mailbox to write a PHY register.
6309 */
6310int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6311	       unsigned int mmd, unsigned int reg, unsigned int val)
6312{
6313	u32 ldst_addrspace;
6314	struct fw_ldst_cmd c;
6315
6316	memset(&c, 0, sizeof(c));
6317	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
6318	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6319					F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6320					ldst_addrspace);
6321	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6322	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
6323					 V_FW_LDST_CMD_MMD(mmd));
6324	c.u.mdio.raddr = cpu_to_be16(reg);
6325	c.u.mdio.rval = cpu_to_be16(val);
6326
6327	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6328}
6329
6330/**
6331 *
6332 *	t4_sge_decode_idma_state - decode the idma state
 *	@adapter: the adapter
6334 *	@state: the state idma is stuck in
6335 */
6336void t4_sge_decode_idma_state(struct adapter *adapter, int state)
6337{
6338	static const char * const t4_decode[] = {
6339		"IDMA_IDLE",
6340		"IDMA_PUSH_MORE_CPL_FIFO",
6341		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6342		"Not used",
6343		"IDMA_PHYSADDR_SEND_PCIEHDR",
6344		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6345		"IDMA_PHYSADDR_SEND_PAYLOAD",
6346		"IDMA_SEND_FIFO_TO_IMSG",
6347		"IDMA_FL_REQ_DATA_FL_PREP",
6348		"IDMA_FL_REQ_DATA_FL",
6349		"IDMA_FL_DROP",
6350		"IDMA_FL_H_REQ_HEADER_FL",
6351		"IDMA_FL_H_SEND_PCIEHDR",
6352		"IDMA_FL_H_PUSH_CPL_FIFO",
6353		"IDMA_FL_H_SEND_CPL",
6354		"IDMA_FL_H_SEND_IP_HDR_FIRST",
6355		"IDMA_FL_H_SEND_IP_HDR",
6356		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
6357		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
6358		"IDMA_FL_H_SEND_IP_HDR_PADDING",
6359		"IDMA_FL_D_SEND_PCIEHDR",
6360		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6361		"IDMA_FL_D_REQ_NEXT_DATA_FL",
6362		"IDMA_FL_SEND_PCIEHDR",
6363		"IDMA_FL_PUSH_CPL_FIFO",
6364		"IDMA_FL_SEND_CPL",
6365		"IDMA_FL_SEND_PAYLOAD_FIRST",
6366		"IDMA_FL_SEND_PAYLOAD",
6367		"IDMA_FL_REQ_NEXT_DATA_FL",
6368		"IDMA_FL_SEND_NEXT_PCIEHDR",
6369		"IDMA_FL_SEND_PADDING",
6370		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
6371		"IDMA_FL_SEND_FIFO_TO_IMSG",
6372		"IDMA_FL_REQ_DATAFL_DONE",
6373		"IDMA_FL_REQ_HEADERFL_DONE",
6374	};
6375	static const char * const t5_decode[] = {
6376		"IDMA_IDLE",
6377		"IDMA_ALMOST_IDLE",
6378		"IDMA_PUSH_MORE_CPL_FIFO",
6379		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6380		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6381		"IDMA_PHYSADDR_SEND_PCIEHDR",
6382		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6383		"IDMA_PHYSADDR_SEND_PAYLOAD",
6384		"IDMA_SEND_FIFO_TO_IMSG",
6385		"IDMA_FL_REQ_DATA_FL",
6386		"IDMA_FL_DROP",
6387		"IDMA_FL_DROP_SEND_INC",
6388		"IDMA_FL_H_REQ_HEADER_FL",
6389		"IDMA_FL_H_SEND_PCIEHDR",
6390		"IDMA_FL_H_PUSH_CPL_FIFO",
6391		"IDMA_FL_H_SEND_CPL",
6392		"IDMA_FL_H_SEND_IP_HDR_FIRST",
6393		"IDMA_FL_H_SEND_IP_HDR",
6394		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
6395		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
6396		"IDMA_FL_H_SEND_IP_HDR_PADDING",
6397		"IDMA_FL_D_SEND_PCIEHDR",
6398		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6399		"IDMA_FL_D_REQ_NEXT_DATA_FL",
6400		"IDMA_FL_SEND_PCIEHDR",
6401		"IDMA_FL_PUSH_CPL_FIFO",
6402		"IDMA_FL_SEND_CPL",
6403		"IDMA_FL_SEND_PAYLOAD_FIRST",
6404		"IDMA_FL_SEND_PAYLOAD",
6405		"IDMA_FL_REQ_NEXT_DATA_FL",
6406		"IDMA_FL_SEND_NEXT_PCIEHDR",
6407		"IDMA_FL_SEND_PADDING",
6408		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
6409	};
6410	static const char * const t6_decode[] = {
6411		"IDMA_IDLE",
6412		"IDMA_PUSH_MORE_CPL_FIFO",
6413		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6414		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6415		"IDMA_PHYSADDR_SEND_PCIEHDR",
6416		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6417		"IDMA_PHYSADDR_SEND_PAYLOAD",
6418		"IDMA_FL_REQ_DATA_FL",
6419		"IDMA_FL_DROP",
6420		"IDMA_FL_DROP_SEND_INC",
6421		"IDMA_FL_H_REQ_HEADER_FL",
6422		"IDMA_FL_H_SEND_PCIEHDR",
6423		"IDMA_FL_H_PUSH_CPL_FIFO",
6424		"IDMA_FL_H_SEND_CPL",
6425		"IDMA_FL_H_SEND_IP_HDR_FIRST",
6426		"IDMA_FL_H_SEND_IP_HDR",
6427		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
6428		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
6429		"IDMA_FL_H_SEND_IP_HDR_PADDING",
6430		"IDMA_FL_D_SEND_PCIEHDR",
6431		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6432		"IDMA_FL_D_REQ_NEXT_DATA_FL",
6433		"IDMA_FL_SEND_PCIEHDR",
6434		"IDMA_FL_PUSH_CPL_FIFO",
6435		"IDMA_FL_SEND_CPL",
6436		"IDMA_FL_SEND_PAYLOAD_FIRST",
6437		"IDMA_FL_SEND_PAYLOAD",
6438		"IDMA_FL_REQ_NEXT_DATA_FL",
6439		"IDMA_FL_SEND_NEXT_PCIEHDR",
6440		"IDMA_FL_SEND_PADDING",
6441		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
6442	};
6443	static const u32 sge_regs[] = {
6444		A_SGE_DEBUG_DATA_LOW_INDEX_2,
6445		A_SGE_DEBUG_DATA_LOW_INDEX_3,
6446		A_SGE_DEBUG_DATA_HIGH_INDEX_10,
6447	};
6448	const char * const *sge_idma_decode;
6449	int sge_idma_decode_nstates;
6450	int i;
6451	unsigned int chip_version = chip_id(adapter);
6452
6453	/* Select the right set of decode strings to dump depending on the
6454	 * adapter chip type.
6455	 */
6456	switch (chip_version) {
6457	case CHELSIO_T4:
6458		sge_idma_decode = (const char * const *)t4_decode;
6459		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6460		break;
6461
6462	case CHELSIO_T5:
6463		sge_idma_decode = (const char * const *)t5_decode;
6464		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6465		break;
6466
6467	case CHELSIO_T6:
6468		sge_idma_decode = (const char * const *)t6_decode;
6469		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
6470		break;
6471
6472	default:
6473		CH_ERR(adapter,	"Unsupported chip version %d\n", chip_version);
6474		return;
6475	}
6476
6477	if (state < sge_idma_decode_nstates)
6478		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6479	else
6480		CH_WARN(adapter, "idma state %d unknown\n", state);
6481
6482	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6483		CH_WARN(adapter, "SGE register %#x value %#x\n",
6484			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6485}
6486
6487/**
6488 *      t4_sge_ctxt_flush - flush the SGE context cache
6489 *      @adap: the adapter
6490 *      @mbox: mailbox to use for the FW command
6491 *
6492 *      Issues a FW command through the given mailbox to flush the
6493 *      SGE context cache.
6494 */
6495int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
6496{
6497	int ret;
6498	u32 ldst_addrspace;
6499	struct fw_ldst_cmd c;
6500
6501	memset(&c, 0, sizeof(c));
6502	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
6503	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
6504					F_FW_CMD_REQUEST | F_FW_CMD_READ |
6505					ldst_addrspace);
6506	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6507	c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
6508
6509	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6510	return ret;
6511}
6512
6513/**
6514 *      t4_fw_hello - establish communication with FW
6515 *      @adap: the adapter
6516 *      @mbox: mailbox to use for the FW command
6517 *      @evt_mbox: mailbox to receive async FW events
6518 *      @master: specifies the caller's willingness to be the device master
6519 *	@state: returns the current device state (if non-NULL)
6520 *
6521 *	Issues a command to establish communication with FW.  Returns either
6522 *	an error (negative integer) or the mailbox of the Master PF.
6523 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	/* Build the HELLO command, encoding our mastership preference and
	 * the mailbox the firmware should use for async event notification.
	 */
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = cpu_to_be32(
		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
					mbox : M_FW_HELLO_CMD_MBMASTER) |
		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
		F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so ...
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
			t4_report_fw_error(adap);
		return ret;
	}

	/* Decode the response: the Master PF's mailbox and device state. */
	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR|F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			/* Poll PCIE_FW in 50ms steps. */
			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR|F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	/* Non-negative return is the Master PF's mailbox number. */
	return master_mbox;
}
6642
6643/**
6644 *	t4_fw_bye - end communication with FW
6645 *	@adap: the adapter
6646 *	@mbox: mailbox to use for the FW command
6647 *
6648 *	Issues a command to terminate communication with FW.
6649 */
6650int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6651{
6652	struct fw_bye_cmd c;
6653
6654	memset(&c, 0, sizeof(c));
6655	INIT_CMD(c, BYE, WRITE);
6656	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6657}
6658
6659/**
6660 *	t4_fw_reset - issue a reset to FW
6661 *	@adap: the adapter
6662 *	@mbox: mailbox to use for the FW command
6663 *	@reset: specifies the type of reset to perform
6664 *
6665 *	Issues a reset command of the specified type to FW.
6666 */
6667int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6668{
6669	struct fw_reset_cmd c;
6670
6671	memset(&c, 0, sizeof(c));
6672	INIT_CMD(c, RESET, WRITE);
6673	c.val = cpu_to_be32(reset);
6674	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6675}
6676
6677/**
6678 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
6679 *	@adap: the adapter
6680 *	@mbox: mailbox to use for the FW RESET command (if desired)
6681 *	@force: force uP into RESET even if FW RESET command fails
6682 *
6683 *	Issues a RESET command to firmware (if desired) with a HALT indication
6684 *	and then puts the microprocessor into RESET state.  The RESET command
6685 *	will only be issued if a legitimate mailbox is provided (mbox <=
6686 *	M_PCIE_FW_MASTER).
6687 *
6688 *	This is generally used in order for the host to safely manipulate the
6689 *	adapter without fear of conflicting with whatever the firmware might
6690 *	be doing.  The only way out of this state is to RESTART the firmware
6691 *	...
6692 */
6693int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
6694{
6695	int ret = 0;
6696
6697	/*
6698	 * If a legitimate mailbox is provided, issue a RESET command
6699	 * with a HALT indication.
6700	 */
6701	if (mbox <= M_PCIE_FW_MASTER) {
6702		struct fw_reset_cmd c;
6703
6704		memset(&c, 0, sizeof(c));
6705		INIT_CMD(c, RESET, WRITE);
6706		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
6707		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
6708		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6709	}
6710
6711	/*
6712	 * Normally we won't complete the operation if the firmware RESET
6713	 * command fails but if our caller insists we'll go ahead and put the
6714	 * uP into RESET.  This can be useful if the firmware is hung or even
6715	 * missing ...  We'll have to take the risk of putting the uP into
6716	 * RESET without the cooperation of firmware in that case.
6717	 *
6718	 * We also force the firmware's HALT flag to be on in case we bypassed
6719	 * the firmware RESET command above or we're dealing with old firmware
6720	 * which doesn't have the HALT capability.  This will serve as a flag
6721	 * for the incoming firmware to know that it's coming out of a HALT
6722	 * rather than a RESET ... if it's new enough to understand that ...
6723	 */
6724	if (ret == 0 || force) {
6725		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
6726		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
6727				 F_PCIE_FW_HALT);
6728	}
6729
6730	/*
6731	 * And we always return the result of the firmware RESET command
6732	 * even when we force the uP into RESET ...
6733	 */
6734	return ret;
6735}
6736
6737/**
6738 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
6739 *	@adap: the adapter
6740 *	@reset: if we want to do a RESET to restart things
6741 *
6742 *	Restart firmware previously halted by t4_fw_halt().  On successful
6743 *	return the previous PF Master remains as the new PF Master and there
6744 *	is no need to issue a new HELLO command, etc.
6745 *
6746 *	We do this in two ways:
6747 *
6748 *	 1. If we're dealing with newer firmware we'll simply want to take
6749 *	    the chip's microprocessor out of RESET.  This will cause the
6750 *	    firmware to start up from its start vector.  And then we'll loop
6751 *	    until the firmware indicates it's started again (PCIE_FW.HALT
6752 *	    reset to 0) or we timeout.
6753 *
6754 *	 2. If we're dealing with older firmware then we'll need to RESET
6755 *	    the chip since older firmware won't recognize the PCIE_FW.HALT
6756 *	    flag and automatically RESET itself on startup.
6757 */
6758int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
6759{
6760	if (reset) {
6761		/*
6762		 * Since we're directing the RESET instead of the firmware
6763		 * doing it automatically, we need to clear the PCIE_FW.HALT
6764		 * bit.
6765		 */
6766		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
6767
6768		/*
6769		 * If we've been given a valid mailbox, first try to get the
6770		 * firmware to do the RESET.  If that works, great and we can
6771		 * return success.  Otherwise, if we haven't been given a
6772		 * valid mailbox or the RESET command failed, fall back to
6773		 * hitting the chip with a hammer.
6774		 */
6775		if (mbox <= M_PCIE_FW_MASTER) {
6776			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6777			msleep(100);
6778			if (t4_fw_reset(adap, mbox,
6779					F_PIORST | F_PIORSTMODE) == 0)
6780				return 0;
6781		}
6782
6783		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
6784		msleep(2000);
6785	} else {
6786		int ms;
6787
6788		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
6789		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
6790			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
6791				return FW_SUCCESS;
6792			msleep(100);
6793			ms += 100;
6794		}
6795		return -ETIMEDOUT;
6796	}
6797	return 0;
6798}
6799
6800/**
6801 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
6802 *	@adap: the adapter
6803 *	@mbox: mailbox to use for the FW RESET command (if desired)
6804 *	@fw_data: the firmware image to write
6805 *	@size: image size
6806 *	@force: force upgrade even if firmware doesn't cooperate
6807 *
6808 *	Perform all of the steps necessary for upgrading an adapter's
6809 *	firmware image.  Normally this requires the cooperation of the
6810 *	existing firmware in order to halt all existing activities
6811 *	but if an invalid mailbox token is passed in we skip that step
6812 *	(though we'll still put the adapter microprocessor into RESET in
6813 *	that case).
6814 *
6815 *	On successful return the new firmware will have been loaded and
6816 *	the adapter will have been fully RESET losing all previous setup
6817 *	state.  On unsuccessful return the adapter may be completely hosed ...
6818 *	positive errno indicates that the adapter is ~probably~ intact, a
6819 *	negative errno indicates that things are looking bad ...
6820 */
6821int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
6822		  const u8 *fw_data, unsigned int size, int force)
6823{
6824	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
6825	unsigned int bootstrap =
6826	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
6827	int reset, ret;
6828
6829	if (!t4_fw_matches_chip(adap, fw_hdr))
6830		return -EINVAL;
6831
6832	if (!bootstrap) {
6833		ret = t4_fw_halt(adap, mbox, force);
6834		if (ret < 0 && !force)
6835			return ret;
6836	}
6837
6838	ret = t4_load_fw(adap, fw_data, size);
6839	if (ret < 0 || bootstrap)
6840		return ret;
6841
6842	/*
6843	 * Older versions of the firmware don't understand the new
6844	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
6845	 * restart.  So for newly loaded older firmware we'll have to do the
6846	 * RESET for it so it starts up on a clean slate.  We can tell if
6847	 * the newly loaded firmware will handle this right by checking
6848	 * its header flags to see if it advertises the capability.
6849	 */
6850	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
6851	return t4_fw_restart(adap, mbox, reset);
6852}
6853
6854/**
6855 *	t4_fw_initialize - ask FW to initialize the device
6856 *	@adap: the adapter
6857 *	@mbox: mailbox to use for the FW command
6858 *
6859 *	Issues a command to FW to partially initialize the device.  This
6860 *	performs initialization that generally doesn't depend on user input.
6861 */
6862int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
6863{
6864	struct fw_initialize_cmd c;
6865
6866	memset(&c, 0, sizeof(c));
6867	INIT_CMD(c, INITIALIZE, WRITE);
6868	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6869}
6870
6871/**
6872 *	t4_query_params_rw - query FW or device parameters
6873 *	@adap: the adapter
6874 *	@mbox: mailbox to use for the FW command
6875 *	@pf: the PF
6876 *	@vf: the VF
6877 *	@nparams: the number of parameters
6878 *	@params: the parameter names
6879 *	@val: the parameter values
6880 *	@rw: Write and read flag
6881 *
6882 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
6883 *	queried at once.
6884 */
6885int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
6886		       unsigned int vf, unsigned int nparams, const u32 *params,
6887		       u32 *val, int rw)
6888{
6889	int i, ret;
6890	struct fw_params_cmd c;
6891	__be32 *p = &c.param[0].mnem;
6892
6893	if (nparams > 7)
6894		return -EINVAL;
6895
6896	memset(&c, 0, sizeof(c));
6897	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
6898				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
6899				  V_FW_PARAMS_CMD_PFN(pf) |
6900				  V_FW_PARAMS_CMD_VFN(vf));
6901	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6902
6903	for (i = 0; i < nparams; i++) {
6904		*p++ = cpu_to_be32(*params++);
6905		if (rw)
6906			*p = cpu_to_be32(*(val + i));
6907		p++;
6908	}
6909
6910	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6911	if (ret == 0)
6912		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
6913			*val++ = be32_to_cpu(*p);
6914	return ret;
6915}
6916
/**
 *	t4_query_params - query FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *
 *	Convenience wrapper around t4_query_params_rw() with @rw set to 0,
 *	i.e. a pure read of up to 7 parameters.
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
}
6923
6924/**
6925 *      t4_set_params_timeout - sets FW or device parameters
6926 *      @adap: the adapter
6927 *      @mbox: mailbox to use for the FW command
6928 *      @pf: the PF
6929 *      @vf: the VF
6930 *      @nparams: the number of parameters
6931 *      @params: the parameter names
6932 *      @val: the parameter values
6933 *      @timeout: the timeout time
6934 *
6935 *      Sets the value of FW or device parameters.  Up to 7 parameters can be
6936 *      specified at once.
6937 */
6938int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
6939			  unsigned int pf, unsigned int vf,
6940			  unsigned int nparams, const u32 *params,
6941			  const u32 *val, int timeout)
6942{
6943	struct fw_params_cmd c;
6944	__be32 *p = &c.param[0].mnem;
6945
6946	if (nparams > 7)
6947		return -EINVAL;
6948
6949	memset(&c, 0, sizeof(c));
6950	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
6951				  F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
6952				  V_FW_PARAMS_CMD_PFN(pf) |
6953				  V_FW_PARAMS_CMD_VFN(vf));
6954	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6955
6956	while (nparams--) {
6957		*p++ = cpu_to_be32(*params++);
6958		*p++ = cpu_to_be32(*val++);
6959	}
6960
6961	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
6962}
6963
6964/**
6965 *	t4_set_params - sets FW or device parameters
6966 *	@adap: the adapter
6967 *	@mbox: mailbox to use for the FW command
6968 *	@pf: the PF
6969 *	@vf: the VF
6970 *	@nparams: the number of parameters
6971 *	@params: the parameter names
6972 *	@val: the parameter values
6973 *
6974 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
6975 *	specified at once.
6976 */
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val)
{
	/* Delegate with the default (maximum) mailbox command timeout. */
	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
				     FW_CMD_MAX_TIMEOUT);
}
6984
6985/**
6986 *	t4_cfg_pfvf - configure PF/VF resource limits
6987 *	@adap: the adapter
6988 *	@mbox: mailbox to use for the FW command
6989 *	@pf: the PF being configured
6990 *	@vf: the VF being configured
6991 *	@txq: the max number of egress queues
6992 *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
6993 *	@rxqi: the max number of interrupt-capable ingress queues
6994 *	@rxq: the max number of interruptless ingress queues
6995 *	@tc: the PCI traffic class
6996 *	@vi: the max number of virtual interfaces
6997 *	@cmask: the channel access rights mask for the PF/VF
6998 *	@pmask: the port access rights mask for the PF/VF
6999 *	@nexact: the maximum number of exact MPS filters
7000 *	@rcaps: read capabilities
7001 *	@wxcaps: write/execute capabilities
7002 *
7003 *	Configures resource limits and capabilities for a physical or virtual
7004 *	function.
7005 */
7006int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7007		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7008		unsigned int rxqi, unsigned int rxq, unsigned int tc,
7009		unsigned int vi, unsigned int cmask, unsigned int pmask,
7010		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7011{
7012	struct fw_pfvf_cmd c;
7013
7014	memset(&c, 0, sizeof(c));
7015	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
7016				  F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
7017				  V_FW_PFVF_CMD_VFN(vf));
7018	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7019	c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
7020				     V_FW_PFVF_CMD_NIQ(rxq));
7021	c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
7022				    V_FW_PFVF_CMD_PMASK(pmask) |
7023				    V_FW_PFVF_CMD_NEQ(txq));
7024	c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
7025				      V_FW_PFVF_CMD_NVI(vi) |
7026				      V_FW_PFVF_CMD_NEXACTF(nexact));
7027	c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
7028				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
7029				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
7030	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7031}
7032
7033/**
7034 *	t4_alloc_vi_func - allocate a virtual interface
7035 *	@adap: the adapter
7036 *	@mbox: mailbox to use for the FW command
7037 *	@port: physical port associated with the VI
7038 *	@pf: the PF owning the VI
7039 *	@vf: the VF owning the VI
7040 *	@nmac: number of MAC addresses needed (1 to 5)
7041 *	@mac: the MAC addresses of the VI
7042 *	@rss_size: size of RSS table slice associated with this VI
7043 *	@portfunc: which Port Application Function MAC Address is desired
7044 *	@idstype: Intrusion Detection Type
7045 *
7046 *	Allocates a virtual interface for the given physical port.  If @mac is
7047 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
7048 *	If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
7049 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
7050 *	stored consecutively so the space needed is @nmac * 6 bytes.
7051 *	Returns a negative error number or the non-negative VI id.
7052 */
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
		     unsigned int port, unsigned int pf, unsigned int vf,
		     unsigned int nmac, u8 *mac, u16 *rss_size,
		     unsigned int portfunc, unsigned int idstype)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
				     V_FW_VI_CMD_FUNC(portfunc));
	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
	/* The FW field holds the number of requested MACs minus one. */
	c.nmac = nmac - 1;
	if(!rss_size)
		c.norss_rsssize = F_FW_VI_CMD_NORSS;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		/* Copy out the additional MAC addresses; each case falls
		 * through intentionally to pick up the lower-numbered ones.
		 */
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
	/* Success: return the newly allocated VI id. */
	return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
}
7094
7095/**
7096 *      t4_alloc_vi - allocate an [Ethernet Function] virtual interface
7097 *      @adap: the adapter
7098 *      @mbox: mailbox to use for the FW command
7099 *      @port: physical port associated with the VI
7100 *      @pf: the PF owning the VI
7101 *      @vf: the VF owning the VI
7102 *      @nmac: number of MAC addresses needed (1 to 5)
7103 *      @mac: the MAC addresses of the VI
7104 *      @rss_size: size of RSS table slice associated with this VI
7105 *
7106 *	backwards compatible and convieniance routine to allocate a Virtual
7107 *	Interface with a Ethernet Port Application Function and Intrustion
7108 *	Detection System disabled.
7109 */
7110int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7111		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7112		u16 *rss_size)
7113{
7114	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
7115				FW_VI_FUNC_ETH, 0);
7116}
7117
7118/**
7119 * 	t4_free_vi - free a virtual interface
7120 * 	@adap: the adapter
7121 * 	@mbox: mailbox to use for the FW command
7122 * 	@pf: the PF owning the VI
7123 * 	@vf: the VF owning the VI
7124 * 	@viid: virtual interface identifiler
7125 *
7126 * 	Free a previously allocated virtual interface.
7127 */
7128int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7129	       unsigned int vf, unsigned int viid)
7130{
7131	struct fw_vi_cmd c;
7132
7133	memset(&c, 0, sizeof(c));
7134	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
7135				  F_FW_CMD_REQUEST |
7136				  F_FW_CMD_EXEC |
7137				  V_FW_VI_CMD_PFN(pf) |
7138				  V_FW_VI_CMD_VFN(vf));
7139	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
7140	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
7141
7142	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7143}
7144
7145/**
7146 *	t4_set_rxmode - set Rx properties of a virtual interface
7147 *	@adap: the adapter
7148 *	@mbox: mailbox to use for the FW command
7149 *	@viid: the VI id
7150 *	@mtu: the new MTU or -1
7151 *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
7152 *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
7153 *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7154 *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7155 *	@sleep_ok: if true we may sleep while awaiting command completion
7156 *
7157 *	Sets Rx properties of a virtual interface.
7158 */
7159int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7160		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
7161		  bool sleep_ok)
7162{
7163	struct fw_vi_rxmode_cmd c;
7164
7165	/* convert to FW values */
7166	if (mtu < 0)
7167		mtu = M_FW_VI_RXMODE_CMD_MTU;
7168	if (promisc < 0)
7169		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
7170	if (all_multi < 0)
7171		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
7172	if (bcast < 0)
7173		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
7174	if (vlanex < 0)
7175		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
7176
7177	memset(&c, 0, sizeof(c));
7178	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
7179				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7180				   V_FW_VI_RXMODE_CMD_VIID(viid));
7181	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7182	c.mtu_to_vlanexen =
7183		cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
7184			    V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
7185			    V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
7186			    V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
7187			    V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
7188	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7189}
7190
7191/**
7192 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7193 *	@adap: the adapter
7194 *	@mbox: mailbox to use for the FW command
7195 *	@viid: the VI id
7196 *	@free: if true any existing filters for this VI id are first removed
7197 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
7198 *	@addr: the MAC address(es)
7199 *	@idx: where to store the index of each allocated filter
7200 *	@hash: pointer to hash address filter bitmap
7201 *	@sleep_ok: call is allowed to sleep
7202 *
7203 *	Allocates an exact-match filter for each of the supplied addresses and
7204 *	sets it to the corresponding address.  If @idx is not %NULL it should
7205 *	have at least @naddr entries, each of which will be set to the index of
7206 *	the filter allocated for the corresponding MAC address.  If a filter
7207 *	could not be allocated for an address its index is set to 0xffff.
7208 *	If @hash is not %NULL addresses that fail to allocate an exact filter
7209 *	are hashed and update the hash filter bitmap pointed at by @hash.
7210 *
7211 *	Returns a negative error number or the number of filters allocated.
7212 */
7213int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
7214		      unsigned int viid, bool free, unsigned int naddr,
7215		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
7216{
7217	int offset, ret = 0;
7218	struct fw_vi_mac_cmd c;
7219	unsigned int nfilters = 0;
7220	unsigned int max_naddr = adap->chip_params->mps_tcam_size;
7221	unsigned int rem = naddr;
7222
7223	if (naddr > max_naddr)
7224		return -EINVAL;
7225
7226	for (offset = 0; offset < naddr ; /**/) {
7227		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
7228					 ? rem
7229					 : ARRAY_SIZE(c.u.exact));
7230		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7231						     u.exact[fw_naddr]), 16);
7232		struct fw_vi_mac_exact *p;
7233		int i;
7234
7235		memset(&c, 0, sizeof(c));
7236		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7237					   F_FW_CMD_REQUEST |
7238					   F_FW_CMD_WRITE |
7239					   V_FW_CMD_EXEC(free) |
7240					   V_FW_VI_MAC_CMD_VIID(viid));
7241		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
7242						  V_FW_CMD_LEN16(len16));
7243
7244		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7245			p->valid_to_idx =
7246				cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
7247					    V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
7248			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
7249		}
7250
7251		/*
7252		 * It's okay if we run out of space in our MAC address arena.
7253		 * Some of the addresses we submit may get stored so we need
7254		 * to run through the reply to see what the results were ...
7255		 */
7256		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7257		if (ret && ret != -FW_ENOMEM)
7258			break;
7259
7260		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7261			u16 index = G_FW_VI_MAC_CMD_IDX(
7262						be16_to_cpu(p->valid_to_idx));
7263
7264			if (idx)
7265				idx[offset+i] = (index >=  max_naddr
7266						 ? 0xffff
7267						 : index);
7268			if (index < max_naddr)
7269				nfilters++;
7270			else if (hash)
7271				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
7272		}
7273
7274		free = false;
7275		offset += fw_naddr;
7276		rem -= fw_naddr;
7277	}
7278
7279	if (ret == 0 || ret == -FW_ENOMEM)
7280		ret = nfilters;
7281	return ret;
7282}
7283
7284/**
7285 *	t4_change_mac - modifies the exact-match filter for a MAC address
7286 *	@adap: the adapter
7287 *	@mbox: mailbox to use for the FW command
7288 *	@viid: the VI id
7289 *	@idx: index of existing filter for old value of MAC address, or -1
7290 *	@addr: the new MAC address value
7291 *	@persist: whether a new MAC allocation should be persistent
7292 *	@add_smt: if true also add the address to the HW SMT
7293 *
7294 *	Modifies an exact-match filter and sets it to the new MAC address if
7295 *	@idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
7296 *	latter case the address is added persistently if @persist is %true.
7297 *
7298 *	Note that in general it is not possible to modify the value of a given
7299 *	filter so the generic way to modify an address filter is to free the one
7300 *	being used by the old address value and allocate a new filter for the
7301 *	new address value.
7302 *
7303 *	Returns a negative error number or the index of the filter with the new
7304 *	MAC value.  Note that this index may differ from @idx.
7305 */
7306int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
7307		  int idx, const u8 *addr, bool persist, bool add_smt)
7308{
7309	int ret, mode;
7310	struct fw_vi_mac_cmd c;
7311	struct fw_vi_mac_exact *p = c.u.exact;
7312	unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
7313
7314	if (idx < 0)		/* new allocation */
7315		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
7316	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
7317
7318	memset(&c, 0, sizeof(c));
7319	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7320				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7321				   V_FW_VI_MAC_CMD_VIID(viid));
7322	c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
7323	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
7324				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
7325				      V_FW_VI_MAC_CMD_IDX(idx));
7326	memcpy(p->macaddr, addr, sizeof(p->macaddr));
7327
7328	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7329	if (ret == 0) {
7330		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
7331		if (ret >= max_mac_addr)
7332			ret = -ENOMEM;
7333	}
7334	return ret;
7335}
7336
7337/**
7338 *	t4_set_addr_hash - program the MAC inexact-match hash filter
7339 *	@adap: the adapter
7340 *	@mbox: mailbox to use for the FW command
7341 *	@viid: the VI id
7342 *	@ucast: whether the hash filter should also match unicast addresses
7343 *	@vec: the value to be written to the hash filter
7344 *	@sleep_ok: call is allowed to sleep
7345 *
7346 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
7347 */
7348int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
7349		     bool ucast, u64 vec, bool sleep_ok)
7350{
7351	struct fw_vi_mac_cmd c;
7352	u32 val;
7353
7354	memset(&c, 0, sizeof(c));
7355	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
7356				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
7357				   V_FW_VI_ENABLE_CMD_VIID(viid));
7358	val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
7359	      V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
7360	c.freemacs_to_len16 = cpu_to_be32(val);
7361	c.u.hash.hashvec = cpu_to_be64(vec);
7362	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7363}
7364
7365/**
7366 *      t4_enable_vi_params - enable/disable a virtual interface
7367 *      @adap: the adapter
7368 *      @mbox: mailbox to use for the FW command
7369 *      @viid: the VI id
7370 *      @rx_en: 1=enable Rx, 0=disable Rx
7371 *      @tx_en: 1=enable Tx, 0=disable Tx
7372 *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
7373 *
7374 *      Enables/disables a virtual interface.  Note that setting DCB Enable
7375 *      only makes sense when enabling a Virtual Interface ...
7376 */
7377int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
7378			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
7379{
7380	struct fw_vi_enable_cmd c;
7381
7382	memset(&c, 0, sizeof(c));
7383	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7384				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7385				   V_FW_VI_ENABLE_CMD_VIID(viid));
7386	c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
7387				     V_FW_VI_ENABLE_CMD_EEN(tx_en) |
7388				     V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
7389				     FW_LEN16(c));
7390	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
7391}
7392
7393/**
7394 *	t4_enable_vi - enable/disable a virtual interface
7395 *	@adap: the adapter
7396 *	@mbox: mailbox to use for the FW command
7397 *	@viid: the VI id
7398 *	@rx_en: 1=enable Rx, 0=disable Rx
7399 *	@tx_en: 1=enable Tx, 0=disable Tx
7400 *
7401 *	Enables/disables a virtual interface.  Note that setting DCB Enable
7402 *	only makes sense when enabling a Virtual Interface ...
7403 */
7404int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
7405		 bool rx_en, bool tx_en)
7406{
7407	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
7408}
7409
7410/**
7411 *	t4_identify_port - identify a VI's port by blinking its LED
7412 *	@adap: the adapter
7413 *	@mbox: mailbox to use for the FW command
7414 *	@viid: the VI id
7415 *	@nblinks: how many times to blink LED at 2.5 Hz
7416 *
7417 *	Identifies a VI's port by blinking its LED.
7418 */
7419int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
7420		     unsigned int nblinks)
7421{
7422	struct fw_vi_enable_cmd c;
7423
7424	memset(&c, 0, sizeof(c));
7425	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
7426				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7427				   V_FW_VI_ENABLE_CMD_VIID(viid));
7428	c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
7429	c.blinkdur = cpu_to_be16(nblinks);
7430	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7431}
7432
7433/**
7434 *	t4_iq_stop - stop an ingress queue and its FLs
7435 *	@adap: the adapter
7436 *	@mbox: mailbox to use for the FW command
7437 *	@pf: the PF owning the queues
7438 *	@vf: the VF owning the queues
7439 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7440 *	@iqid: ingress queue id
7441 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
7442 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
7443 *
7444 *	Stops an ingress queue and its associated FLs, if any.  This causes
7445 *	any current or future data/messages destined for these queues to be
7446 *	tossed.
7447 */
7448int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
7449	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
7450	       unsigned int fl0id, unsigned int fl1id)
7451{
7452	struct fw_iq_cmd c;
7453
7454	memset(&c, 0, sizeof(c));
7455	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7456				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7457				  V_FW_IQ_CMD_VFN(vf));
7458	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
7459	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7460	c.iqid = cpu_to_be16(iqid);
7461	c.fl0id = cpu_to_be16(fl0id);
7462	c.fl1id = cpu_to_be16(fl1id);
7463	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7464}
7465
7466/**
7467 *	t4_iq_free - free an ingress queue and its FLs
7468 *	@adap: the adapter
7469 *	@mbox: mailbox to use for the FW command
7470 *	@pf: the PF owning the queues
7471 *	@vf: the VF owning the queues
7472 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7473 *	@iqid: ingress queue id
7474 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
7475 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
7476 *
7477 *	Frees an ingress queue and its associated FLs, if any.
7478 */
7479int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7480	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
7481	       unsigned int fl0id, unsigned int fl1id)
7482{
7483	struct fw_iq_cmd c;
7484
7485	memset(&c, 0, sizeof(c));
7486	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
7487				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
7488				  V_FW_IQ_CMD_VFN(vf));
7489	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
7490	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
7491	c.iqid = cpu_to_be16(iqid);
7492	c.fl0id = cpu_to_be16(fl0id);
7493	c.fl1id = cpu_to_be16(fl1id);
7494	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7495}
7496
7497/**
7498 *	t4_eth_eq_free - free an Ethernet egress queue
7499 *	@adap: the adapter
7500 *	@mbox: mailbox to use for the FW command
7501 *	@pf: the PF owning the queue
7502 *	@vf: the VF owning the queue
7503 *	@eqid: egress queue id
7504 *
7505 *	Frees an Ethernet egress queue.
7506 */
7507int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7508		   unsigned int vf, unsigned int eqid)
7509{
7510	struct fw_eq_eth_cmd c;
7511
7512	memset(&c, 0, sizeof(c));
7513	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
7514				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7515				  V_FW_EQ_ETH_CMD_PFN(pf) |
7516				  V_FW_EQ_ETH_CMD_VFN(vf));
7517	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
7518	c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
7519	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7520}
7521
7522/**
7523 *	t4_ctrl_eq_free - free a control egress queue
7524 *	@adap: the adapter
7525 *	@mbox: mailbox to use for the FW command
7526 *	@pf: the PF owning the queue
7527 *	@vf: the VF owning the queue
7528 *	@eqid: egress queue id
7529 *
7530 *	Frees a control egress queue.
7531 */
7532int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7533		    unsigned int vf, unsigned int eqid)
7534{
7535	struct fw_eq_ctrl_cmd c;
7536
7537	memset(&c, 0, sizeof(c));
7538	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
7539				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7540				  V_FW_EQ_CTRL_CMD_PFN(pf) |
7541				  V_FW_EQ_CTRL_CMD_VFN(vf));
7542	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
7543	c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
7544	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7545}
7546
7547/**
7548 *	t4_ofld_eq_free - free an offload egress queue
7549 *	@adap: the adapter
7550 *	@mbox: mailbox to use for the FW command
7551 *	@pf: the PF owning the queue
7552 *	@vf: the VF owning the queue
7553 *	@eqid: egress queue id
7554 *
7555 *	Frees a control egress queue.
7556 */
7557int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7558		    unsigned int vf, unsigned int eqid)
7559{
7560	struct fw_eq_ofld_cmd c;
7561
7562	memset(&c, 0, sizeof(c));
7563	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
7564				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
7565				  V_FW_EQ_OFLD_CMD_PFN(pf) |
7566				  V_FW_EQ_OFLD_CMD_VFN(vf));
7567	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
7568	c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
7569	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7570}
7571
7572/**
7573 *	t4_link_down_rc_str - return a string for a Link Down Reason Code
7574 *	@link_down_rc: Link Down Reason Code
7575 *
7576 *	Returns a string representation of the Link Down Reason Code.
7577 */
7578const char *t4_link_down_rc_str(unsigned char link_down_rc)
7579{
7580	static const char *reason[] = {
7581		"Link Down",
7582		"Remote Fault",
7583		"Auto-negotiation Failure",
7584		"Reserved3",
7585		"Insufficient Airflow",
7586		"Unable To Determine Reason",
7587		"No RX Signal Detected",
7588		"Reserved7",
7589	};
7590
7591	if (link_down_rc >= ARRAY_SIZE(reason))
7592		return "Bad Reason Code";
7593
7594	return reason[link_down_rc];
7595}
7596
7597/*
7598 * Updates all fields owned by the common code in port_info and link_config
7599 * based on information provided by the firmware.  Does not touch any
7600 * requested_* field.
7601 */
7602static void handle_port_info(struct port_info *pi, const struct fw_port_info *p)
7603{
7604	struct link_config *lc = &pi->link_cfg;
7605	int speed;
7606	unsigned char fc, fec;
7607	u32 stat = be32_to_cpu(p->lstatus_to_modtype);
7608
7609	pi->port_type = G_FW_PORT_CMD_PTYPE(stat);
7610	pi->mod_type = G_FW_PORT_CMD_MODTYPE(stat);
7611	pi->mdio_addr = stat & F_FW_PORT_CMD_MDIOCAP ?
7612	    G_FW_PORT_CMD_MDIOADDR(stat) : -1;
7613
7614	lc->supported = be16_to_cpu(p->pcap);
7615	lc->advertising = be16_to_cpu(p->acap);
7616	lc->lp_advertising = be16_to_cpu(p->lpacap);
7617	lc->link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
7618	lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC(stat);
7619
7620	speed = 0;
7621	if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
7622		speed = 100;
7623	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
7624		speed = 1000;
7625	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
7626		speed = 10000;
7627	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
7628		speed = 25000;
7629	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
7630		speed = 40000;
7631	else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
7632		speed = 100000;
7633	lc->speed = speed;
7634
7635	fc = 0;
7636	if (stat & F_FW_PORT_CMD_RXPAUSE)
7637		fc |= PAUSE_RX;
7638	if (stat & F_FW_PORT_CMD_TXPAUSE)
7639		fc |= PAUSE_TX;
7640	lc->fc = fc;
7641
7642	fec = 0;
7643	if (lc->advertising & FW_PORT_CAP_FEC_RS)
7644		fec = FEC_RS;
7645	else if (lc->advertising & FW_PORT_CAP_FEC_BASER_RS)
7646		fec = FEC_BASER_RS;
7647	lc->fec = fec;
7648}
7649
7650/**
7651 *	t4_update_port_info - retrieve and update port information if changed
7652 *	@pi: the port_info
7653 *
7654 *	We issue a Get Port Information Command to the Firmware and, if
7655 *	successful, we check to see if anything is different from what we
7656 *	last recorded and update things accordingly.
7657 */
7658 int t4_update_port_info(struct port_info *pi)
7659 {
7660	struct fw_port_cmd port_cmd;
7661	int ret;
7662
7663	memset(&port_cmd, 0, sizeof port_cmd);
7664	port_cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
7665					    F_FW_CMD_REQUEST | F_FW_CMD_READ |
7666					    V_FW_PORT_CMD_PORTID(pi->tx_chan));
7667	port_cmd.action_to_len16 = cpu_to_be32(
7668		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
7669		FW_LEN16(port_cmd));
7670	ret = t4_wr_mbox_ns(pi->adapter, pi->adapter->mbox,
7671			 &port_cmd, sizeof(port_cmd), &port_cmd);
7672	if (ret)
7673		return ret;
7674
7675	handle_port_info(pi, &port_cmd.u.info);
7676	return 0;
7677}
7678
7679/**
7680 *	t4_handle_fw_rpl - process a FW reply message
7681 *	@adap: the adapter
7682 *	@rpl: start of the FW message
7683 *
7684 *	Processes a FW message, such as link state change messages.
7685 */
7686int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
7687{
7688	u8 opcode = *(const u8 *)rpl;
7689	const struct fw_port_cmd *p = (const void *)rpl;
7690	unsigned int action =
7691			G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
7692
7693	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
7694		/* link/module state change message */
7695		int i, old_ptype, old_mtype;
7696		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
7697		struct port_info *pi = NULL;
7698		struct link_config *lc, *old_lc;
7699
7700		for_each_port(adap, i) {
7701			pi = adap2pinfo(adap, i);
7702			if (pi->tx_chan == chan)
7703				break;
7704		}
7705
7706		lc = &pi->link_cfg;
7707		PORT_LOCK(pi);
7708		old_lc = &pi->old_link_cfg;
7709		old_ptype = pi->port_type;
7710		old_mtype = pi->mod_type;
7711		handle_port_info(pi, &p->u.info);
7712		PORT_UNLOCK(pi);
7713		if (old_ptype != pi->port_type || old_mtype != pi->mod_type) {
7714			t4_os_portmod_changed(pi);
7715		}
7716		PORT_LOCK(pi);
7717		if (old_lc->link_ok != lc->link_ok ||
7718		    old_lc->speed != lc->speed ||
7719		    old_lc->fec != lc->fec ||
7720		    old_lc->fc != lc->fc) {
7721			t4_os_link_changed(pi);
7722			*old_lc = *lc;
7723		}
7724		PORT_UNLOCK(pi);
7725	} else {
7726		CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
7727		return -EINVAL;
7728	}
7729	return 0;
7730}
7731
7732/**
7733 *	get_pci_mode - determine a card's PCI mode
7734 *	@adapter: the adapter
7735 *	@p: where to store the PCI settings
7736 *
7737 *	Determines a card's PCI mode and associated parameters, such as speed
7738 *	and width.
7739 */
7740static void get_pci_mode(struct adapter *adapter,
7741				   struct pci_params *p)
7742{
7743	u16 val;
7744	u32 pcie_cap;
7745
7746	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
7747	if (pcie_cap) {
7748		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_LNKSTA, &val);
7749		p->speed = val & PCI_EXP_LNKSTA_CLS;
7750		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
7751	}
7752}
7753
/* Describes one explicitly supported SPI flash part (see t4_get_flash_params). */
struct flash_desc {
	u32 vendor_and_model_id;	/* JEDEC Read-ID result */
	u32 size_mb;			/* NB: despite the name, size in bytes (e.g. 4 << 20) */
};
7758
/*
 * Probe the adapter's serial flash via a JEDEC Read-ID command and record
 * its size and sector count in adapter->params.  Returns 0 on success or a
 * negative error from the SF access primitives.
 */
int t4_get_flash_params(struct adapter *adapter)
{
	/*
	 * Table for non-standard supported Flash parts.  Note, all Flash
	 * parts must have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x00150201, 4 << 20 },	/* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 flashid = 0;
	unsigned int part, manufacturer;
	unsigned int density, size = 0;


	/*
	 * Issue a Read ID Command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.  There's a newer Query
	 * Command which can retrieve detailed geometry information but many
	 * Flash parts don't support it.
	 */
	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &flashid);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return ret;

	/*
	 * Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			adapter->params.sf_size =
				supported_flash[part].size_mb;
			adapter->params.sf_nsec =
				adapter->params.sf_size / SF_SEC_SIZE;
			goto found;
		}

	/*
	 * Decode Flash part size.  The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command.  The only thing that
	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result.  After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: /* Micron/Numonix */
		/*
		 * This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14: size = 1 << 20; break; /*   1MB */
		case 0x15: size = 1 << 21; break; /*   2MB */
		case 0x16: size = 1 << 22; break; /*   4MB */
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		case 0x19: size = 1 << 25; break; /*  32MB */
		case 0x20: size = 1 << 26; break; /*  64MB */
		case 0x21: size = 1 << 27; break; /* 128MB */
		case 0x22: size = 1 << 28; break; /* 256MB */
		}
		break;

	case 0x9d: /* ISSI -- Integrated Silicon Solution, Inc. */
		/*
		 * This Density -> Size decoding table is taken from ISSI
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x16: size = 1 << 25; break; /*  32MB */
		case 0x17: size = 1 << 26; break; /*  64MB */
		}
		break;

	case 0xc2: /* Macronix */
		/*
		 * This Density -> Size decoding table is taken from Macronix
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;

	case 0xef: /* Winbond */
		/*
		 * This Density -> Size decoding table is taken from Winbond
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: size = 1 << 23; break; /*   8MB */
		case 0x18: size = 1 << 24; break; /*  16MB */
		}
		break;
	}

	/* If we didn't recognize the FLASH part, that's no real issue: the
	 * Hardware/Software contract says that Hardware will _*ALWAYS*_
	 * use a FLASH part which is at least 4MB in size and has 64KB
	 * sectors.  The unrecognized FLASH part is likely to be much larger
	 * than 4MB, but that's all we really need.
	 */
	if (size == 0) {
		CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
		size = 1 << 22;
	}

	/*
	 * Store decoded Flash size and fall through into vetting code.
	 */
	adapter->params.sf_size = size;
	adapter->params.sf_nsec = size / SF_SEC_SIZE;

 found:
	/*
	 * We should ~probably~ reject adapters with FLASHes which are too
	 * small but we have some legacy FPGAs with small FLASHes that we'd
	 * still like to use.  So instead we emit a scary message ...
	 */
	if (adapter->params.sf_size < FLASH_MIN_SIZE)
		CH_WARN(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			flashid, adapter->params.sf_size, FLASH_MIN_SIZE);

	return 0;
}
7896
7897static void set_pcie_completion_timeout(struct adapter *adapter,
7898						  u8 range)
7899{
7900	u16 val;
7901	u32 pcie_cap;
7902
7903	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
7904	if (pcie_cap) {
7905		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
7906		val &= 0xfff0;
7907		val |= range ;
7908		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
7909	}
7910}
7911
7912const struct chip_params *t4_get_chip_params(int chipid)
7913{
7914	static const struct chip_params chip_params[] = {
7915		{
7916			/* T4 */
7917			.nchan = NCHAN,
7918			.pm_stats_cnt = PM_NSTATS,
7919			.cng_ch_bits_log = 2,
7920			.nsched_cls = 15,
7921			.cim_num_obq = CIM_NUM_OBQ,
7922			.mps_rplc_size = 128,
7923			.vfcount = 128,
7924			.sge_fl_db = F_DBPRIO,
7925			.mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
7926		},
7927		{
7928			/* T5 */
7929			.nchan = NCHAN,
7930			.pm_stats_cnt = PM_NSTATS,
7931			.cng_ch_bits_log = 2,
7932			.nsched_cls = 16,
7933			.cim_num_obq = CIM_NUM_OBQ_T5,
7934			.mps_rplc_size = 128,
7935			.vfcount = 128,
7936			.sge_fl_db = F_DBPRIO | F_DBTYPE,
7937			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
7938		},
7939		{
7940			/* T6 */
7941			.nchan = T6_NCHAN,
7942			.pm_stats_cnt = T6_PM_NSTATS,
7943			.cng_ch_bits_log = 3,
7944			.nsched_cls = 16,
7945			.cim_num_obq = CIM_NUM_OBQ_T5,
7946			.mps_rplc_size = 256,
7947			.vfcount = 256,
7948			.sge_fl_db = 0,
7949			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
7950		},
7951	};
7952
7953	chipid -= CHELSIO_T4;
7954	if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
7955		return NULL;
7956
7957	return &chip_params[chipid];
7958}
7959
7960/**
7961 *	t4_prep_adapter - prepare SW and HW for operation
7962 *	@adapter: the adapter
7963 *	@buf: temporary space of at least VPD_LEN size provided by the caller.
7964 *
7965 *	Initialize adapter SW state for the various HW modules, set initial
7966 *	values for some adapter tunables, take PHYs out of reset, and
7967 *	initialize the MDIO interface.
7968 */
7969int t4_prep_adapter(struct adapter *adapter, u8 *buf)
7970{
7971	int ret;
7972	uint16_t device_id;
7973	uint32_t pl_rev;
7974
7975	get_pci_mode(adapter, &adapter->params.pci);
7976
7977	pl_rev = t4_read_reg(adapter, A_PL_REV);
7978	adapter->params.chipid = G_CHIPID(pl_rev);
7979	adapter->params.rev = G_REV(pl_rev);
7980	if (adapter->params.chipid == 0) {
7981		/* T4 did not have chipid in PL_REV (T5 onwards do) */
7982		adapter->params.chipid = CHELSIO_T4;
7983
7984		/* T4A1 chip is not supported */
7985		if (adapter->params.rev == 1) {
7986			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
7987			return -EINVAL;
7988		}
7989	}
7990
7991	adapter->chip_params = t4_get_chip_params(chip_id(adapter));
7992	if (adapter->chip_params == NULL)
7993		return -EINVAL;
7994
7995	adapter->params.pci.vpd_cap_addr =
7996	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
7997
7998	ret = t4_get_flash_params(adapter);
7999	if (ret < 0)
8000		return ret;
8001
8002	ret = get_vpd_params(adapter, &adapter->params.vpd, buf);
8003	if (ret < 0)
8004		return ret;
8005
8006	/* Cards with real ASICs have the chipid in the PCIe device id */
8007	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
8008	if (device_id >> 12 == chip_id(adapter))
8009		adapter->params.cim_la_size = CIMLA_SIZE;
8010	else {
8011		/* FPGA */
8012		adapter->params.fpga = 1;
8013		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
8014	}
8015
8016	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
8017
8018	/*
8019	 * Default port and clock for debugging in case we can't reach FW.
8020	 */
8021	adapter->params.nports = 1;
8022	adapter->params.portvec = 1;
8023	adapter->params.vpd.cclk = 50000;
8024
8025	/* Set pci completion timeout value to 4 seconds. */
8026	set_pcie_completion_timeout(adapter, 0xd);
8027	return 0;
8028}
8029
8030/**
8031 *	t4_shutdown_adapter - shut down adapter, host & wire
8032 *	@adapter: the adapter
8033 *
8034 *	Perform an emergency shutdown of the adapter and stop it from
8035 *	continuing any further communication on the ports or DMA to the
8036 *	host.  This is typically used when the adapter and/or firmware
8037 *	have crashed and we want to prevent any further accidental
8038 *	communication with the rest of the world.  This will also force
8039 *	the port Link Status to go down -- if register writes work --
8040 *	which should help our peers figure out that we're down.
8041 */
8042int t4_shutdown_adapter(struct adapter *adapter)
8043{
8044	int port;
8045
8046	t4_intr_disable(adapter);
8047	t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
8048	for_each_port(adapter, port) {
8049		u32 a_port_cfg = is_t4(adapter) ?
8050				 PORT_REG(port, A_XGMAC_PORT_CFG) :
8051				 T5_PORT_REG(port, A_MAC_PORT_CFG);
8052
8053		t4_write_reg(adapter, a_port_cfg,
8054			     t4_read_reg(adapter, a_port_cfg)
8055			     & ~V_SIGNAL_DET(1));
8056	}
8057	t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
8058
8059	return 0;
8060}
8061
/**
 *	t4_init_devlog_params - initialize adapter->params.devlog
 *	@adap: the adapter
 *	@fw_attach: whether we can talk to the firmware
 *
 *	Initialize various fields of the adapter's Firmware Device Log
 *	Parameters structure.  Returns 0 on success, -ENXIO if the
 *	parameters aren't in the dedicated register and we may not query
 *	the firmware, or a mailbox error from the FW_DEVLOG_CMD query.
 */
int t4_init_devlog_params(struct adapter *adap, int fw_attach)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
		t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
	if (pf_dparams) {
		unsigned int nentries, nentries128;

		/* The register packs the memory type, the start address in
		 * 16-byte units, and the entry count in units of 128
		 * entries.
		 */
		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;

		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/*
	 * For any failing returns ...
	 */
	memset(dparams, 0, sizeof *dparams);

	/*
	 * If we can't talk to the firmware, there's really nothing we can do
	 * at this point.
	 */
	if (!fw_attach)
		return -ENXIO;

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof devlog_cmd);
	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	/* Unpack the reply; start address is reported in 16-byte units. */
	devlog_meminfo =
		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}
8128
/**
 *	t4_init_sge_params - initialize adap->params.sge
 *	@adapter: the adapter
 *
 *	Initialize various fields of the adapter's SGE Parameters structure
 *	by reading the current values back from the hardware registers.
 *	Always returns 0.
 */
int t4_init_sge_params(struct adapter *adapter)
{
	u32 r;
	struct sge_params *sp = &adapter->params.sge;
	unsigned i, tscale = 1;

	/* Interrupt holdoff packet-count thresholds. */
	r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
	sp->counter_val[0] = G_THRESHOLD_0(r);
	sp->counter_val[1] = G_THRESHOLD_1(r);
	sp->counter_val[2] = G_THRESHOLD_2(r);
	sp->counter_val[3] = G_THRESHOLD_3(r);

	/* T6+ can scale the SGE timers; a TSCALE field of 0 means 1x. */
	if (chip_id(adapter) >= CHELSIO_T6) {
		r = t4_read_reg(adapter, A_SGE_ITP_CONTROL);
		tscale = G_TSCALE(r);
		if (tscale == 0)
			tscale = 1;
		else
			tscale += 2;
	}

	/* Interrupt holdoff timer values, converted to microseconds. */
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
	sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r)) * tscale;
	sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
	sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r)) * tscale;
	sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r)) * tscale;
	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
	sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r)) * tscale;
	sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r)) * tscale;

	/* Free-list starvation thresholds; the packing-mode variant lives
	 * in a different field on T5 and again on T6.
	 */
	r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
	sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
	if (is_t4(adapter))
		sp->fl_starve_threshold2 = sp->fl_starve_threshold;
	else if (is_t5(adapter))
		sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
	else
		sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1;

	/* egress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* ingress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
	sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* Host page size for this PF; the field holds log2(size) - 10. */
	r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
	r >>= S_HOSTPAGESIZEPF0 +
	    (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
	sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;

	/* Status page size, Rx packet shift, and pad boundary. */
	r = t4_read_reg(adapter, A_SGE_CONTROL);
	sp->sge_control = r;
	sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
	sp->fl_pktshift = G_PKTSHIFT(r);
	if (chip_id(adapter) <= CHELSIO_T5) {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_INGPADBOUNDARY_SHIFT);
	} else {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
		    X_T6_INGPADBOUNDARY_SHIFT);
	}
	/* Packing boundary: T4 has none of its own and reuses the pad
	 * boundary; T5+ report it in SGE_CONTROL2 (0 means 16 bytes).
	 */
	if (is_t4(adapter))
		sp->pack_boundary = sp->pad_boundary;
	else {
		r = t4_read_reg(adapter, A_SGE_CONTROL2);
		if (G_INGPACKBOUNDARY(r) == 0)
			sp->pack_boundary = 16;
		else
			sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
	}
	/* Current free-list buffer sizes. */
	for (i = 0; i < SGE_FLBUF_SIZES; i++)
		sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
		    A_SGE_FL_BUFFER_SIZE0 + (4 * i));

	return 0;
}
8218
/*
 * Read and cache the adapter's compressed filter mode (TP_VLAN_PRI_MAP) and
 * ingress config (TP_INGRESS_CONFIG), then precompute the shift position of
 * each filter field within the Compressed Filter Tuple.
 */
static void read_filter_mode_and_ingress_config(struct adapter *adap,
    bool sleep_ok)
{
	struct tp_params *tpp = &adap->params.tp;

	t4_tp_pio_read(adap, &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP,
	    sleep_ok);
	t4_tp_pio_read(adap, &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG,
	    sleep_ok);

	/*
	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 * (t4_filter_field_shift() returns -1 for fields not in the mode.)
	 */
	tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
	tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
	tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
	tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
	tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
	tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
	tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
	tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);

	/*
	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((tpp->ingress_config & F_VNIC) == 0)
		tpp->vnic_shift = -1;
}
8255
/**
 *      t4_init_tp_params - initialize adap->params.tp
 *      @adap: the adapter
 *      @sleep_ok: whether it is OK to sleep while waiting on TP indirects
 *
 *      Initialize various fields of the adapter's TP Parameters structure:
 *      timer resolutions, Tx modulation queue mapping defaults, the cached
 *      filter mode/ingress config, and the Rx error-vector mask.  Always
 *      returns 0.
 */
int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
{
	int chan;
	u32 v;
	struct tp_params *tpp = &adap->params.tp;

	/* Cache TP's timer and delayed-ACK resolution fields. */
	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
	tpp->tre = G_TIMERRESOLUTION(v);
	tpp->dack_re = G_DELAYEDACKRESOLUTION(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < MAX_NCHAN; chan++)
		tpp->tx_modq[chan] = chan;

	read_filter_mode_and_ingress_config(adap, sleep_ok);

	/*
	 * Cache a mask of the bits that represent the error vector portion of
	 * rx_pkt.err_vec.  T6+ can use a compressed error vector to make room
	 * for information about outer encapsulation (GENEVE/VXLAN/NVGRE).
	 */
	tpp->err_vec_mask = htobe16(0xffff);
	if (chip_id(adap) > CHELSIO_T5) {
		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
		if (v & F_CRXPKTENC) {
			tpp->err_vec_mask =
			    htobe16(V_T6_COMPR_RXERR_VEC(M_T6_COMPR_RXERR_VEC));
		}
	}

	return 0;
}
8294
8295/**
8296 *      t4_filter_field_shift - calculate filter field shift
8297 *      @adap: the adapter
8298 *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
8299 *
8300 *      Return the shift position of a filter field within the Compressed
8301 *      Filter Tuple.  The filter field is specified via its selection bit
8302 *      within TP_VLAN_PRI_MAL (filter mode).  E.g. F_VLAN.
8303 */
8304int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
8305{
8306	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
8307	unsigned int sel;
8308	int field_shift;
8309
8310	if ((filter_mode & filter_sel) == 0)
8311		return -1;
8312
8313	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
8314		switch (filter_mode & sel) {
8315		case F_FCOE:
8316			field_shift += W_FT_FCOE;
8317			break;
8318		case F_PORT:
8319			field_shift += W_FT_PORT;
8320			break;
8321		case F_VNIC_ID:
8322			field_shift += W_FT_VNIC_ID;
8323			break;
8324		case F_VLAN:
8325			field_shift += W_FT_VLAN;
8326			break;
8327		case F_TOS:
8328			field_shift += W_FT_TOS;
8329			break;
8330		case F_PROTOCOL:
8331			field_shift += W_FT_PROTOCOL;
8332			break;
8333		case F_ETHERTYPE:
8334			field_shift += W_FT_ETHERTYPE;
8335			break;
8336		case F_MACMATCH:
8337			field_shift += W_FT_MACMATCH;
8338			break;
8339		case F_MPSHITTYPE:
8340			field_shift += W_FT_MPSHITTYPE;
8341			break;
8342		case F_FRAGMENTATION:
8343			field_shift += W_FT_FRAGMENTATION;
8344			break;
8345		}
8346	}
8347	return field_shift;
8348}
8349
/*
 * t4_port_init - initialize a port's SW state
 * @adap: the adapter
 * @mbox: mailbox to use for firmware commands
 * @pf: the PF that will own the port's virtual interface
 * @vf: the VF that will own the port's virtual interface
 * @port_id: the adapter-local index of the port
 *
 * Allocates the port's first virtual interface via the firmware, records
 * its VIID/SMT index/RSS size, maps the port to its Tx channel and MPS
 * buffer groups, and queries the RSS table base.  Returns 0 on success or
 * a negative errno.
 */
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
{
	u8 addr[6];
	int ret, i, j;
	u16 rss_size;
	struct port_info *p = adap2pinfo(adap, port_id);
	u32 param, val;

	/*
	 * The port's channel is the index of the (port_id + 1)'th set bit
	 * in the firmware-provided port vector.
	 */
	for (i = 0, j = -1; i <= p->port_id; i++) {
		do {
			j++;
		} while ((adap->params.portvec & (1 << j)) == 0);
	}

	/* Only refresh port info if we have permission to do so. */
	if (!(adap->flags & IS_VF) ||
	    adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
 		t4_update_port_info(p);
	}

	/* Allocate the port's first VI; returns the VIID on success. */
	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
	if (ret < 0)
		return ret;

	p->vi[0].viid = ret;
	/* SMT index derives from the low 7 bits of the VIID; T4/T5 double
	 * it, T6 uses it directly.
	 */
	if (chip_id(adap) <= CHELSIO_T5)
		p->vi[0].smt_idx = (ret & 0x7f) << 1;
	else
		p->vi[0].smt_idx = (ret & 0x7f);
	p->tx_chan = j;
	p->rx_chan_map = t4_get_mps_bg_map(adap, j);
	p->lport = j;
	p->vi[0].rss_size = rss_size;
	t4_os_set_hw_addr(p, addr);

	/* Ask the firmware where this VI's RSS slice starts. */
	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
	    V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
	ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
	if (ret)
		p->vi[0].rss_base = 0xffff;	/* query failed; sentinel */
	else {
		/* MPASS((val >> 16) == rss_size); */
		p->vi[0].rss_base = val & 0xffff;
	}

	return 0;
}
8397
/**
 *	t4_read_cimq_cfg - read CIM queue configuration
 *	@adap: the adapter
 *	@base: holds the queue base addresses in bytes
 *	@size: holds the queue sizes in bytes
 *	@thres: holds the queue full thresholds in bytes
 *
 *	Returns the current configuration of the CIM queues, starting with
 *	the IBQs, then the OBQs.  Note that only the IBQs have a full
 *	threshold, so @thres receives CIM_NUM_IBQ entries.
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;
	int cim_num_obq = adap->chip_params->cim_num_obq;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		/* Select IBQ i, then read its configuration back. */
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* value is in 256-byte units */
		*base++ = G_CIMQBASE(v) * 256;
		*size++ = G_CIMQSIZE(v) * 256;
		*thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
	}
	for (i = 0; i < cim_num_obq; i++) {
		/* Select OBQ i, then read its configuration back. */
		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
			     V_QUENUMSELECT(i));
		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
		/* value is in 256-byte units */
		*base++ = G_CIMQBASE(v) * 256;
		*size++ = G_CIMQSIZE(v) * 256;
	}
}
8431
/**
 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err, attempts;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	/* Only IBQs 0-5 exist; reject misaligned word counts too. */
	if (qid > 5 || (n & 3))
		return -EINVAL;

	/* Each IBQ occupies a fixed nwords-sized slot in debug space. */
	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	/* It might take 3-10ms before the IBQ debug read access is allowed.
	 * Wait for 1 Sec with a delay of 1 usec.
	 */
	attempts = 1000000;

	for (i = 0; i < n; i++, addr++) {
		/* Post the word address, wait for the debug read to
		 * complete, then collect the data word.
		 */
		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
			     F_IBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
				      attempts, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
	}
	/* Disable debug access again. */
	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
	return i;
}
8473
/**
 *	t4_read_cim_obq - read the contents of a CIM outbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	int cim_num_obq = adap->chip_params->cim_num_obq;

	/* The number of OBQs is chip-dependent. */
	if ((qid > (cim_num_obq - 1)) || (n & 3))
		return -EINVAL;

	/* Look up this OBQ's base and size. */
	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
		     V_QUENUMSELECT(qid));
	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);

	addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = G_CIMQSIZE(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		/* Post the word address, wait for the debug read to
		 * complete, then collect the data word.
		 */
		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
			     F_OBQDBGEN);
		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
	}
	/* Disable debug access again. */
	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
	return i;
}
8515
/* Base offsets of the regions within the CIM internal address space
 * (used with t4_cim_read()/t4_cim_write()).
 */
enum {
	CIM_QCTL_BASE     = 0,
	CIM_CTL_BASE      = 0x2000,
	CIM_PBT_ADDR_BASE = 0x2800,
	CIM_PBT_LRF_BASE  = 0x3000,
	CIM_PBT_DATA_BASE = 0x3800
};
8523
/**
 *	t4_cim_read - read a block from CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM internal address space.
 *	Returns 0 on success, -EBUSY if the CIM host interface is already
 *	busy, or a timeout error from t4_wait_op_done().
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		/* Post the address, wait for HOSTBUSY to clear, then
		 * collect the data word.
		 */
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}
8550
/**
 *	t4_cim_write - write a block into CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to write
 *	@valp: set of values to write
 *
 *	Writes a block of 4-byte words into the CIM internal address space.
 *	Returns 0 on success, -EBUSY if the CIM host interface is already
 *	busy, or a timeout error from t4_wait_op_done().
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		/* Stage the data word, then post the address with the
		 * write flag and wait for HOSTBUSY to clear.
		 */
		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
	}
	return ret;
}
8576
/*
 * Convenience wrapper: write the single 4-byte word @val at CIM internal
 * address @addr.  Returns what t4_cim_write() returns.
 */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	unsigned int word = val;

	return t4_cim_write(adap, addr, 1, &word);
}
8582
8583/**
8584 *	t4_cim_ctl_read - read a block from CIM control region
8585 *	@adap: the adapter
8586 *	@addr: the start address within the CIM control region
8587 *	@n: number of words to read
8588 *	@valp: where to store the result
8589 *
8590 *	Reads a block of 4-byte words from the CIM control region.
8591 */
8592int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
8593		    unsigned int *valp)
8594{
8595	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
8596}
8597
/**
 *	t4_cim_read_la - read CIM LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the CIM LA buffer with the most recent entry at
 *	the end	of the returned data and with the entry at @wrptr first.
 *	We try to leave the LA in the running state we find it in.
 *	Returns 0 on success or a negative error from the CIM accessors.
 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
		if (ret)
			return ret;
	}

	/* Re-read the config to get the current write pointer. */
	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
	if (ret)
		goto restart;

	idx = G_UPDBGLAWRPTR(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		/* Set the read pointer to idx and trigger a read, ... */
		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
		if (ret)
			break;
		/* ... wait for the read-enable bit to self-clear, ... */
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
		if (ret)
			break;
		if (val & F_UPDBGLARDEN) {
			ret = -ETIMEDOUT;
			break;
		}
		/* ... then collect the captured word. */
		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
		if (ret)
			break;

		/* address can't exceed 0xfff (UpDbgLaRdPtr is of 12-bits) */
		idx = (idx + 1) & M_UPDBGLARDPTR;
		/*
		 * Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
		 * identify the 32-bit portion of the full 312-bit data
		 *
		 * NOTE(review): this wrap uses '%' on the mask constant
		 * while the wrap above uses '&'; confirm the modulo is
		 * intentional here.
		 */
		if (is_t6(adap))
			while ((idx & 0xf) > 9)
				idx = (idx + 1) % M_UPDBGLARDPTR;
	}
restart:
	/* If the LA was running when we arrived, restart it. */
	if (cfg & F_UPDBGLAEN) {
		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
				      cfg & ~F_UPDBGLARDEN);
		if (!ret)
			ret = r;
	}
	return ret;
}
8666
/**
 *	t4_tp_read_la - read TP LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data (TPLA_SIZE 64-bit entries)
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the TP LA buffer with the most recent entry at
 *	the end	of the returned data and with the entry at @wrptr first.
 *	We leave the LA in the running state we find it in.
 */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
	if (cfg & F_DBGLAENABLE)			/* freeze LA */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));

	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
	idx = G_DBGLAWPTR(val);
	/* In capture modes >= 2 an entry may only be half filled; if so,
	 * skip past it so we start at the oldest complete entry.
	 */
	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
	if (last_incomplete)
		idx = (idx + 1) & M_DBGLARPTR;
	if (wrptr)
		*wrptr = idx;

	/* Keep the config bits but clear the read pointer field so we can
	 * set it per iteration below.
	 */
	val &= 0xffff;
	val &= ~V_DBGLARPTR(M_DBGLARPTR);
	val |= adap->params.tp.la_mask;

	for (i = 0; i < TPLA_SIZE; i++) {
		/* Select entry idx, then read the 64-bit data word. */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
		idx = (idx + 1) & M_DBGLARPTR;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & F_DBGLAENABLE)		/* restore running state */
		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
			     cfg | adap->params.tp.la_mask);
}
8713
8714/*
8715 * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
8716 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
8717 * state for more than the Warning Threshold then we'll issue a warning about
8718 * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
8719 * appears to be hung every Warning Repeat second till the situation clears.
8720 * If the situation clears, we'll note that as well.
8721 */
8722#define SGE_IDMA_WARN_THRESH 1
8723#define SGE_IDMA_WARN_REPEAT 300
8724
8725/**
8726 *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
8727 *	@adapter: the adapter
8728 *	@idma: the adapter IDMA Monitor state
8729 *
8730 *	Initialize the state of an SGE Ingress DMA Monitor.
8731 */
8732void t4_idma_monitor_init(struct adapter *adapter,
8733			  struct sge_idma_monitor_state *idma)
8734{
8735	/* Initialize the state variables for detecting an SGE Ingress DMA
8736	 * hang.  The SGE has internal counters which count up on each clock
8737	 * tick whenever the SGE finds its Ingress DMA State Engines in the
8738	 * same state they were on the previous clock tick.  The clock used is
8739	 * the Core Clock so we have a limit on the maximum "time" they can
8740	 * record; typically a very small number of seconds.  For instance,
8741	 * with a 600MHz Core Clock, we can only count up to a bit more than
8742	 * 7s.  So we'll synthesize a larger counter in order to not run the
8743	 * risk of having the "timers" overflow and give us the flexibility to
8744	 * maintain a Hung SGE State Machine of our own which operates across
8745	 * a longer time frame.
8746	 */
8747	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
8748	idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
8749}
8750
/**
 *	t4_idma_monitor - monitor SGE Ingress DMA state
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *	@hz: number of ticks/second
 *	@ticks: number of ticks since the last IDMA Monitor call
 *
 *	Checks both SGE Ingress DMA channels for stalls, maintaining a
 *	synthesized Hz-domain stall timer per channel in @idma, and logs
 *	warnings (rate-limited to one every SGE_IDMA_WARN_REPEAT seconds)
 *	while a channel appears hung, plus a note when it resumes.
 */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	 /* Read the SGE Debug Ingress DMA Same State Count registers.  These
	  * are counters inside the SGE which count up on each clock when the
	  * SGE finds its Ingress DMA State Engines in the same states they
	  * were in the previous clock.  The counters will peg out at
	  * 0xffffffff without wrapping around so once they pass the 1s
	  * threshold they'll stay above that till the IDMA state changes.
	  */
	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);

	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue.  If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
				CH_WARN(adapter, "SGE idma%d, queue %u, "
					"resumed after %d seconds\n",
					i, idma->idma_qid[i],
					idma->idma_stalled[i]/hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain.  The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
		} else {
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
			" state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
			i, idma->idma_qid[i], idma->idma_state[i],
			idma->idma_stalled[i]/hz,
			debug0, debug11);
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}
8840
8841/**
8842 *	t4_read_pace_tbl - read the pace table
8843 *	@adap: the adapter
8844 *	@pace_vals: holds the returned values
8845 *
8846 *	Returns the values of TP's pace table in microseconds.
8847 */
8848void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
8849{
8850	unsigned int i, v;
8851
8852	for (i = 0; i < NTX_SCHED; i++) {
8853		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
8854		v = t4_read_reg(adap, A_TP_PACE_TABLE);
8855		pace_vals[i] = dack_ticks_to_usec(adap, v);
8856	}
8857}
8858
8859/**
8860 *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
8861 *	@adap: the adapter
8862 *	@sched: the scheduler index
8863 *	@kbps: the byte rate in Kbps
8864 *	@ipg: the interpacket delay in tenths of nanoseconds
8865 *
8866 *	Return the current configuration of a HW Tx scheduler.
8867 */
8868void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
8869		     unsigned int *ipg, bool sleep_ok)
8870{
8871	unsigned int v, addr, bpt, cpt;
8872
8873	if (kbps) {
8874		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
8875		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
8876		if (sched & 1)
8877			v >>= 16;
8878		bpt = (v >> 8) & 0xff;
8879		cpt = v & 0xff;
8880		if (!cpt)
8881			*kbps = 0;	/* scheduler disabled */
8882		else {
8883			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
8884			*kbps = (v * bpt) / 125;
8885		}
8886	}
8887	if (ipg) {
8888		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
8889		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
8890		if (sched & 1)
8891			v >>= 16;
8892		v &= 0xffff;
8893		*ipg = (10000 * v) / core_ticks_per_usec(adap);
8894	}
8895}
8896
8897/**
8898 *	t4_load_cfg - download config file
8899 *	@adap: the adapter
8900 *	@cfg_data: the cfg text file to write
8901 *	@size: text file size
8902 *
8903 *	Write the supplied config text file to the card's serial flash.
8904 */
8905int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
8906{
8907	int ret, i, n, cfg_addr;
8908	unsigned int addr;
8909	unsigned int flash_cfg_start_sec;
8910	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
8911
8912	cfg_addr = t4_flash_cfg_addr(adap);
8913	if (cfg_addr < 0)
8914		return cfg_addr;
8915
8916	addr = cfg_addr;
8917	flash_cfg_start_sec = addr / SF_SEC_SIZE;
8918
8919	if (size > FLASH_CFG_MAX_SIZE) {
8920		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
8921		       FLASH_CFG_MAX_SIZE);
8922		return -EFBIG;
8923	}
8924
8925	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
8926			 sf_sec_size);
8927	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
8928				     flash_cfg_start_sec + i - 1);
8929	/*
8930	 * If size == 0 then we're simply erasing the FLASH sectors associated
8931	 * with the on-adapter Firmware Configuration File.
8932	 */
8933	if (ret || size == 0)
8934		goto out;
8935
8936	/* this will write to the flash up to SF_PAGE_SIZE at a time */
8937	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
8938		if ( (size - i) <  SF_PAGE_SIZE)
8939			n = size - i;
8940		else
8941			n = SF_PAGE_SIZE;
8942		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
8943		if (ret)
8944			goto out;
8945
8946		addr += SF_PAGE_SIZE;
8947		cfg_data += SF_PAGE_SIZE;
8948	}
8949
8950out:
8951	if (ret)
8952		CH_ERR(adap, "config file %s failed %d\n",
8953		       (size == 0 ? "clear" : "download"), ret);
8954	return ret;
8955}
8956
8957/**
8958 *	t5_fw_init_extern_mem - initialize the external memory
8959 *	@adap: the adapter
8960 *
8961 *	Initializes the external memory on T5.
8962 */
8963int t5_fw_init_extern_mem(struct adapter *adap)
8964{
8965	u32 params[1], val[1];
8966	int ret;
8967
8968	if (!is_t5(adap))
8969		return 0;
8970
8971	val[0] = 0xff; /* Initialize all MCs */
8972	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
8973			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
8974	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
8975			FW_CMD_MAX_TIMEOUT);
8976
8977	return ret;
8978}
8979
8980/* BIOS boot headers */
/* Standard PCI Expansion ROM Header; multi-byte fields are little-endian */
typedef struct pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	reserved[22]; /* Reserved per processor Architecture data */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
8986
/* Legacy PCI Expansion ROM Header; multi-byte fields are little-endian */
typedef struct legacy_pci_expansion_rom_header {
	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
	u8	size512; /* Current Image Size in units of 512 bytes */
	u8	initentry_point[4]; /* Initialization entry point */
	u8	cksum; /* Checksum computed on the entire Image */
	u8	reserved[16]; /* Reserved */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
8996
/* EFI PCI Expansion ROM Header; multi-byte fields are little-endian */
typedef struct efi_pci_expansion_rom_header {
	u8	signature[2]; /* ROM signature. The value 0xaa55 */
	u8	initialization_size[2]; /* Units 512. Includes this header */
	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
	u8	efi_machine_type[2]; /* Machine type from EFI image header */
	u8	compression_type[2]; /* Compression type. */
		/*
		 * Compression type definition
		 * 0x0: uncompressed
		 * 0x1: Compressed
		 * 0x2-0xFFFF: Reserved
		 */
	u8	reserved[8]; /* Reserved */
	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
9015
/* PCI Data Structure Format; multi-byte fields are little-endian */
typedef struct pcir_data_structure { /* PCI Data Structure */
	u8	signature[4]; /* Signature. The string "PCIR" */
	u8	vendor_id[2]; /* Vendor Identification */
	u8	device_id[2]; /* Device Identification */
	u8	vital_product[2]; /* Pointer to Vital Product Data */
	u8	length[2]; /* PCIR Data Structure Length */
	u8	revision; /* PCIR Data Structure Revision */
	u8	class_code[3]; /* Class Code */
	u8	image_length[2]; /* Image Length. Multiple of 512B */
	u8	code_revision[2]; /* Revision Level of Code/Data */
	u8	code_type; /* Code Type. */
		/*
		 * PCI Expansion ROM Code Types
		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
		 * 0x01: Open Firmware standard for PCI. FCODE
		 * 0x02: Hewlett-Packard PA RISC. HP reserved
		 * 0x03: EFI Image. EFI
		 * 0x04-0xFF: Reserved.
		 */
	u8	indicator; /* Indicator. Identifies the last image in the ROM */
	u8	reserved[2]; /* Reserved */
} pcir_data_t; /* PCIR_DATA_STRUCTURE */
9039
/* BOOT constants */
enum {
	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* max image: 1024 chunks (512KB) */
	VENDOR_ID = 0x1425, /* Chelsio PCI Vendor ID */
	PCIR_SIGNATURE = 0x52494350 /* the string "PCIR" as a LE u32 */
};
9050
9051/*
9052 *	modify_device_id - Modifies the device ID of the Boot BIOS image
9053 *	@adatper: the device ID to write.
9054 *	@boot_data: the boot image to modify.
9055 *
9056 *	Write the supplied device ID to the boot BIOS image.
9057 */
9058static void modify_device_id(int device_id, u8 *boot_data)
9059{
9060	legacy_pci_exp_rom_header_t *header;
9061	pcir_data_t *pcir_header;
9062	u32 cur_header = 0;
9063
9064	/*
9065	 * Loop through all chained images and change the device ID's
9066	 */
9067	while (1) {
9068		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
9069		pcir_header = (pcir_data_t *) &boot_data[cur_header +
9070			      le16_to_cpu(*(u16*)header->pcir_offset)];
9071
9072		/*
9073		 * Only modify the Device ID if code type is Legacy or HP.
9074		 * 0x00: Okay to modify
9075		 * 0x01: FCODE. Do not be modify
9076		 * 0x03: Okay to modify
9077		 * 0x04-0xFF: Do not modify
9078		 */
9079		if (pcir_header->code_type == 0x00) {
9080			u8 csum = 0;
9081			int i;
9082
9083			/*
9084			 * Modify Device ID to match current adatper
9085			 */
9086			*(u16*) pcir_header->device_id = device_id;
9087
9088			/*
9089			 * Set checksum temporarily to 0.
9090			 * We will recalculate it later.
9091			 */
9092			header->cksum = 0x0;
9093
9094			/*
9095			 * Calculate and update checksum
9096			 */
9097			for (i = 0; i < (header->size512 * 512); i++)
9098				csum += (u8)boot_data[cur_header + i];
9099
9100			/*
9101			 * Invert summed value to create the checksum
9102			 * Writing new checksum value directly to the boot data
9103			 */
9104			boot_data[cur_header + 7] = -csum;
9105
9106		} else if (pcir_header->code_type == 0x03) {
9107
9108			/*
9109			 * Modify Device ID to match current adatper
9110			 */
9111			*(u16*) pcir_header->device_id = device_id;
9112
9113		}
9114
9115
9116		/*
9117		 * Check indicator element to identify if this is the last
9118		 * image in the ROM.
9119		 */
9120		if (pcir_header->indicator & 0x80)
9121			break;
9122
9123		/*
9124		 * Move header pointer up to the next image in the ROM.
9125		 */
9126		cur_header += header->size512 * 512;
9127	}
9128}
9129
9130/*
9131 *	t4_load_boot - download boot flash
9132 *	@adapter: the adapter
9133 *	@boot_data: the boot image to write
9134 *	@boot_addr: offset in flash to write boot_data
9135 *	@size: image size
9136 *
9137 *	Write the supplied boot image to the card's serial flash.
9138 *	The boot image has the following sections: a 28-byte header and the
9139 *	boot image.
9140 */
9141int t4_load_boot(struct adapter *adap, u8 *boot_data,
9142		 unsigned int boot_addr, unsigned int size)
9143{
9144	pci_exp_rom_header_t *header;
9145	int pcir_offset ;
9146	pcir_data_t *pcir_header;
9147	int ret, addr;
9148	uint16_t device_id;
9149	unsigned int i;
9150	unsigned int boot_sector = (boot_addr * 1024 );
9151	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
9152
9153	/*
9154	 * Make sure the boot image does not encroach on the firmware region
9155	 */
9156	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
9157		CH_ERR(adap, "boot image encroaching on firmware region\n");
9158		return -EFBIG;
9159	}
9160
9161	/*
9162	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
9163	 * and Boot configuration data sections. These 3 boot sections span
9164	 * sectors 0 to 7 in flash and live right before the FW image location.
9165	 */
9166	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
9167			sf_sec_size);
9168	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
9169				     (boot_sector >> 16) + i - 1);
9170
9171	/*
9172	 * If size == 0 then we're simply erasing the FLASH sectors associated
9173	 * with the on-adapter option ROM file
9174	 */
9175	if (ret || (size == 0))
9176		goto out;
9177
9178	/* Get boot header */
9179	header = (pci_exp_rom_header_t *)boot_data;
9180	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
9181	/* PCIR Data Structure */
9182	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
9183
9184	/*
9185	 * Perform some primitive sanity testing to avoid accidentally
9186	 * writing garbage over the boot sectors.  We ought to check for
9187	 * more but it's not worth it for now ...
9188	 */
9189	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
9190		CH_ERR(adap, "boot image too small/large\n");
9191		return -EFBIG;
9192	}
9193
9194#ifndef CHELSIO_T4_DIAGS
9195	/*
9196	 * Check BOOT ROM header signature
9197	 */
9198	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
9199		CH_ERR(adap, "Boot image missing signature\n");
9200		return -EINVAL;
9201	}
9202
9203	/*
9204	 * Check PCI header signature
9205	 */
9206	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
9207		CH_ERR(adap, "PCI header missing signature\n");
9208		return -EINVAL;
9209	}
9210
9211	/*
9212	 * Check Vendor ID matches Chelsio ID
9213	 */
9214	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
9215		CH_ERR(adap, "Vendor ID missing signature\n");
9216		return -EINVAL;
9217	}
9218#endif
9219
9220	/*
9221	 * Retrieve adapter's device ID
9222	 */
9223	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
9224	/* Want to deal with PF 0 so I strip off PF 4 indicator */
9225	device_id = device_id & 0xf0ff;
9226
9227	/*
9228	 * Check PCIE Device ID
9229	 */
9230	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
9231		/*
9232		 * Change the device ID in the Boot BIOS image to match
9233		 * the Device ID of the current adapter.
9234		 */
9235		modify_device_id(device_id, boot_data);
9236	}
9237
9238	/*
9239	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
9240	 * we finish copying the rest of the boot image. This will ensure
9241	 * that the BIOS boot header will only be written if the boot image
9242	 * was written in full.
9243	 */
9244	addr = boot_sector;
9245	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
9246		addr += SF_PAGE_SIZE;
9247		boot_data += SF_PAGE_SIZE;
9248		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
9249		if (ret)
9250			goto out;
9251	}
9252
9253	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
9254			     (const u8 *)header, 0);
9255
9256out:
9257	if (ret)
9258		CH_ERR(adap, "boot image download failed, error %d\n", ret);
9259	return ret;
9260}
9261
9262/*
9263 *	t4_flash_bootcfg_addr - return the address of the flash optionrom configuration
9264 *	@adapter: the adapter
9265 *
9266 *	Return the address within the flash where the OptionROM Configuration
9267 *	is stored, or an error if the device FLASH is too small to contain
9268 *	a OptionROM Configuration.
9269 */
9270static int t4_flash_bootcfg_addr(struct adapter *adapter)
9271{
9272	/*
9273	 * If the device FLASH isn't large enough to hold a Firmware
9274	 * Configuration File, return an error.
9275	 */
9276	if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
9277		return -ENOSPC;
9278
9279	return FLASH_BOOTCFG_START;
9280}
9281
9282int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
9283{
9284	int ret, i, n, cfg_addr;
9285	unsigned int addr;
9286	unsigned int flash_cfg_start_sec;
9287	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
9288
9289	cfg_addr = t4_flash_bootcfg_addr(adap);
9290	if (cfg_addr < 0)
9291		return cfg_addr;
9292
9293	addr = cfg_addr;
9294	flash_cfg_start_sec = addr / SF_SEC_SIZE;
9295
9296	if (size > FLASH_BOOTCFG_MAX_SIZE) {
9297		CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
9298			FLASH_BOOTCFG_MAX_SIZE);
9299		return -EFBIG;
9300	}
9301
9302	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
9303			 sf_sec_size);
9304	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
9305					flash_cfg_start_sec + i - 1);
9306
9307	/*
9308	 * If size == 0 then we're simply erasing the FLASH sectors associated
9309	 * with the on-adapter OptionROM Configuration File.
9310	 */
9311	if (ret || size == 0)
9312		goto out;
9313
9314	/* this will write to the flash up to SF_PAGE_SIZE at a time */
9315	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
9316		if ( (size - i) <  SF_PAGE_SIZE)
9317			n = size - i;
9318		else
9319			n = SF_PAGE_SIZE;
9320		ret = t4_write_flash(adap, addr, n, cfg_data, 0);
9321		if (ret)
9322			goto out;
9323
9324		addr += SF_PAGE_SIZE;
9325		cfg_data += SF_PAGE_SIZE;
9326	}
9327
9328out:
9329	if (ret)
9330		CH_ERR(adap, "boot config data %s failed %d\n",
9331				(size == 0 ? "clear" : "download"), ret);
9332	return ret;
9333}
9334
9335/**
9336 *	t4_set_filter_mode - configure the optional components of filter tuples
9337 *	@adap: the adapter
9338 *	@mode_map: a bitmap selcting which optional filter components to enable
9339 * 	@sleep_ok: if true we may sleep while awaiting command completion
9340 *
9341 *	Sets the filter mode by selecting the optional components to enable
9342 *	in filter tuples.  Returns 0 on success and a negative error if the
9343 *	requested mode needs more bits than are available for optional
9344 *	components.
9345 */
9346int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map,
9347		       bool sleep_ok)
9348{
9349	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
9350
9351	int i, nbits = 0;
9352
9353	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
9354		if (mode_map & (1 << i))
9355			nbits += width[i];
9356	if (nbits > FILTER_OPT_LEN)
9357		return -EINVAL;
9358	t4_tp_pio_write(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, sleep_ok);
9359	read_filter_mode_and_ingress_config(adap, sleep_ok);
9360
9361	return 0;
9362}
9363
9364/**
9365 *	t4_clr_port_stats - clear port statistics
9366 *	@adap: the adapter
9367 *	@idx: the port index
9368 *
9369 *	Clear HW statistics for the given port.
9370 */
9371void t4_clr_port_stats(struct adapter *adap, int idx)
9372{
9373	unsigned int i;
9374	u32 bgmap = t4_get_mps_bg_map(adap, idx);
9375	u32 port_base_addr;
9376
9377	if (is_t4(adap))
9378		port_base_addr = PORT_BASE(idx);
9379	else
9380		port_base_addr = T5_PORT_BASE(idx);
9381
9382	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
9383			i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
9384		t4_write_reg(adap, port_base_addr + i, 0);
9385	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
9386			i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
9387		t4_write_reg(adap, port_base_addr + i, 0);
9388	for (i = 0; i < 4; i++)
9389		if (bgmap & (1 << i)) {
9390			t4_write_reg(adap,
9391			A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
9392			t4_write_reg(adap,
9393			A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
9394		}
9395}
9396
9397/**
9398 *	t4_i2c_rd - read I2C data from adapter
9399 *	@adap: the adapter
9400 *	@port: Port number if per-port device; <0 if not
9401 *	@devid: per-port device ID or absolute device ID
9402 *	@offset: byte offset into device I2C space
9403 *	@len: byte length of I2C space data
9404 *	@buf: buffer in which to return I2C data
9405 *
9406 *	Reads the I2C data from the indicated device and location.
9407 */
9408int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
9409	      int port, unsigned int devid,
9410	      unsigned int offset, unsigned int len,
9411	      u8 *buf)
9412{
9413	u32 ldst_addrspace;
9414	struct fw_ldst_cmd ldst;
9415	int ret;
9416
9417	if (port >= 4 ||
9418	    devid >= 256 ||
9419	    offset >= 256 ||
9420	    len > sizeof ldst.u.i2c.data)
9421		return -EINVAL;
9422
9423	memset(&ldst, 0, sizeof ldst);
9424	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
9425	ldst.op_to_addrspace =
9426		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9427			    F_FW_CMD_REQUEST |
9428			    F_FW_CMD_READ |
9429			    ldst_addrspace);
9430	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
9431	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
9432	ldst.u.i2c.did = devid;
9433	ldst.u.i2c.boffset = offset;
9434	ldst.u.i2c.blen = len;
9435	ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
9436	if (!ret)
9437		memcpy(buf, ldst.u.i2c.data, len);
9438	return ret;
9439}
9440
9441/**
9442 *	t4_i2c_wr - write I2C data to adapter
9443 *	@adap: the adapter
9444 *	@port: Port number if per-port device; <0 if not
9445 *	@devid: per-port device ID or absolute device ID
9446 *	@offset: byte offset into device I2C space
9447 *	@len: byte length of I2C space data
9448 *	@buf: buffer containing new I2C data
9449 *
9450 *	Write the I2C data to the indicated device and location.
9451 */
9452int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
9453	      int port, unsigned int devid,
9454	      unsigned int offset, unsigned int len,
9455	      u8 *buf)
9456{
9457	u32 ldst_addrspace;
9458	struct fw_ldst_cmd ldst;
9459
9460	if (port >= 4 ||
9461	    devid >= 256 ||
9462	    offset >= 256 ||
9463	    len > sizeof ldst.u.i2c.data)
9464		return -EINVAL;
9465
9466	memset(&ldst, 0, sizeof ldst);
9467	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
9468	ldst.op_to_addrspace =
9469		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9470			    F_FW_CMD_REQUEST |
9471			    F_FW_CMD_WRITE |
9472			    ldst_addrspace);
9473	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
9474	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
9475	ldst.u.i2c.did = devid;
9476	ldst.u.i2c.boffset = offset;
9477	ldst.u.i2c.blen = len;
9478	memcpy(ldst.u.i2c.data, buf, len);
9479	return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
9480}
9481
9482/**
9483 * 	t4_sge_ctxt_rd - read an SGE context through FW
9484 * 	@adap: the adapter
9485 * 	@mbox: mailbox to use for the FW command
9486 * 	@cid: the context id
9487 * 	@ctype: the context type
9488 * 	@data: where to store the context data
9489 *
9490 * 	Issues a FW command through the given mailbox to read an SGE context.
9491 */
9492int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
9493		   enum ctxt_type ctype, u32 *data)
9494{
9495	int ret;
9496	struct fw_ldst_cmd c;
9497
9498	if (ctype == CTXT_EGRESS)
9499		ret = FW_LDST_ADDRSPC_SGE_EGRC;
9500	else if (ctype == CTXT_INGRESS)
9501		ret = FW_LDST_ADDRSPC_SGE_INGC;
9502	else if (ctype == CTXT_FLM)
9503		ret = FW_LDST_ADDRSPC_SGE_FLMC;
9504	else
9505		ret = FW_LDST_ADDRSPC_SGE_CONMC;
9506
9507	memset(&c, 0, sizeof(c));
9508	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
9509					F_FW_CMD_REQUEST | F_FW_CMD_READ |
9510					V_FW_LDST_CMD_ADDRSPACE(ret));
9511	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
9512	c.u.idctxt.physid = cpu_to_be32(cid);
9513
9514	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
9515	if (ret == 0) {
9516		data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
9517		data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
9518		data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
9519		data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
9520		data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
9521		data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
9522	}
9523	return ret;
9524}
9525
9526/**
9527 * 	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
9528 * 	@adap: the adapter
9529 * 	@cid: the context id
9530 * 	@ctype: the context type
9531 * 	@data: where to store the context data
9532 *
9533 * 	Reads an SGE context directly, bypassing FW.  This is only for
9534 * 	debugging when FW is unavailable.
9535 */
9536int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
9537		      u32 *data)
9538{
9539	int i, ret;
9540
9541	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
9542	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
9543	if (!ret)
9544		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
9545			*data++ = t4_read_reg(adap, i);
9546	return ret;
9547}
9548
9549int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
9550    int sleep_ok)
9551{
9552	struct fw_sched_cmd cmd;
9553
9554	memset(&cmd, 0, sizeof(cmd));
9555	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9556				      F_FW_CMD_REQUEST |
9557				      F_FW_CMD_WRITE);
9558	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9559
9560	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
9561	cmd.u.config.type = type;
9562	cmd.u.config.minmaxen = minmaxen;
9563
9564	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9565			       NULL, sleep_ok);
9566}
9567
9568int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
9569		    int rateunit, int ratemode, int channel, int cl,
9570		    int minrate, int maxrate, int weight, int pktsize,
9571		    int sleep_ok)
9572{
9573	struct fw_sched_cmd cmd;
9574
9575	memset(&cmd, 0, sizeof(cmd));
9576	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9577				      F_FW_CMD_REQUEST |
9578				      F_FW_CMD_WRITE);
9579	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9580
9581	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9582	cmd.u.params.type = type;
9583	cmd.u.params.level = level;
9584	cmd.u.params.mode = mode;
9585	cmd.u.params.ch = channel;
9586	cmd.u.params.cl = cl;
9587	cmd.u.params.unit = rateunit;
9588	cmd.u.params.rate = ratemode;
9589	cmd.u.params.min = cpu_to_be32(minrate);
9590	cmd.u.params.max = cpu_to_be32(maxrate);
9591	cmd.u.params.weight = cpu_to_be16(weight);
9592	cmd.u.params.pktsize = cpu_to_be16(pktsize);
9593
9594	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9595			       NULL, sleep_ok);
9596}
9597
9598int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode,
9599    unsigned int maxrate, int sleep_ok)
9600{
9601	struct fw_sched_cmd cmd;
9602
9603	memset(&cmd, 0, sizeof(cmd));
9604	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9605				      F_FW_CMD_REQUEST |
9606				      F_FW_CMD_WRITE);
9607	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9608
9609	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9610	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
9611	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CH_RL;
9612	cmd.u.params.ch = channel;
9613	cmd.u.params.rate = ratemode;		/* REL or ABS */
9614	cmd.u.params.max = cpu_to_be32(maxrate);/*  %  or kbps */
9615
9616	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9617			       NULL, sleep_ok);
9618}
9619
9620int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl,
9621    int weight, int sleep_ok)
9622{
9623	struct fw_sched_cmd cmd;
9624
9625	if (weight < 0 || weight > 100)
9626		return -EINVAL;
9627
9628	memset(&cmd, 0, sizeof(cmd));
9629	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9630				      F_FW_CMD_REQUEST |
9631				      F_FW_CMD_WRITE);
9632	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9633
9634	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9635	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
9636	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
9637	cmd.u.params.ch = channel;
9638	cmd.u.params.cl = cl;
9639	cmd.u.params.weight = cpu_to_be16(weight);
9640
9641	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9642			       NULL, sleep_ok);
9643}
9644
9645int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl,
9646    int mode, unsigned int maxrate, int pktsize, int sleep_ok)
9647{
9648	struct fw_sched_cmd cmd;
9649
9650	memset(&cmd, 0, sizeof(cmd));
9651	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
9652				      F_FW_CMD_REQUEST |
9653				      F_FW_CMD_WRITE);
9654	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9655
9656	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9657	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
9658	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_RL;
9659	cmd.u.params.mode = mode;
9660	cmd.u.params.ch = channel;
9661	cmd.u.params.cl = cl;
9662	cmd.u.params.unit = FW_SCHED_PARAMS_UNIT_BITRATE;
9663	cmd.u.params.rate = FW_SCHED_PARAMS_RATE_ABS;
9664	cmd.u.params.max = cpu_to_be32(maxrate);
9665	cmd.u.params.pktsize = cpu_to_be16(pktsize);
9666
9667	return t4_wr_mbox_meat(adapter,adapter->mbox, &cmd, sizeof(cmd),
9668			       NULL, sleep_ok);
9669}
9670
9671/*
9672 *	t4_config_watchdog - configure (enable/disable) a watchdog timer
9673 *	@adapter: the adapter
9674 * 	@mbox: mailbox to use for the FW command
9675 * 	@pf: the PF owning the queue
9676 * 	@vf: the VF owning the queue
9677 *	@timeout: watchdog timeout in ms
9678 *	@action: watchdog timer / action
9679 *
9680 *	There are separate watchdog timers for each possible watchdog
9681 *	action.  Configure one of the watchdog timers by setting a non-zero
9682 *	timeout.  Disable a watchdog timer by using a timeout of zero.
9683 */
9684int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
9685		       unsigned int pf, unsigned int vf,
9686		       unsigned int timeout, unsigned int action)
9687{
9688	struct fw_watchdog_cmd wdog;
9689	unsigned int ticks;
9690
9691	/*
9692	 * The watchdog command expects a timeout in units of 10ms so we need
9693	 * to convert it here (via rounding) and force a minimum of one 10ms
9694	 * "tick" if the timeout is non-zero but the convertion results in 0
9695	 * ticks.
9696	 */
9697	ticks = (timeout + 5)/10;
9698	if (timeout && !ticks)
9699		ticks = 1;
9700
9701	memset(&wdog, 0, sizeof wdog);
9702	wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
9703				     F_FW_CMD_REQUEST |
9704				     F_FW_CMD_WRITE |
9705				     V_FW_PARAMS_CMD_PFN(pf) |
9706				     V_FW_PARAMS_CMD_VFN(vf));
9707	wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
9708	wdog.timeout = cpu_to_be32(ticks);
9709	wdog.action = cpu_to_be32(action);
9710
9711	return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
9712}
9713
9714int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
9715{
9716	struct fw_devlog_cmd devlog_cmd;
9717	int ret;
9718
9719	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9720	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9721					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
9722	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9723	ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9724			 sizeof(devlog_cmd), &devlog_cmd);
9725	if (ret)
9726		return ret;
9727
9728	*level = devlog_cmd.level;
9729	return 0;
9730}
9731
9732int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
9733{
9734	struct fw_devlog_cmd devlog_cmd;
9735
9736	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9737	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
9738					     F_FW_CMD_REQUEST |
9739					     F_FW_CMD_WRITE);
9740	devlog_cmd.level = level;
9741	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9742	return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
9743			  sizeof(devlog_cmd), &devlog_cmd);
9744}
9745