bxe.c revision 305615
1/*-
2 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
18 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
24 * THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: stable/10/sys/dev/bxe/bxe.c 305615 2016-09-08 15:06:28Z pfg $");
29
30#define BXE_DRIVER_VERSION "1.78.81"
31
32#include "bxe.h"
33#include "ecore_sp.h"
34#include "ecore_init.h"
35#include "ecore_init_ops.h"
36
37#include "57710_int_offsets.h"
38#include "57711_int_offsets.h"
39#include "57712_int_offsets.h"
40
41/*
42 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
43 * explicitly here for older kernels that don't include this changeset.
44 */
45#ifndef CTLTYPE_U64
46#define CTLTYPE_U64      CTLTYPE_QUAD
47#define sysctl_handle_64 sysctl_handle_quad
48#endif
49
50/*
51 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
52 * here as zero(0) for older kernels that don't include this changeset
53 * thereby masking the functionality.
54 */
55#ifndef CSUM_TCP_IPV6
56#define CSUM_TCP_IPV6 0
57#define CSUM_UDP_IPV6 0
58#endif
59
60/*
61 * pci_find_cap was added in r219865. Re-define this at pci_find_extcap
62 * for older kernels that don't include this changeset.
63 */
64#if __FreeBSD_version < 900035
65#define pci_find_cap pci_find_extcap
66#endif
67
68#define BXE_DEF_SB_ATT_IDX 0x0001
69#define BXE_DEF_SB_IDX     0x0002
70
71/*
72 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
73 * function HW initialization.
74 */
75#define FLR_WAIT_USEC     10000 /* 10 msecs */
76#define FLR_WAIT_INTERVAL 50    /* usecs */
77#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
78
79struct pbf_pN_buf_regs {
80    int pN;
81    uint32_t init_crd;
82    uint32_t crd;
83    uint32_t crd_freed;
84};
85
86struct pbf_pN_cmd_regs {
87    int pN;
88    uint32_t lines_occup;
89    uint32_t lines_freed;
90};
91
92/*
93 * PCI Device ID Table used by bxe_probe().
94 */
95#define BXE_DEVDESC_MAX 64
96static struct bxe_device_type bxe_devs[] = {
97    {
98        BRCM_VENDORID,
99        CHIP_NUM_57710,
100        PCI_ANY_ID, PCI_ANY_ID,
101        "QLogic NetXtreme II BCM57710 10GbE"
102    },
103    {
104        BRCM_VENDORID,
105        CHIP_NUM_57711,
106        PCI_ANY_ID, PCI_ANY_ID,
107        "QLogic NetXtreme II BCM57711 10GbE"
108    },
109    {
110        BRCM_VENDORID,
111        CHIP_NUM_57711E,
112        PCI_ANY_ID, PCI_ANY_ID,
113        "QLogic NetXtreme II BCM57711E 10GbE"
114    },
115    {
116        BRCM_VENDORID,
117        CHIP_NUM_57712,
118        PCI_ANY_ID, PCI_ANY_ID,
119        "QLogic NetXtreme II BCM57712 10GbE"
120    },
121    {
122        BRCM_VENDORID,
123        CHIP_NUM_57712_MF,
124        PCI_ANY_ID, PCI_ANY_ID,
125        "QLogic NetXtreme II BCM57712 MF 10GbE"
126    },
127    {
128        BRCM_VENDORID,
129        CHIP_NUM_57800,
130        PCI_ANY_ID, PCI_ANY_ID,
131        "QLogic NetXtreme II BCM57800 10GbE"
132    },
133    {
134        BRCM_VENDORID,
135        CHIP_NUM_57800_MF,
136        PCI_ANY_ID, PCI_ANY_ID,
137        "QLogic NetXtreme II BCM57800 MF 10GbE"
138    },
139    {
140        BRCM_VENDORID,
141        CHIP_NUM_57810,
142        PCI_ANY_ID, PCI_ANY_ID,
143        "QLogic NetXtreme II BCM57810 10GbE"
144    },
145    {
146        BRCM_VENDORID,
147        CHIP_NUM_57810_MF,
148        PCI_ANY_ID, PCI_ANY_ID,
149        "QLogic NetXtreme II BCM57810 MF 10GbE"
150    },
151    {
152        BRCM_VENDORID,
153        CHIP_NUM_57811,
154        PCI_ANY_ID, PCI_ANY_ID,
155        "QLogic NetXtreme II BCM57811 10GbE"
156    },
157    {
158        BRCM_VENDORID,
159        CHIP_NUM_57811_MF,
160        PCI_ANY_ID, PCI_ANY_ID,
161        "QLogic NetXtreme II BCM57811 MF 10GbE"
162    },
163    {
164        BRCM_VENDORID,
165        CHIP_NUM_57840_4_10,
166        PCI_ANY_ID, PCI_ANY_ID,
167        "QLogic NetXtreme II BCM57840 4x10GbE"
168    },
169    {
170        BRCM_VENDORID,
171        CHIP_NUM_57840_MF,
172        PCI_ANY_ID, PCI_ANY_ID,
173        "QLogic NetXtreme II BCM57840 MF 10GbE"
174    },
175    {
176        0, 0, 0, 0, NULL
177    }
178};
179
180MALLOC_DECLARE(M_BXE_ILT);
181MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
182
183/*
184 * FreeBSD device entry points.
185 */
186static int bxe_probe(device_t);
187static int bxe_attach(device_t);
188static int bxe_detach(device_t);
189static int bxe_shutdown(device_t);
190
191/*
192 * FreeBSD KLD module/device interface event handler method.
193 */
194static device_method_t bxe_methods[] = {
195    /* Device interface (device_if.h) */
196    DEVMETHOD(device_probe,     bxe_probe),
197    DEVMETHOD(device_attach,    bxe_attach),
198    DEVMETHOD(device_detach,    bxe_detach),
199    DEVMETHOD(device_shutdown,  bxe_shutdown),
200    /* Bus interface (bus_if.h) */
201    DEVMETHOD(bus_print_child,  bus_generic_print_child),
202    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
203    KOBJMETHOD_END
204};
205
206/*
207 * FreeBSD KLD Module data declaration
208 */
209static driver_t bxe_driver = {
210    "bxe",                   /* module name */
211    bxe_methods,             /* event handler */
212    sizeof(struct bxe_softc) /* extra data */
213};
214
215/*
216 * FreeBSD dev class is needed to manage dev instances and
217 * to associate with a bus type
218 */
219static devclass_t bxe_devclass;
220
221MODULE_DEPEND(bxe, pci, 1, 1, 1);
222MODULE_DEPEND(bxe, ether, 1, 1, 1);
223DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
224
225/* resources needed for unloading a previously loaded device */
226
227#define BXE_PREV_WAIT_NEEDED 1
228struct mtx bxe_prev_mtx;
229MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
230struct bxe_prev_list_node {
231    LIST_ENTRY(bxe_prev_list_node) node;
232    uint8_t bus;
233    uint8_t slot;
234    uint8_t path;
235    uint8_t aer; /* XXX automatic error recovery */
236    uint8_t undi;
237};
238static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);
239
240static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
241
242/* Tunable device values... */
243
244SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");
245
246/* Debug */
247unsigned long bxe_debug = 0;
248TUNABLE_ULONG("hw.bxe.debug", &bxe_debug);
249SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, (CTLFLAG_RDTUN),
250             &bxe_debug, 0, "Debug logging mode");
251
252/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
253static int bxe_interrupt_mode = INTR_MODE_MSIX;
254TUNABLE_INT("hw.bxe.interrupt_mode", &bxe_interrupt_mode);
255SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
256           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
257
258/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
259static int bxe_queue_count = 4;
260TUNABLE_INT("hw.bxe.queue_count", &bxe_queue_count);
261SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
262           &bxe_queue_count, 0, "Multi-Queue queue count");
263
264/* max number of buffers per queue (default RX_BD_USABLE) */
265static int bxe_max_rx_bufs = 0;
266TUNABLE_INT("hw.bxe.max_rx_bufs", &bxe_max_rx_bufs);
267SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
268           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");
269
270/* Host interrupt coalescing RX tick timer (usecs) */
271static int bxe_hc_rx_ticks = 25;
272TUNABLE_INT("hw.bxe.hc_rx_ticks", &bxe_hc_rx_ticks);
273SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
274           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");
275
276/* Host interrupt coalescing TX tick timer (usecs) */
277static int bxe_hc_tx_ticks = 50;
278TUNABLE_INT("hw.bxe.hc_tx_ticks", &bxe_hc_tx_ticks);
279SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
280           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");
281
282/* Maximum number of Rx packets to process at a time */
283static int bxe_rx_budget = 0xffffffff;
284TUNABLE_INT("hw.bxe.rx_budget", &bxe_rx_budget);
285SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
286           &bxe_rx_budget, 0, "Rx processing budget");
287
288/* Maximum LRO aggregation size */
289static int bxe_max_aggregation_size = 0;
290TUNABLE_INT("hw.bxe.max_aggregation_size", &bxe_max_aggregation_size);
291SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
292           &bxe_max_aggregation_size, 0, "max aggregation size");
293
294/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
295static int bxe_mrrs = -1;
296TUNABLE_INT("hw.bxe.mrrs", &bxe_mrrs);
297SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
298           &bxe_mrrs, 0, "PCIe maximum read request size");
299
300/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
301static int bxe_autogreeen = 0;
302TUNABLE_INT("hw.bxe.autogreeen", &bxe_autogreeen);
303SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
304           &bxe_autogreeen, 0, "AutoGrEEEn support");
305
306/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
307static int bxe_udp_rss = 0;
308TUNABLE_INT("hw.bxe.udp_rss", &bxe_udp_rss);
309SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
310           &bxe_udp_rss, 0, "UDP RSS support");
311
312
313#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */
314
315#define STATS_OFFSET32(stat_name)                   \
316    (offsetof(struct bxe_eth_stats, stat_name) / 4)
317
318#define Q_STATS_OFFSET32(stat_name)                   \
319    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
320
321static const struct {
322    uint32_t offset;
323    uint32_t size;
324    uint32_t flags;
325#define STATS_FLAGS_PORT  1
326#define STATS_FLAGS_FUNC  2 /* MF only cares about function stats */
327#define STATS_FLAGS_BOTH  (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
328    char string[STAT_NAME_LEN];
329} bxe_eth_stats_arr[] = {
330    { STATS_OFFSET32(total_bytes_received_hi),
331                8, STATS_FLAGS_BOTH, "rx_bytes" },
332    { STATS_OFFSET32(error_bytes_received_hi),
333                8, STATS_FLAGS_BOTH, "rx_error_bytes" },
334    { STATS_OFFSET32(total_unicast_packets_received_hi),
335                8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
336    { STATS_OFFSET32(total_multicast_packets_received_hi),
337                8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
338    { STATS_OFFSET32(total_broadcast_packets_received_hi),
339                8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
340    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
341                8, STATS_FLAGS_PORT, "rx_crc_errors" },
342    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
343                8, STATS_FLAGS_PORT, "rx_align_errors" },
344    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
345                8, STATS_FLAGS_PORT, "rx_undersize_packets" },
346    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
347                8, STATS_FLAGS_PORT, "rx_oversize_packets" },
348    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
349                8, STATS_FLAGS_PORT, "rx_fragments" },
350    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
351                8, STATS_FLAGS_PORT, "rx_jabbers" },
352    { STATS_OFFSET32(no_buff_discard_hi),
353                8, STATS_FLAGS_BOTH, "rx_discards" },
354    { STATS_OFFSET32(mac_filter_discard),
355                4, STATS_FLAGS_PORT, "rx_filtered_packets" },
356    { STATS_OFFSET32(mf_tag_discard),
357                4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
358    { STATS_OFFSET32(pfc_frames_received_hi),
359                8, STATS_FLAGS_PORT, "pfc_frames_received" },
360    { STATS_OFFSET32(pfc_frames_sent_hi),
361                8, STATS_FLAGS_PORT, "pfc_frames_sent" },
362    { STATS_OFFSET32(brb_drop_hi),
363                8, STATS_FLAGS_PORT, "rx_brb_discard" },
364    { STATS_OFFSET32(brb_truncate_hi),
365                8, STATS_FLAGS_PORT, "rx_brb_truncate" },
366    { STATS_OFFSET32(pause_frames_received_hi),
367                8, STATS_FLAGS_PORT, "rx_pause_frames" },
368    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
369                8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
370    { STATS_OFFSET32(nig_timer_max),
371                4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
372    { STATS_OFFSET32(total_bytes_transmitted_hi),
373                8, STATS_FLAGS_BOTH, "tx_bytes" },
374    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
375                8, STATS_FLAGS_PORT, "tx_error_bytes" },
376    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
377                8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
378    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
379                8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
380    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
381                8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
382    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
383                8, STATS_FLAGS_PORT, "tx_mac_errors" },
384    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
385                8, STATS_FLAGS_PORT, "tx_carrier_errors" },
386    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
387                8, STATS_FLAGS_PORT, "tx_single_collisions" },
388    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
389                8, STATS_FLAGS_PORT, "tx_multi_collisions" },
390    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
391                8, STATS_FLAGS_PORT, "tx_deferred" },
392    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
393                8, STATS_FLAGS_PORT, "tx_excess_collisions" },
394    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
395                8, STATS_FLAGS_PORT, "tx_late_collisions" },
396    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
397                8, STATS_FLAGS_PORT, "tx_total_collisions" },
398    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
399                8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
400    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
401                8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
402    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
403                8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
404    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
405                8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
406    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
407                8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
408    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
409                8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
410    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
411                8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
412    { STATS_OFFSET32(pause_frames_sent_hi),
413                8, STATS_FLAGS_PORT, "tx_pause_frames" },
414    { STATS_OFFSET32(total_tpa_aggregations_hi),
415                8, STATS_FLAGS_FUNC, "tpa_aggregations" },
416    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
417                8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
418    { STATS_OFFSET32(total_tpa_bytes_hi),
419                8, STATS_FLAGS_FUNC, "tpa_bytes"},
420    { STATS_OFFSET32(eee_tx_lpi),
421                4, STATS_FLAGS_PORT, "eee_tx_lpi"},
422    { STATS_OFFSET32(rx_calls),
423                4, STATS_FLAGS_FUNC, "rx_calls"},
424    { STATS_OFFSET32(rx_pkts),
425                4, STATS_FLAGS_FUNC, "rx_pkts"},
426    { STATS_OFFSET32(rx_tpa_pkts),
427                4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
428    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
429                4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
430    { STATS_OFFSET32(rx_bxe_service_rxsgl),
431                4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
432    { STATS_OFFSET32(rx_jumbo_sge_pkts),
433                4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
434    { STATS_OFFSET32(rx_soft_errors),
435                4, STATS_FLAGS_FUNC, "rx_soft_errors"},
436    { STATS_OFFSET32(rx_hw_csum_errors),
437                4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
438    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
439                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
440    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
441                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
442    { STATS_OFFSET32(rx_budget_reached),
443                4, STATS_FLAGS_FUNC, "rx_budget_reached"},
444    { STATS_OFFSET32(tx_pkts),
445                4, STATS_FLAGS_FUNC, "tx_pkts"},
446    { STATS_OFFSET32(tx_soft_errors),
447                4, STATS_FLAGS_FUNC, "tx_soft_errors"},
448    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
449                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
450    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
451                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
452    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
453                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
454    { STATS_OFFSET32(tx_ofld_frames_lso),
455                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
456    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
457                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
458    { STATS_OFFSET32(tx_encap_failures),
459                4, STATS_FLAGS_FUNC, "tx_encap_failures"},
460    { STATS_OFFSET32(tx_hw_queue_full),
461                4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
462    { STATS_OFFSET32(tx_hw_max_queue_depth),
463                4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
464    { STATS_OFFSET32(tx_dma_mapping_failure),
465                4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
466    { STATS_OFFSET32(tx_max_drbr_queue_depth),
467                4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
468    { STATS_OFFSET32(tx_window_violation_std),
469                4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
470    { STATS_OFFSET32(tx_window_violation_tso),
471                4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
472    { STATS_OFFSET32(tx_chain_lost_mbuf),
473                4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
474    { STATS_OFFSET32(tx_frames_deferred),
475                4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
476    { STATS_OFFSET32(tx_queue_xoff),
477                4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
478    { STATS_OFFSET32(mbuf_defrag_attempts),
479                4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
480    { STATS_OFFSET32(mbuf_defrag_failures),
481                4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
482    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
483                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
484    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
485                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
486    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
487                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
488    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
489                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
490    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
491                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
492    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
493                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
494    { STATS_OFFSET32(mbuf_alloc_tx),
495                4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
496    { STATS_OFFSET32(mbuf_alloc_rx),
497                4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
498    { STATS_OFFSET32(mbuf_alloc_sge),
499                4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
500    { STATS_OFFSET32(mbuf_alloc_tpa),
501                4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
502    { STATS_OFFSET32(tx_queue_full_return),
503                4, STATS_FLAGS_FUNC, "tx_queue_full_return"}
504};
505
506static const struct {
507    uint32_t offset;
508    uint32_t size;
509    char string[STAT_NAME_LEN];
510} bxe_eth_q_stats_arr[] = {
511    { Q_STATS_OFFSET32(total_bytes_received_hi),
512                8, "rx_bytes" },
513    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
514                8, "rx_ucast_packets" },
515    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
516                8, "rx_mcast_packets" },
517    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
518                8, "rx_bcast_packets" },
519    { Q_STATS_OFFSET32(no_buff_discard_hi),
520                8, "rx_discards" },
521    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
522                8, "tx_bytes" },
523    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
524                8, "tx_ucast_packets" },
525    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
526                8, "tx_mcast_packets" },
527    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
528                8, "tx_bcast_packets" },
529    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
530                8, "tpa_aggregations" },
531    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
532                8, "tpa_aggregated_frames"},
533    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
534                8, "tpa_bytes"},
535    { Q_STATS_OFFSET32(rx_calls),
536                4, "rx_calls"},
537    { Q_STATS_OFFSET32(rx_pkts),
538                4, "rx_pkts"},
539    { Q_STATS_OFFSET32(rx_tpa_pkts),
540                4, "rx_tpa_pkts"},
541    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
542                4, "rx_erroneous_jumbo_sge_pkts"},
543    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
544                4, "rx_bxe_service_rxsgl"},
545    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
546                4, "rx_jumbo_sge_pkts"},
547    { Q_STATS_OFFSET32(rx_soft_errors),
548                4, "rx_soft_errors"},
549    { Q_STATS_OFFSET32(rx_hw_csum_errors),
550                4, "rx_hw_csum_errors"},
551    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
552                4, "rx_ofld_frames_csum_ip"},
553    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
554                4, "rx_ofld_frames_csum_tcp_udp"},
555    { Q_STATS_OFFSET32(rx_budget_reached),
556                4, "rx_budget_reached"},
557    { Q_STATS_OFFSET32(tx_pkts),
558                4, "tx_pkts"},
559    { Q_STATS_OFFSET32(tx_soft_errors),
560                4, "tx_soft_errors"},
561    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
562                4, "tx_ofld_frames_csum_ip"},
563    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
564                4, "tx_ofld_frames_csum_tcp"},
565    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
566                4, "tx_ofld_frames_csum_udp"},
567    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
568                4, "tx_ofld_frames_lso"},
569    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
570                4, "tx_ofld_frames_lso_hdr_splits"},
571    { Q_STATS_OFFSET32(tx_encap_failures),
572                4, "tx_encap_failures"},
573    { Q_STATS_OFFSET32(tx_hw_queue_full),
574                4, "tx_hw_queue_full"},
575    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
576                4, "tx_hw_max_queue_depth"},
577    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
578                4, "tx_dma_mapping_failure"},
579    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
580                4, "tx_max_drbr_queue_depth"},
581    { Q_STATS_OFFSET32(tx_window_violation_std),
582                4, "tx_window_violation_std"},
583    { Q_STATS_OFFSET32(tx_window_violation_tso),
584                4, "tx_window_violation_tso"},
585    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
586                4, "tx_chain_lost_mbuf"},
587    { Q_STATS_OFFSET32(tx_frames_deferred),
588                4, "tx_frames_deferred"},
589    { Q_STATS_OFFSET32(tx_queue_xoff),
590                4, "tx_queue_xoff"},
591    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
592                4, "mbuf_defrag_attempts"},
593    { Q_STATS_OFFSET32(mbuf_defrag_failures),
594                4, "mbuf_defrag_failures"},
595    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
596                4, "mbuf_rx_bd_alloc_failed"},
597    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
598                4, "mbuf_rx_bd_mapping_failed"},
599    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
600                4, "mbuf_rx_tpa_alloc_failed"},
601    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
602                4, "mbuf_rx_tpa_mapping_failed"},
603    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
604                4, "mbuf_rx_sge_alloc_failed"},
605    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
606                4, "mbuf_rx_sge_mapping_failed"},
607    { Q_STATS_OFFSET32(mbuf_alloc_tx),
608                4, "mbuf_alloc_tx"},
609    { Q_STATS_OFFSET32(mbuf_alloc_rx),
610                4, "mbuf_alloc_rx"},
611    { Q_STATS_OFFSET32(mbuf_alloc_sge),
612                4, "mbuf_alloc_sge"},
613    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
614                4, "mbuf_alloc_tpa"},
615    { Q_STATS_OFFSET32(tx_queue_full_return),
616                4, "tx_queue_full_return"}
617};
618
619#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
620#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
621
622
623static void    bxe_cmng_fns_init(struct bxe_softc *sc,
624                                 uint8_t          read_cfg,
625                                 uint8_t          cmng_type);
626static int     bxe_get_cmng_fns_mode(struct bxe_softc *sc);
627static void    storm_memset_cmng(struct bxe_softc *sc,
628                                 struct cmng_init *cmng,
629                                 uint8_t          port);
630static void    bxe_set_reset_global(struct bxe_softc *sc);
631static void    bxe_set_reset_in_progress(struct bxe_softc *sc);
632static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
633                                 int              engine);
634static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
635static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
636                                   uint8_t          *global,
637                                   uint8_t          print);
638static void    bxe_int_disable(struct bxe_softc *sc);
639static int     bxe_release_leader_lock(struct bxe_softc *sc);
640static void    bxe_pf_disable(struct bxe_softc *sc);
641static void    bxe_free_fp_buffers(struct bxe_softc *sc);
642static inline void bxe_update_rx_prod(struct bxe_softc    *sc,
643                                      struct bxe_fastpath *fp,
644                                      uint16_t            rx_bd_prod,
645                                      uint16_t            rx_cq_prod,
646                                      uint16_t            rx_sge_prod);
647static void    bxe_link_report_locked(struct bxe_softc *sc);
648static void    bxe_link_report(struct bxe_softc *sc);
649static void    bxe_link_status_update(struct bxe_softc *sc);
650static void    bxe_periodic_callout_func(void *xsc);
651static void    bxe_periodic_start(struct bxe_softc *sc);
652static void    bxe_periodic_stop(struct bxe_softc *sc);
653static int     bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
654                                    uint16_t prev_index,
655                                    uint16_t index);
656static int     bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
657                                     int                 queue);
658static int     bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
659                                     uint16_t            index);
660static uint8_t bxe_txeof(struct bxe_softc *sc,
661                         struct bxe_fastpath *fp);
662static void    bxe_task_fp(struct bxe_fastpath *fp);
663static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
664                                     struct mbuf      *m,
665                                     uint8_t          contents);
666static int     bxe_alloc_mem(struct bxe_softc *sc);
667static void    bxe_free_mem(struct bxe_softc *sc);
668static int     bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
669static void    bxe_free_fw_stats_mem(struct bxe_softc *sc);
670static int     bxe_interrupt_attach(struct bxe_softc *sc);
671static void    bxe_interrupt_detach(struct bxe_softc *sc);
672static void    bxe_set_rx_mode(struct bxe_softc *sc);
673static int     bxe_init_locked(struct bxe_softc *sc);
674static int     bxe_stop_locked(struct bxe_softc *sc);
675static __noinline int bxe_nic_load(struct bxe_softc *sc,
676                                   int              load_mode);
677static __noinline int bxe_nic_unload(struct bxe_softc *sc,
678                                     uint32_t         unload_mode,
679                                     uint8_t          keep_link);
680
681static void bxe_handle_sp_tq(void *context, int pending);
682static void bxe_handle_fp_tq(void *context, int pending);
683
684static int bxe_add_cdev(struct bxe_softc *sc);
685static void bxe_del_cdev(struct bxe_softc *sc);
686static int bxe_alloc_buf_rings(struct bxe_softc *sc);
687static void bxe_free_buf_rings(struct bxe_softc *sc);
688
689/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
690uint32_t
691calc_crc32(uint8_t  *crc32_packet,
692           uint32_t crc32_length,
693           uint32_t crc32_seed,
694           uint8_t  complement)
695{
696   uint32_t byte         = 0;
697   uint32_t bit          = 0;
698   uint8_t  msb          = 0;
699   uint32_t temp         = 0;
700   uint32_t shft         = 0;
701   uint8_t  current_byte = 0;
702   uint32_t crc32_result = crc32_seed;
703   const uint32_t CRC32_POLY = 0x1edc6f41;
704
705   if ((crc32_packet == NULL) ||
706       (crc32_length == 0) ||
707       ((crc32_length % 8) != 0))
708    {
709        return (crc32_result);
710    }
711
712    for (byte = 0; byte < crc32_length; byte = byte + 1)
713    {
714        current_byte = crc32_packet[byte];
715        for (bit = 0; bit < 8; bit = bit + 1)
716        {
717            /* msb = crc32_result[31]; */
718            msb = (uint8_t)(crc32_result >> 31);
719
720            crc32_result = crc32_result << 1;
721
722            /* it (msb != current_byte[bit]) */
723            if (msb != (0x1 & (current_byte >> bit)))
724            {
725                crc32_result = crc32_result ^ CRC32_POLY;
726                /* crc32_result[0] = 1 */
727                crc32_result |= 1;
728            }
729        }
730    }
731
732    /* Last step is to:
733     * 1. "mirror" every bit
734     * 2. swap the 4 bytes
735     * 3. complement each bit
736     */
737
738    /* Mirror */
739    temp = crc32_result;
740    shft = sizeof(crc32_result) * 8 - 1;
741
742    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
743    {
744        temp <<= 1;
745        temp |= crc32_result & 1;
746        shft-- ;
747    }
748
749    /* temp[31-bit] = crc32_result[bit] */
750    temp <<= shft;
751
752    /* Swap */
753    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
754    {
755        uint32_t t0, t1, t2, t3;
756        t0 = (0x000000ff & (temp >> 24));
757        t1 = (0x0000ff00 & (temp >> 8));
758        t2 = (0x00ff0000 & (temp << 8));
759        t3 = (0xff000000 & (temp << 24));
760        crc32_result = t0 | t1 | t2 | t3;
761    }
762
763    /* Complement */
764    if (complement)
765    {
766        crc32_result = ~crc32_result;
767    }
768
769    return (crc32_result);
770}
771
772int
773bxe_test_bit(int                    nr,
774             volatile unsigned long *addr)
775{
776    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
777}
778
779void
780bxe_set_bit(unsigned int           nr,
781            volatile unsigned long *addr)
782{
783    atomic_set_acq_long(addr, (1 << nr));
784}
785
786void
787bxe_clear_bit(int                    nr,
788              volatile unsigned long *addr)
789{
790    atomic_clear_acq_long(addr, (1 << nr));
791}
792
793int
794bxe_test_and_set_bit(int                    nr,
795                       volatile unsigned long *addr)
796{
797    unsigned long x;
798    nr = (1 << nr);
799    do {
800        x = *addr;
801    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
802    // if (x & nr) bit_was_set; else bit_was_not_set;
803    return (x & nr);
804}
805
806int
807bxe_test_and_clear_bit(int                    nr,
808                       volatile unsigned long *addr)
809{
810    unsigned long x;
811    nr = (1 << nr);
812    do {
813        x = *addr;
814    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
815    // if (x & nr) bit_was_set; else bit_was_not_set;
816    return (x & nr);
817}
818
819int
820bxe_cmpxchg(volatile int *addr,
821            int          old,
822            int          new)
823{
824    int x;
825    do {
826        x = *addr;
827    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
828    return (x);
829}
830
831/*
832 * Get DMA memory from the OS.
833 *
834 * Validates that the OS has provided DMA buffers in response to a
835 * bus_dmamap_load call and saves the physical address of those buffers.
836 * When the callback is used the OS will return 0 for the mapping function
837 * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any
838 * failures back to the caller.
839 *
840 * Returns:
841 *   Nothing.
842 */
843static void
844bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
845{
846    struct bxe_dma *dma = arg;
847
848    if (error) {
849        dma->paddr = 0;
850        dma->nseg  = 0;
851        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
852    } else {
853        dma->paddr = segs->ds_addr;
854        dma->nseg  = nseg;
855    }
856}
857
858/*
859 * Allocate a block of memory and map it for DMA. No partial completions
860 * allowed and release any resources acquired if we can't acquire all
861 * resources.
862 *
863 * Returns:
864 *   0 = Success, !0 = Failure
865 */
866int
867bxe_dma_alloc(struct bxe_softc *sc,
868              bus_size_t       size,
869              struct bxe_dma   *dma,
870              const char       *msg)
871{
872    int rc;
873
874    if (dma->size > 0) {
875        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
876              (unsigned long)dma->size);
877        return (1);
878    }
879
880    memset(dma, 0, sizeof(*dma)); /* sanity */
881    dma->sc   = sc;
882    dma->size = size;
883    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);
884
885    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
886                            BCM_PAGE_SIZE,      /* alignment */
887                            0,                  /* boundary limit */
888                            BUS_SPACE_MAXADDR,  /* restricted low */
889                            BUS_SPACE_MAXADDR,  /* restricted hi */
890                            NULL,               /* addr filter() */
891                            NULL,               /* addr filter() arg */
892                            size,               /* max map size */
893                            1,                  /* num discontinuous */
894                            size,               /* max seg size */
895                            BUS_DMA_ALLOCNOW,   /* flags */
896                            NULL,               /* lock() */
897                            NULL,               /* lock() arg */
898                            &dma->tag);         /* returned dma tag */
899    if (rc != 0) {
900        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
901        memset(dma, 0, sizeof(*dma));
902        return (1);
903    }
904
905    rc = bus_dmamem_alloc(dma->tag,
906                          (void **)&dma->vaddr,
907                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
908                          &dma->map);
909    if (rc != 0) {
910        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
911        bus_dma_tag_destroy(dma->tag);
912        memset(dma, 0, sizeof(*dma));
913        return (1);
914    }
915
916    rc = bus_dmamap_load(dma->tag,
917                         dma->map,
918                         dma->vaddr,
919                         size,
920                         bxe_dma_map_addr, /* BLOGD in here */
921                         dma,
922                         BUS_DMA_NOWAIT);
923    if (rc != 0) {
924        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
925        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
926        bus_dma_tag_destroy(dma->tag);
927        memset(dma, 0, sizeof(*dma));
928        return (1);
929    }
930
931    return (0);
932}
933
934void
935bxe_dma_free(struct bxe_softc *sc,
936             struct bxe_dma   *dma)
937{
938    if (dma->size > 0) {
939        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));
940
941        bus_dmamap_sync(dma->tag, dma->map,
942                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
943        bus_dmamap_unload(dma->tag, dma->map);
944        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
945        bus_dma_tag_destroy(dma->tag);
946    }
947
948    memset(dma, 0, sizeof(*dma));
949}
950
951/*
952 * These indirect read and write routines are only during init.
953 * The locking is handled by the MCP.
954 */
955
956void
957bxe_reg_wr_ind(struct bxe_softc *sc,
958               uint32_t         addr,
959               uint32_t         val)
960{
961    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
962    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
963    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
964}
965
966uint32_t
967bxe_reg_rd_ind(struct bxe_softc *sc,
968               uint32_t         addr)
969{
970    uint32_t val;
971
972    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
973    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
974    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
975
976    return (val);
977}
978
979static int
980bxe_acquire_hw_lock(struct bxe_softc *sc,
981                    uint32_t         resource)
982{
983    uint32_t lock_status;
984    uint32_t resource_bit = (1 << resource);
985    int func = SC_FUNC(sc);
986    uint32_t hw_lock_control_reg;
987    int cnt;
988
989    /* validate the resource is within range */
990    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
991        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
992            " resource_bit 0x%x\n", resource, resource_bit);
993        return (-1);
994    }
995
996    if (func <= 5) {
997        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
998    } else {
999        hw_lock_control_reg =
1000                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1001    }
1002
1003    /* validate the resource is not already taken */
1004    lock_status = REG_RD(sc, hw_lock_control_reg);
1005    if (lock_status & resource_bit) {
1006        BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
1007              resource, lock_status, resource_bit);
1008        return (-1);
1009    }
1010
1011    /* try every 5ms for 5 seconds */
1012    for (cnt = 0; cnt < 1000; cnt++) {
1013        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
1014        lock_status = REG_RD(sc, hw_lock_control_reg);
1015        if (lock_status & resource_bit) {
1016            return (0);
1017        }
1018        DELAY(5000);
1019    }
1020
1021    BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
1022        resource, resource_bit);
1023    return (-1);
1024}
1025
1026static int
1027bxe_release_hw_lock(struct bxe_softc *sc,
1028                    uint32_t         resource)
1029{
1030    uint32_t lock_status;
1031    uint32_t resource_bit = (1 << resource);
1032    int func = SC_FUNC(sc);
1033    uint32_t hw_lock_control_reg;
1034
1035    /* validate the resource is within range */
1036    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1037        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1038            " resource_bit 0x%x\n", resource, resource_bit);
1039        return (-1);
1040    }
1041
1042    if (func <= 5) {
1043        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1044    } else {
1045        hw_lock_control_reg =
1046                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1047    }
1048
1049    /* validate the resource is currently taken */
1050    lock_status = REG_RD(sc, hw_lock_control_reg);
1051    if (!(lock_status & resource_bit)) {
1052        BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
1053              resource, lock_status, resource_bit);
1054        return (-1);
1055    }
1056
1057    REG_WR(sc, hw_lock_control_reg, resource_bit);
1058    return (0);
1059}
1060static void bxe_acquire_phy_lock(struct bxe_softc *sc)
1061{
1062	BXE_PHY_LOCK(sc);
1063	bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1064}
1065
1066static void bxe_release_phy_lock(struct bxe_softc *sc)
1067{
1068	bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO);
1069	BXE_PHY_UNLOCK(sc);
1070}
1071/*
1072 * Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
1073 * had we done things the other way around, if two pfs from the same port
1074 * would attempt to access nvram at the same time, we could run into a
1075 * scenario such as:
1076 * pf A takes the port lock.
1077 * pf B succeeds in taking the same lock since they are from the same port.
1078 * pf A takes the per pf misc lock. Performs eeprom access.
1079 * pf A finishes. Unlocks the per pf misc lock.
1080 * Pf B takes the lock and proceeds to perform it's own access.
1081 * pf A unlocks the per port lock, while pf B is still working (!).
1082 * mcp takes the per port lock and corrupts pf B's access (and/or has it's own
1083 * access corrupted by pf B).*
1084 */
1085static int
1086bxe_acquire_nvram_lock(struct bxe_softc *sc)
1087{
1088    int port = SC_PORT(sc);
1089    int count, i;
1090    uint32_t val = 0;
1091
1092    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
1093    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1094
1095    /* adjust timeout for emulation/FPGA */
1096    count = NVRAM_TIMEOUT_COUNT;
1097    if (CHIP_REV_IS_SLOW(sc)) {
1098        count *= 100;
1099    }
1100
1101    /* request access to nvram interface */
1102    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1103           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
1104
1105    for (i = 0; i < count*10; i++) {
1106        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1107        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1108            break;
1109        }
1110
1111        DELAY(5);
1112    }
1113
1114    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1115        BLOGE(sc, "Cannot get access to nvram interface "
1116            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1117            port, val);
1118        return (-1);
1119    }
1120
1121    return (0);
1122}
1123
1124static int
1125bxe_release_nvram_lock(struct bxe_softc *sc)
1126{
1127    int port = SC_PORT(sc);
1128    int count, i;
1129    uint32_t val = 0;
1130
1131    /* adjust timeout for emulation/FPGA */
1132    count = NVRAM_TIMEOUT_COUNT;
1133    if (CHIP_REV_IS_SLOW(sc)) {
1134        count *= 100;
1135    }
1136
1137    /* relinquish nvram interface */
1138    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1139           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
1140
1141    for (i = 0; i < count*10; i++) {
1142        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1143        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1144            break;
1145        }
1146
1147        DELAY(5);
1148    }
1149
1150    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1151        BLOGE(sc, "Cannot free access to nvram interface "
1152            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1153            port, val);
1154        return (-1);
1155    }
1156
1157    /* release HW lock: protect against other PFs in PF Direct Assignment */
1158    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1159
1160    return (0);
1161}
1162
1163static void
1164bxe_enable_nvram_access(struct bxe_softc *sc)
1165{
1166    uint32_t val;
1167
1168    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1169
1170    /* enable both bits, even on read */
1171    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1172           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
1173}
1174
1175static void
1176bxe_disable_nvram_access(struct bxe_softc *sc)
1177{
1178    uint32_t val;
1179
1180    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1181
1182    /* disable both bits, even after read */
1183    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1184           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
1185                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
1186}
1187
1188static int
1189bxe_nvram_read_dword(struct bxe_softc *sc,
1190                     uint32_t         offset,
1191                     uint32_t         *ret_val,
1192                     uint32_t         cmd_flags)
1193{
1194    int count, i, rc;
1195    uint32_t val;
1196
1197    /* build the command word */
1198    cmd_flags |= MCPR_NVM_COMMAND_DOIT;
1199
1200    /* need to clear DONE bit separately */
1201    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1202
1203    /* address of the NVRAM to read from */
1204    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1205           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1206
1207    /* issue a read command */
1208    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1209
1210    /* adjust timeout for emulation/FPGA */
1211    count = NVRAM_TIMEOUT_COUNT;
1212    if (CHIP_REV_IS_SLOW(sc)) {
1213        count *= 100;
1214    }
1215
1216    /* wait for completion */
1217    *ret_val = 0;
1218    rc = -1;
1219    for (i = 0; i < count; i++) {
1220        DELAY(5);
1221        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1222
1223        if (val & MCPR_NVM_COMMAND_DONE) {
1224            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
1225            /* we read nvram data in cpu order
1226             * but ethtool sees it as an array of bytes
1227             * converting to big-endian will do the work
1228             */
1229            *ret_val = htobe32(val);
1230            rc = 0;
1231            break;
1232        }
1233    }
1234
1235    if (rc == -1) {
1236        BLOGE(sc, "nvram read timeout expired "
1237            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1238            offset, cmd_flags, val);
1239    }
1240
1241    return (rc);
1242}
1243
1244static int
1245bxe_nvram_read(struct bxe_softc *sc,
1246               uint32_t         offset,
1247               uint8_t          *ret_buf,
1248               int              buf_size)
1249{
1250    uint32_t cmd_flags;
1251    uint32_t val;
1252    int rc;
1253
1254    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1255        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1256              offset, buf_size);
1257        return (-1);
1258    }
1259
1260    if ((offset + buf_size) > sc->devinfo.flash_size) {
1261        BLOGE(sc, "Invalid parameter, "
1262                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1263              offset, buf_size, sc->devinfo.flash_size);
1264        return (-1);
1265    }
1266
1267    /* request access to nvram interface */
1268    rc = bxe_acquire_nvram_lock(sc);
1269    if (rc) {
1270        return (rc);
1271    }
1272
1273    /* enable access to nvram interface */
1274    bxe_enable_nvram_access(sc);
1275
1276    /* read the first word(s) */
1277    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1278    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
1279        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1280        memcpy(ret_buf, &val, 4);
1281
1282        /* advance to the next dword */
1283        offset += sizeof(uint32_t);
1284        ret_buf += sizeof(uint32_t);
1285        buf_size -= sizeof(uint32_t);
1286        cmd_flags = 0;
1287    }
1288
1289    if (rc == 0) {
1290        cmd_flags |= MCPR_NVM_COMMAND_LAST;
1291        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1292        memcpy(ret_buf, &val, 4);
1293    }
1294
1295    /* disable access to nvram interface */
1296    bxe_disable_nvram_access(sc);
1297    bxe_release_nvram_lock(sc);
1298
1299    return (rc);
1300}
1301
1302static int
1303bxe_nvram_write_dword(struct bxe_softc *sc,
1304                      uint32_t         offset,
1305                      uint32_t         val,
1306                      uint32_t         cmd_flags)
1307{
1308    int count, i, rc;
1309
1310    /* build the command word */
1311    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);
1312
1313    /* need to clear DONE bit separately */
1314    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1315
1316    /* write the data */
1317    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);
1318
1319    /* address of the NVRAM to write to */
1320    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1321           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1322
1323    /* issue the write command */
1324    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1325
1326    /* adjust timeout for emulation/FPGA */
1327    count = NVRAM_TIMEOUT_COUNT;
1328    if (CHIP_REV_IS_SLOW(sc)) {
1329        count *= 100;
1330    }
1331
1332    /* wait for completion */
1333    rc = -1;
1334    for (i = 0; i < count; i++) {
1335        DELAY(5);
1336        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1337        if (val & MCPR_NVM_COMMAND_DONE) {
1338            rc = 0;
1339            break;
1340        }
1341    }
1342
1343    if (rc == -1) {
1344        BLOGE(sc, "nvram write timeout expired "
1345            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1346            offset, cmd_flags, val);
1347    }
1348
1349    return (rc);
1350}
1351
1352#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
1353
1354static int
1355bxe_nvram_write1(struct bxe_softc *sc,
1356                 uint32_t         offset,
1357                 uint8_t          *data_buf,
1358                 int              buf_size)
1359{
1360    uint32_t cmd_flags;
1361    uint32_t align_offset;
1362    uint32_t val;
1363    int rc;
1364
1365    if ((offset + buf_size) > sc->devinfo.flash_size) {
1366        BLOGE(sc, "Invalid parameter, "
1367                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1368              offset, buf_size, sc->devinfo.flash_size);
1369        return (-1);
1370    }
1371
1372    /* request access to nvram interface */
1373    rc = bxe_acquire_nvram_lock(sc);
1374    if (rc) {
1375        return (rc);
1376    }
1377
1378    /* enable access to nvram interface */
1379    bxe_enable_nvram_access(sc);
1380
1381    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1382    align_offset = (offset & ~0x03);
1383    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);
1384
1385    if (rc == 0) {
1386        val &= ~(0xff << BYTE_OFFSET(offset));
1387        val |= (*data_buf << BYTE_OFFSET(offset));
1388
1389        /* nvram data is returned as an array of bytes
1390         * convert it back to cpu order
1391         */
1392        val = be32toh(val);
1393
1394        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
1395    }
1396
1397    /* disable access to nvram interface */
1398    bxe_disable_nvram_access(sc);
1399    bxe_release_nvram_lock(sc);
1400
1401    return (rc);
1402}
1403
1404static int
1405bxe_nvram_write(struct bxe_softc *sc,
1406                uint32_t         offset,
1407                uint8_t          *data_buf,
1408                int              buf_size)
1409{
1410    uint32_t cmd_flags;
1411    uint32_t val;
1412    uint32_t written_so_far;
1413    int rc;
1414
1415    if (buf_size == 1) {
1416        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
1417    }
1418
1419    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
1420        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1421              offset, buf_size);
1422        return (-1);
1423    }
1424
1425    if (buf_size == 0) {
1426        return (0); /* nothing to do */
1427    }
1428
1429    if ((offset + buf_size) > sc->devinfo.flash_size) {
1430        BLOGE(sc, "Invalid parameter, "
1431                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1432              offset, buf_size, sc->devinfo.flash_size);
1433        return (-1);
1434    }
1435
1436    /* request access to nvram interface */
1437    rc = bxe_acquire_nvram_lock(sc);
1438    if (rc) {
1439        return (rc);
1440    }
1441
1442    /* enable access to nvram interface */
1443    bxe_enable_nvram_access(sc);
1444
1445    written_so_far = 0;
1446    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1447    while ((written_so_far < buf_size) && (rc == 0)) {
1448        if (written_so_far == (buf_size - sizeof(uint32_t))) {
1449            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1450        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
1451            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1452        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
1453            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1454        }
1455
1456        memcpy(&val, data_buf, 4);
1457
1458        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);
1459
1460        /* advance to the next dword */
1461        offset += sizeof(uint32_t);
1462        data_buf += sizeof(uint32_t);
1463        written_so_far += sizeof(uint32_t);
1464        cmd_flags = 0;
1465    }
1466
1467    /* disable access to nvram interface */
1468    bxe_disable_nvram_access(sc);
1469    bxe_release_nvram_lock(sc);
1470
1471    return (rc);
1472}
1473
1474/* copy command into DMAE command memory and set DMAE command Go */
1475void
1476bxe_post_dmae(struct bxe_softc    *sc,
1477              struct dmae_cmd *dmae,
1478              int                 idx)
1479{
1480    uint32_t cmd_offset;
1481    int i;
1482
1483    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
1484    for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
1485        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
1486    }
1487
1488    REG_WR(sc, dmae_reg_go_c[idx], 1);
1489}
1490
1491uint32_t
1492bxe_dmae_opcode_add_comp(uint32_t opcode,
1493                         uint8_t  comp_type)
1494{
1495    return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
1496                      DMAE_CMD_C_TYPE_ENABLE));
1497}
1498
1499uint32_t
1500bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
1501{
1502    return (opcode & ~DMAE_CMD_SRC_RESET);
1503}
1504
1505uint32_t
1506bxe_dmae_opcode(struct bxe_softc *sc,
1507                uint8_t          src_type,
1508                uint8_t          dst_type,
1509                uint8_t          with_comp,
1510                uint8_t          comp_type)
1511{
1512    uint32_t opcode = 0;
1513
1514    opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
1515               (dst_type << DMAE_CMD_DST_SHIFT));
1516
1517    opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
1518
1519    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
1520
1521    opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
1522               (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));
1523
1524    opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);
1525
1526#ifdef __BIG_ENDIAN
1527    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
1528#else
1529    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
1530#endif
1531
1532    if (with_comp) {
1533        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
1534    }
1535
1536    return (opcode);
1537}
1538
1539static void
1540bxe_prep_dmae_with_comp(struct bxe_softc    *sc,
1541                        struct dmae_cmd *dmae,
1542                        uint8_t             src_type,
1543                        uint8_t             dst_type)
1544{
1545    memset(dmae, 0, sizeof(struct dmae_cmd));
1546
1547    /* set the opcode */
1548    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
1549                                   TRUE, DMAE_COMP_PCI);
1550
1551    /* fill in the completion parameters */
1552    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
1553    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
1554    dmae->comp_val     = DMAE_COMP_VAL;
1555}
1556
1557/* issue a DMAE command over the init channel and wait for completion */
1558static int
1559bxe_issue_dmae_with_comp(struct bxe_softc    *sc,
1560                         struct dmae_cmd *dmae)
1561{
1562    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
1563    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
1564
1565    BXE_DMAE_LOCK(sc);
1566
1567    /* reset completion */
1568    *wb_comp = 0;
1569
1570    /* post the command on the channel used for initializations */
1571    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
1572
1573    /* wait for completion */
1574    DELAY(5);
1575
1576    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
1577        if (!timeout ||
1578            (sc->recovery_state != BXE_RECOVERY_DONE &&
1579             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
1580            BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
1581                *wb_comp, sc->recovery_state);
1582            BXE_DMAE_UNLOCK(sc);
1583            return (DMAE_TIMEOUT);
1584        }
1585
1586        timeout--;
1587        DELAY(50);
1588    }
1589
1590    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
1591        BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
1592                *wb_comp, sc->recovery_state);
1593        BXE_DMAE_UNLOCK(sc);
1594        return (DMAE_PCI_ERROR);
1595    }
1596
1597    BXE_DMAE_UNLOCK(sc);
1598    return (0);
1599}
1600
1601void
1602bxe_read_dmae(struct bxe_softc *sc,
1603              uint32_t         src_addr,
1604              uint32_t         len32)
1605{
1606    struct dmae_cmd dmae;
1607    uint32_t *data;
1608    int i, rc;
1609
1610    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));
1611
1612    if (!sc->dmae_ready) {
1613        data = BXE_SP(sc, wb_data[0]);
1614
1615        for (i = 0; i < len32; i++) {
1616            data[i] = (CHIP_IS_E1(sc)) ?
1617                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
1618                          REG_RD(sc, (src_addr + (i * 4)));
1619        }
1620
1621        return;
1622    }
1623
1624    /* set opcode and fixed command fields */
1625    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
1626
1627    /* fill in addresses and len */
1628    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
1629    dmae.src_addr_hi = 0;
1630    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
1631    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
1632    dmae.len         = len32;
1633
1634    /* issue the command and wait for completion */
1635    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1636        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1637    };
1638}
1639
1640void
1641bxe_write_dmae(struct bxe_softc *sc,
1642               bus_addr_t       dma_addr,
1643               uint32_t         dst_addr,
1644               uint32_t         len32)
1645{
1646    struct dmae_cmd dmae;
1647    int rc;
1648
1649    if (!sc->dmae_ready) {
1650        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));
1651
1652        if (CHIP_IS_E1(sc)) {
1653            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1654        } else {
1655            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1656        }
1657
1658        return;
1659    }
1660
1661    /* set opcode and fixed command fields */
1662    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
1663
1664    /* fill in addresses and len */
1665    dmae.src_addr_lo = U64_LO(dma_addr);
1666    dmae.src_addr_hi = U64_HI(dma_addr);
1667    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
1668    dmae.dst_addr_hi = 0;
1669    dmae.len         = len32;
1670
1671    /* issue the command and wait for completion */
1672    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1673        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1674    }
1675}
1676
1677void
1678bxe_write_dmae_phys_len(struct bxe_softc *sc,
1679                        bus_addr_t       phys_addr,
1680                        uint32_t         addr,
1681                        uint32_t         len)
1682{
1683    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
1684    int offset = 0;
1685
1686    while (len > dmae_wr_max) {
1687        bxe_write_dmae(sc,
1688                       (phys_addr + offset), /* src DMA address */
1689                       (addr + offset),      /* dst GRC address */
1690                       dmae_wr_max);
1691        offset += (dmae_wr_max * 4);
1692        len -= dmae_wr_max;
1693    }
1694
1695    bxe_write_dmae(sc,
1696                   (phys_addr + offset), /* src DMA address */
1697                   (addr + offset),      /* dst GRC address */
1698                   len);
1699}
1700
1701void
1702bxe_set_ctx_validation(struct bxe_softc   *sc,
1703                       struct eth_context *cxt,
1704                       uint32_t           cid)
1705{
1706    /* ustorm cxt validation */
1707    cxt->ustorm_ag_context.cdu_usage =
1708        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1709            CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
1710    /* xcontext validation */
1711    cxt->xstorm_ag_context.cdu_reserved =
1712        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1713            CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
1714}
1715
1716static void
1717bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
1718                            uint8_t          port,
1719                            uint8_t          fw_sb_id,
1720                            uint8_t          sb_index,
1721                            uint8_t          ticks)
1722{
1723    uint32_t addr =
1724        (BAR_CSTRORM_INTMEM +
1725         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));
1726
1727    REG_WR8(sc, addr, ticks);
1728
1729    BLOGD(sc, DBG_LOAD,
1730          "port %d fw_sb_id %d sb_index %d ticks %d\n",
1731          port, fw_sb_id, sb_index, ticks);
1732}
1733
1734static void
1735bxe_storm_memset_hc_disable(struct bxe_softc *sc,
1736                            uint8_t          port,
1737                            uint16_t         fw_sb_id,
1738                            uint8_t          sb_index,
1739                            uint8_t          disable)
1740{
1741    uint32_t enable_flag =
1742        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
1743    uint32_t addr =
1744        (BAR_CSTRORM_INTMEM +
1745         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
1746    uint8_t flags;
1747
1748    /* clear and set */
1749    flags = REG_RD8(sc, addr);
1750    flags &= ~HC_INDEX_DATA_HC_ENABLED;
1751    flags |= enable_flag;
1752    REG_WR8(sc, addr, flags);
1753
1754    BLOGD(sc, DBG_LOAD,
1755          "port %d fw_sb_id %d sb_index %d disable %d\n",
1756          port, fw_sb_id, sb_index, disable);
1757}
1758
1759void
1760bxe_update_coalesce_sb_index(struct bxe_softc *sc,
1761                             uint8_t          fw_sb_id,
1762                             uint8_t          sb_index,
1763                             uint8_t          disable,
1764                             uint16_t         usec)
1765{
1766    int port = SC_PORT(sc);
1767    uint8_t ticks = (usec / 4); /* XXX ??? */
1768
1769    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);
1770
1771    disable = (disable) ? 1 : ((usec) ? 0 : 1);
1772    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
1773}
1774
1775void
1776elink_cb_udelay(struct bxe_softc *sc,
1777                uint32_t         usecs)
1778{
1779    DELAY(usecs);
1780}
1781
1782uint32_t
1783elink_cb_reg_read(struct bxe_softc *sc,
1784                  uint32_t         reg_addr)
1785{
1786    return (REG_RD(sc, reg_addr));
1787}
1788
1789void
1790elink_cb_reg_write(struct bxe_softc *sc,
1791                   uint32_t         reg_addr,
1792                   uint32_t         val)
1793{
1794    REG_WR(sc, reg_addr, val);
1795}
1796
1797void
1798elink_cb_reg_wb_write(struct bxe_softc *sc,
1799                      uint32_t         offset,
1800                      uint32_t         *wb_write,
1801                      uint16_t         len)
1802{
1803    REG_WR_DMAE(sc, offset, wb_write, len);
1804}
1805
1806void
1807elink_cb_reg_wb_read(struct bxe_softc *sc,
1808                     uint32_t         offset,
1809                     uint32_t         *wb_write,
1810                     uint16_t         len)
1811{
1812    REG_RD_DMAE(sc, offset, wb_write, len);
1813}
1814
1815uint8_t
1816elink_cb_path_id(struct bxe_softc *sc)
1817{
1818    return (SC_PATH(sc));
1819}
1820
1821void
1822elink_cb_event_log(struct bxe_softc     *sc,
1823                   const elink_log_id_t elink_log_id,
1824                   ...)
1825{
1826    /* XXX */
1827    BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
1828}
1829
1830static int
1831bxe_set_spio(struct bxe_softc *sc,
1832             int              spio,
1833             uint32_t         mode)
1834{
1835    uint32_t spio_reg;
1836
1837    /* Only 2 SPIOs are configurable */
1838    if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
1839        BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
1840        return (-1);
1841    }
1842
1843    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1844
1845    /* read SPIO and mask except the float bits */
1846    spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
1847
1848    switch (mode) {
1849    case MISC_SPIO_OUTPUT_LOW:
1850        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
1851        /* clear FLOAT and set CLR */
1852        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1853        spio_reg |=  (spio << MISC_SPIO_CLR_POS);
1854        break;
1855
1856    case MISC_SPIO_OUTPUT_HIGH:
1857        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
1858        /* clear FLOAT and set SET */
1859        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1860        spio_reg |=  (spio << MISC_SPIO_SET_POS);
1861        break;
1862
1863    case MISC_SPIO_INPUT_HI_Z:
1864        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
1865        /* set FLOAT */
1866        spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
1867        break;
1868
1869    default:
1870        break;
1871    }
1872
1873    REG_WR(sc, MISC_REG_SPIO, spio_reg);
1874    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1875
1876    return (0);
1877}
1878
1879static int
1880bxe_gpio_read(struct bxe_softc *sc,
1881              int              gpio_num,
1882              uint8_t          port)
1883{
1884    /* The GPIO should be swapped if swap register is set and active */
1885    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1886                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1887    int gpio_shift = (gpio_num +
1888                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1889    uint32_t gpio_mask = (1 << gpio_shift);
1890    uint32_t gpio_reg;
1891
1892    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1893        BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
1894            " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
1895            gpio_mask);
1896        return (-1);
1897    }
1898
1899    /* read GPIO value */
1900    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1901
1902    /* get the requested pin value */
1903    return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
1904}
1905
1906static int
1907bxe_gpio_write(struct bxe_softc *sc,
1908               int              gpio_num,
1909               uint32_t         mode,
1910               uint8_t          port)
1911{
1912    /* The GPIO should be swapped if swap register is set and active */
1913    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1914                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1915    int gpio_shift = (gpio_num +
1916                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1917    uint32_t gpio_mask = (1 << gpio_shift);
1918    uint32_t gpio_reg;
1919
1920    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1921        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
1922            " gpio_shift %d gpio_mask 0x%x\n",
1923            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
1924        return (-1);
1925    }
1926
1927    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1928
1929    /* read GPIO and mask except the float bits */
1930    gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1931
1932    switch (mode) {
1933    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1934        BLOGD(sc, DBG_PHY,
1935              "Set GPIO %d (shift %d) -> output low\n",
1936              gpio_num, gpio_shift);
1937        /* clear FLOAT and set CLR */
1938        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1939        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1940        break;
1941
1942    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1943        BLOGD(sc, DBG_PHY,
1944              "Set GPIO %d (shift %d) -> output high\n",
1945              gpio_num, gpio_shift);
1946        /* clear FLOAT and set SET */
1947        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1948        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1949        break;
1950
1951    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1952        BLOGD(sc, DBG_PHY,
1953              "Set GPIO %d (shift %d) -> input\n",
1954              gpio_num, gpio_shift);
1955        /* set FLOAT */
1956        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1957        break;
1958
1959    default:
1960        break;
1961    }
1962
1963    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
1964    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1965
1966    return (0);
1967}
1968
1969static int
1970bxe_gpio_mult_write(struct bxe_softc *sc,
1971                    uint8_t          pins,
1972                    uint32_t         mode)
1973{
1974    uint32_t gpio_reg;
1975
1976    /* any port swapping should be handled by caller */
1977
1978    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1979
1980    /* read GPIO and mask except the float bits */
1981    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1982    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
1983    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
1984    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
1985
1986    switch (mode) {
1987    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1988        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
1989        /* set CLR */
1990        gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
1991        break;
1992
1993    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1994        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
1995        /* set SET */
1996        gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
1997        break;
1998
1999    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2000        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
2001        /* set FLOAT */
2002        gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2003        break;
2004
2005    default:
2006        BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
2007            " gpio_reg 0x%x\n", pins, mode, gpio_reg);
2008        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2009        return (-1);
2010    }
2011
2012    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
2013    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2014
2015    return (0);
2016}
2017
2018static int
2019bxe_gpio_int_write(struct bxe_softc *sc,
2020                   int              gpio_num,
2021                   uint32_t         mode,
2022                   uint8_t          port)
2023{
2024    /* The GPIO should be swapped if swap register is set and active */
2025    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
2026                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
2027    int gpio_shift = (gpio_num +
2028                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
2029    uint32_t gpio_mask = (1 << gpio_shift);
2030    uint32_t gpio_reg;
2031
2032    if (gpio_num > MISC_REGISTERS_GPIO_3) {
2033        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
2034            " gpio_shift %d gpio_mask 0x%x\n",
2035            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
2036        return (-1);
2037    }
2038
2039    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2040
2041    /* read GPIO int */
2042    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
2043
2044    switch (mode) {
2045    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2046        BLOGD(sc, DBG_PHY,
2047              "Clear GPIO INT %d (shift %d) -> output low\n",
2048              gpio_num, gpio_shift);
2049        /* clear SET and set CLR */
2050        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2051        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2052        break;
2053
2054    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2055        BLOGD(sc, DBG_PHY,
2056              "Set GPIO INT %d (shift %d) -> output high\n",
2057              gpio_num, gpio_shift);
2058        /* clear CLR and set SET */
2059        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2060        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2061        break;
2062
2063    default:
2064        break;
2065    }
2066
2067    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
2068    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2069
2070    return (0);
2071}
2072
2073uint32_t
2074elink_cb_gpio_read(struct bxe_softc *sc,
2075                   uint16_t         gpio_num,
2076                   uint8_t          port)
2077{
2078    return (bxe_gpio_read(sc, gpio_num, port));
2079}
2080
2081uint8_t
2082elink_cb_gpio_write(struct bxe_softc *sc,
2083                    uint16_t         gpio_num,
2084                    uint8_t          mode, /* 0=low 1=high */
2085                    uint8_t          port)
2086{
2087    return (bxe_gpio_write(sc, gpio_num, mode, port));
2088}
2089
2090uint8_t
2091elink_cb_gpio_mult_write(struct bxe_softc *sc,
2092                         uint8_t          pins,
2093                         uint8_t          mode) /* 0=low 1=high */
2094{
2095    return (bxe_gpio_mult_write(sc, pins, mode));
2096}
2097
2098uint8_t
2099elink_cb_gpio_int_write(struct bxe_softc *sc,
2100                        uint16_t         gpio_num,
2101                        uint8_t          mode, /* 0=low 1=high */
2102                        uint8_t          port)
2103{
2104    return (bxe_gpio_int_write(sc, gpio_num, mode, port));
2105}
2106
2107void
2108elink_cb_notify_link_changed(struct bxe_softc *sc)
2109{
2110    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
2111                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
2112}
2113
2114/* send the MCP a request, block until there is a reply */
2115uint32_t
2116elink_cb_fw_command(struct bxe_softc *sc,
2117                    uint32_t         command,
2118                    uint32_t         param)
2119{
2120    int mb_idx = SC_FW_MB_IDX(sc);
2121    uint32_t seq;
2122    uint32_t rc = 0;
2123    uint32_t cnt = 1;
2124    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;
2125
2126    BXE_FWMB_LOCK(sc);
2127
2128    seq = ++sc->fw_seq;
2129    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
2130    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
2131
2132    BLOGD(sc, DBG_PHY,
2133          "wrote command 0x%08x to FW MB param 0x%08x\n",
2134          (command | seq), param);
2135
2136    /* Let the FW do it's magic. GIve it up to 5 seconds... */
2137    do {
2138        DELAY(delay * 1000);
2139        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
2140    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2141
2142    BLOGD(sc, DBG_PHY,
2143          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
2144          cnt*delay, rc, seq);
2145
2146    /* is this a reply to our command? */
2147    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
2148        rc &= FW_MSG_CODE_MASK;
2149    } else {
2150        /* Ruh-roh! */
2151        BLOGE(sc, "FW failed to respond!\n");
2152        // XXX bxe_fw_dump(sc);
2153        rc = 0;
2154    }
2155
2156    BXE_FWMB_UNLOCK(sc);
2157    return (rc);
2158}
2159
2160static uint32_t
2161bxe_fw_command(struct bxe_softc *sc,
2162               uint32_t         command,
2163               uint32_t         param)
2164{
2165    return (elink_cb_fw_command(sc, command, param));
2166}
2167
2168static void
2169__storm_memset_dma_mapping(struct bxe_softc *sc,
2170                           uint32_t         addr,
2171                           bus_addr_t       mapping)
2172{
2173    REG_WR(sc, addr, U64_LO(mapping));
2174    REG_WR(sc, (addr + 4), U64_HI(mapping));
2175}
2176
2177static void
2178storm_memset_spq_addr(struct bxe_softc *sc,
2179                      bus_addr_t       mapping,
2180                      uint16_t         abs_fid)
2181{
2182    uint32_t addr = (XSEM_REG_FAST_MEMORY +
2183                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
2184    __storm_memset_dma_mapping(sc, addr, mapping);
2185}
2186
2187static void
2188storm_memset_vf_to_pf(struct bxe_softc *sc,
2189                      uint16_t         abs_fid,
2190                      uint16_t         pf_id)
2191{
2192    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2193    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2194    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2195    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2196}
2197
2198static void
2199storm_memset_func_en(struct bxe_softc *sc,
2200                     uint16_t         abs_fid,
2201                     uint8_t          enable)
2202{
2203    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2204    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2205    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2206    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2207}
2208
2209static void
2210storm_memset_eq_data(struct bxe_softc       *sc,
2211                     struct event_ring_data *eq_data,
2212                     uint16_t               pfid)
2213{
2214    uint32_t addr;
2215    size_t size;
2216
2217    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
2218    size = sizeof(struct event_ring_data);
2219    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
2220}
2221
2222static void
2223storm_memset_eq_prod(struct bxe_softc *sc,
2224                     uint16_t         eq_prod,
2225                     uint16_t         pfid)
2226{
2227    uint32_t addr = (BAR_CSTRORM_INTMEM +
2228                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));
2229    REG_WR16(sc, addr, eq_prod);
2230}
2231
2232/*
2233 * Post a slowpath command.
2234 *
2235 * A slowpath command is used to propogate a configuration change through
2236 * the controller in a controlled manner, allowing each STORM processor and
2237 * other H/W blocks to phase in the change.  The commands sent on the
2238 * slowpath are referred to as ramrods.  Depending on the ramrod used the
2239 * completion of the ramrod will occur in different ways.  Here's a
2240 * breakdown of ramrods and how they complete:
2241 *
2242 * RAMROD_CMD_ID_ETH_PORT_SETUP
2243 *   Used to setup the leading connection on a port.  Completes on the
2244 *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
2245 *
2246 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2247 *   Used to setup an additional connection on a port.  Completes on the
2248 *   RCQ of the multi-queue/RSS connection being initialized.
2249 *
2250 * RAMROD_CMD_ID_ETH_STAT_QUERY
2251 *   Used to force the storm processors to update the statistics database
2252 *   in host memory.  This ramrod is send on the leading connection CID and
2253 *   completes as an index increment of the CSTORM on the default status
2254 *   block.
2255 *
2256 * RAMROD_CMD_ID_ETH_UPDATE
2257 *   Used to update the state of the leading connection, usually to udpate
2258 *   the RSS indirection table.  Completes on the RCQ of the leading
2259 *   connection. (Not currently used under FreeBSD until OS support becomes
2260 *   available.)
2261 *
2262 * RAMROD_CMD_ID_ETH_HALT
2263 *   Used when tearing down a connection prior to driver unload.  Completes
2264 *   on the RCQ of the multi-queue/RSS connection being torn down.  Don't
2265 *   use this on the leading connection.
2266 *
2267 * RAMROD_CMD_ID_ETH_SET_MAC
2268 *   Sets the Unicast/Broadcast/Multicast used by the port.  Completes on
2269 *   the RCQ of the leading connection.
2270 *
2271 * RAMROD_CMD_ID_ETH_CFC_DEL
2272 *   Used when tearing down a conneciton prior to driver unload.  Completes
2273 *   on the RCQ of the leading connection (since the current connection
2274 *   has been completely removed from controller memory).
2275 *
2276 * RAMROD_CMD_ID_ETH_PORT_DEL
2277 *   Used to tear down the leading connection prior to driver unload,
2278 *   typically fp[0].  Completes as an index increment of the CSTORM on the
2279 *   default status block.
2280 *
2281 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2282 *   Used for connection offload.  Completes on the RCQ of the multi-queue
2283 *   RSS connection that is being offloaded.  (Not currently used under
2284 *   FreeBSD.)
2285 *
2286 * There can only be one command pending per function.
2287 *
2288 * Returns:
2289 *   0 = Success, !0 = Failure.
2290 */
2291
2292/* must be called under the spq lock */
2293static inline
2294struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2295{
2296    struct eth_spe *next_spe = sc->spq_prod_bd;
2297
2298    if (sc->spq_prod_bd == sc->spq_last_bd) {
2299        /* wrap back to the first eth_spq */
2300        sc->spq_prod_bd = sc->spq;
2301        sc->spq_prod_idx = 0;
2302    } else {
2303        sc->spq_prod_bd++;
2304        sc->spq_prod_idx++;
2305    }
2306
2307    return (next_spe);
2308}
2309
2310/* must be called under the spq lock */
2311static inline
2312void bxe_sp_prod_update(struct bxe_softc *sc)
2313{
2314    int func = SC_FUNC(sc);
2315
2316    /*
2317     * Make sure that BD data is updated before writing the producer.
2318     * BD data is written to the memory, the producer is read from the
2319     * memory, thus we need a full memory barrier to ensure the ordering.
2320     */
2321    mb();
2322
2323    REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2324             sc->spq_prod_idx);
2325
2326    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2327                      BUS_SPACE_BARRIER_WRITE);
2328}
2329
2330/**
2331 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2332 *
2333 * @cmd:      command to check
2334 * @cmd_type: command type
2335 */
2336static inline
2337int bxe_is_contextless_ramrod(int cmd,
2338                              int cmd_type)
2339{
2340    if ((cmd_type == NONE_CONNECTION_TYPE) ||
2341        (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2342        (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2343        (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2344        (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2345        (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2346        (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2347        return (TRUE);
2348    } else {
2349        return (FALSE);
2350    }
2351}
2352
2353/**
2354 * bxe_sp_post - place a single command on an SP ring
2355 *
2356 * @sc:         driver handle
2357 * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
2358 * @cid:        SW CID the command is related to
2359 * @data_hi:    command private data address (high 32 bits)
2360 * @data_lo:    command private data address (low 32 bits)
2361 * @cmd_type:   command type (e.g. NONE, ETH)
2362 *
2363 * SP data is handled as if it's always an address pair, thus data fields are
2364 * not swapped to little endian in upper functions. Instead this function swaps
2365 * data as if it's two uint32 fields.
2366 */
2367int
2368bxe_sp_post(struct bxe_softc *sc,
2369            int              command,
2370            int              cid,
2371            uint32_t         data_hi,
2372            uint32_t         data_lo,
2373            int              cmd_type)
2374{
2375    struct eth_spe *spe;
2376    uint16_t type;
2377    int common;
2378
2379    common = bxe_is_contextless_ramrod(command, cmd_type);
2380
2381    BXE_SP_LOCK(sc);
2382
2383    if (common) {
2384        if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2385            BLOGE(sc, "EQ ring is full!\n");
2386            BXE_SP_UNLOCK(sc);
2387            return (-1);
2388        }
2389    } else {
2390        if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2391            BLOGE(sc, "SPQ ring is full!\n");
2392            BXE_SP_UNLOCK(sc);
2393            return (-1);
2394        }
2395    }
2396
2397    spe = bxe_sp_get_next(sc);
2398
2399    /* CID needs port number to be encoded int it */
2400    spe->hdr.conn_and_cmd_data =
2401        htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));
2402
2403    type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;
2404
2405    /* TBD: Check if it works for VFs */
2406    type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
2407             SPE_HDR_T_FUNCTION_ID);
2408
2409    spe->hdr.type = htole16(type);
2410
2411    spe->data.update_data_addr.hi = htole32(data_hi);
2412    spe->data.update_data_addr.lo = htole32(data_lo);
2413
2414    /*
2415     * It's ok if the actual decrement is issued towards the memory
2416     * somewhere between the lock and unlock. Thus no more explict
2417     * memory barrier is needed.
2418     */
2419    if (common) {
2420        atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2421    } else {
2422        atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2423    }
2424
2425    BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2426    BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2427          BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2428    BLOGD(sc, DBG_SP,
2429          "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2430          sc->spq_prod_idx,
2431          (uint32_t)U64_HI(sc->spq_dma.paddr),
2432          (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2433          command,
2434          common,
2435          HW_CID(sc, cid),
2436          data_hi,
2437          data_lo,
2438          type,
2439          atomic_load_acq_long(&sc->cq_spq_left),
2440          atomic_load_acq_long(&sc->eq_spq_left));
2441
2442    bxe_sp_prod_update(sc);
2443
2444    BXE_SP_UNLOCK(sc);
2445    return (0);
2446}
2447
2448/**
2449 * bxe_debug_print_ind_table - prints the indirection table configuration.
2450 *
2451 * @sc: driver hanlde
2452 * @p:  pointer to rss configuration
2453 */
2454
2455/*
2456 * FreeBSD Device probe function.
2457 *
2458 * Compares the device found to the driver's list of supported devices and
2459 * reports back to the bsd loader whether this is the right driver for the device.
2460 * This is the driver entry function called from the "kldload" command.
2461 *
2462 * Returns:
2463 *   BUS_PROBE_DEFAULT on success, positive value on failure.
2464 */
2465static int
2466bxe_probe(device_t dev)
2467{
2468    struct bxe_softc *sc;
2469    struct bxe_device_type *t;
2470    char *descbuf;
2471    uint16_t did, sdid, svid, vid;
2472
2473    /* Find our device structure */
2474    sc = device_get_softc(dev);
2475    sc->dev = dev;
2476    t = bxe_devs;
2477
2478    /* Get the data for the device to be probed. */
2479    vid  = pci_get_vendor(dev);
2480    did  = pci_get_device(dev);
2481    svid = pci_get_subvendor(dev);
2482    sdid = pci_get_subdevice(dev);
2483
2484    BLOGD(sc, DBG_LOAD,
2485          "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
2486          "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
2487
2488    /* Look through the list of known devices for a match. */
2489    while (t->bxe_name != NULL) {
2490        if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2491            ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2492            ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2493            descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2494            if (descbuf == NULL)
2495                return (ENOMEM);
2496
2497            /* Print out the device identity. */
2498            snprintf(descbuf, BXE_DEVDESC_MAX,
2499                     "%s (%c%d) BXE v:%s\n", t->bxe_name,
2500                     (((pci_read_config(dev, PCIR_REVID, 4) &
2501                        0xf0) >> 4) + 'A'),
2502                     (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2503                     BXE_DRIVER_VERSION);
2504
2505            device_set_desc_copy(dev, descbuf);
2506            free(descbuf, M_TEMP);
2507            return (BUS_PROBE_DEFAULT);
2508        }
2509        t++;
2510    }
2511
2512    return (ENXIO);
2513}
2514
2515static void
2516bxe_init_mutexes(struct bxe_softc *sc)
2517{
2518#ifdef BXE_CORE_LOCK_SX
2519    snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2520             "bxe%d_core_lock", sc->unit);
2521    sx_init(&sc->core_sx, sc->core_sx_name);
2522#else
2523    snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2524             "bxe%d_core_lock", sc->unit);
2525    mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2526#endif
2527
2528    snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2529             "bxe%d_sp_lock", sc->unit);
2530    mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2531
2532    snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2533             "bxe%d_dmae_lock", sc->unit);
2534    mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2535
2536    snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2537             "bxe%d_phy_lock", sc->unit);
2538    mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2539
2540    snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2541             "bxe%d_fwmb_lock", sc->unit);
2542    mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2543
2544    snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2545             "bxe%d_print_lock", sc->unit);
2546    mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2547
2548    snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2549             "bxe%d_stats_lock", sc->unit);
2550    mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2551
2552    snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2553             "bxe%d_mcast_lock", sc->unit);
2554    mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2555}
2556
2557static void
2558bxe_release_mutexes(struct bxe_softc *sc)
2559{
2560#ifdef BXE_CORE_LOCK_SX
2561    sx_destroy(&sc->core_sx);
2562#else
2563    if (mtx_initialized(&sc->core_mtx)) {
2564        mtx_destroy(&sc->core_mtx);
2565    }
2566#endif
2567
2568    if (mtx_initialized(&sc->sp_mtx)) {
2569        mtx_destroy(&sc->sp_mtx);
2570    }
2571
2572    if (mtx_initialized(&sc->dmae_mtx)) {
2573        mtx_destroy(&sc->dmae_mtx);
2574    }
2575
2576    if (mtx_initialized(&sc->port.phy_mtx)) {
2577        mtx_destroy(&sc->port.phy_mtx);
2578    }
2579
2580    if (mtx_initialized(&sc->fwmb_mtx)) {
2581        mtx_destroy(&sc->fwmb_mtx);
2582    }
2583
2584    if (mtx_initialized(&sc->print_mtx)) {
2585        mtx_destroy(&sc->print_mtx);
2586    }
2587
2588    if (mtx_initialized(&sc->stats_mtx)) {
2589        mtx_destroy(&sc->stats_mtx);
2590    }
2591
2592    if (mtx_initialized(&sc->mcast_mtx)) {
2593        mtx_destroy(&sc->mcast_mtx);
2594    }
2595}
2596
2597static void
2598bxe_tx_disable(struct bxe_softc* sc)
2599{
2600    struct ifnet *ifp = sc->ifnet;
2601
2602    /* tell the stack the driver is stopped and TX queue is full */
2603    if (ifp != NULL) {
2604        ifp->if_drv_flags = 0;
2605    }
2606}
2607
2608static void
2609bxe_drv_pulse(struct bxe_softc *sc)
2610{
2611    SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2612             sc->fw_drv_pulse_wr_seq);
2613}
2614
2615static inline uint16_t
2616bxe_tx_avail(struct bxe_softc *sc,
2617             struct bxe_fastpath *fp)
2618{
2619    int16_t  used;
2620    uint16_t prod;
2621    uint16_t cons;
2622
2623    prod = fp->tx_bd_prod;
2624    cons = fp->tx_bd_cons;
2625
2626    used = SUB_S16(prod, cons);
2627
2628    return (int16_t)(sc->tx_ring_size) - used;
2629}
2630
2631static inline int
2632bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2633{
2634    uint16_t hw_cons;
2635
2636    mb(); /* status block fields can change */
2637    hw_cons = le16toh(*fp->tx_cons_sb);
2638    return (hw_cons != fp->tx_pkt_cons);
2639}
2640
2641static inline uint8_t
2642bxe_has_tx_work(struct bxe_fastpath *fp)
2643{
2644    /* expand this for multi-cos if ever supported */
2645    return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2646}
2647
2648static inline int
2649bxe_has_rx_work(struct bxe_fastpath *fp)
2650{
2651    uint16_t rx_cq_cons_sb;
2652
2653    mb(); /* status block fields can change */
2654    rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2655    if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2656        rx_cq_cons_sb++;
2657    return (fp->rx_cq_cons != rx_cq_cons_sb);
2658}
2659
2660static void
2661bxe_sp_event(struct bxe_softc    *sc,
2662             struct bxe_fastpath *fp,
2663             union eth_rx_cqe    *rr_cqe)
2664{
2665    int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2666    int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2667    enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2668    struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2669
2670    BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2671          fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2672
2673    switch (command) {
2674    case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2675        BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2676        drv_cmd = ECORE_Q_CMD_UPDATE;
2677        break;
2678
2679    case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2680        BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2681        drv_cmd = ECORE_Q_CMD_SETUP;
2682        break;
2683
2684    case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2685        BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2686        drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2687        break;
2688
2689    case (RAMROD_CMD_ID_ETH_HALT):
2690        BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2691        drv_cmd = ECORE_Q_CMD_HALT;
2692        break;
2693
2694    case (RAMROD_CMD_ID_ETH_TERMINATE):
2695        BLOGD(sc, DBG_SP, "got MULTI[%d] teminate ramrod\n", cid);
2696        drv_cmd = ECORE_Q_CMD_TERMINATE;
2697        break;
2698
2699    case (RAMROD_CMD_ID_ETH_EMPTY):
2700        BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2701        drv_cmd = ECORE_Q_CMD_EMPTY;
2702        break;
2703
2704    default:
2705        BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2706              command, fp->index);
2707        return;
2708    }
2709
2710    if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2711        q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2712        /*
2713         * q_obj->complete_cmd() failure means that this was
2714         * an unexpected completion.
2715         *
2716         * In this case we don't want to increase the sc->spq_left
2717         * because apparently we haven't sent this command the first
2718         * place.
2719         */
2720        // bxe_panic(sc, ("Unexpected SP completion\n"));
2721        return;
2722    }
2723
2724    atomic_add_acq_long(&sc->cq_spq_left, 1);
2725
2726    BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2727          atomic_load_acq_long(&sc->cq_spq_left));
2728}
2729
2730/*
2731 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2732 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2733 * the current aggregation queue as in-progress.
2734 */
2735static void
2736bxe_tpa_start(struct bxe_softc            *sc,
2737              struct bxe_fastpath         *fp,
2738              uint16_t                    queue,
2739              uint16_t                    cons,
2740              uint16_t                    prod,
2741              struct eth_fast_path_rx_cqe *cqe)
2742{
2743    struct bxe_sw_rx_bd tmp_bd;
2744    struct bxe_sw_rx_bd *rx_buf;
2745    struct eth_rx_bd *rx_bd;
2746    int max_agg_queues;
2747    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2748    uint16_t index;
2749
2750    BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2751                       "cons=%d prod=%d\n",
2752          fp->index, queue, cons, prod);
2753
2754    max_agg_queues = MAX_AGG_QS(sc);
2755
2756    KASSERT((queue < max_agg_queues),
2757            ("fp[%02d] invalid aggr queue (%d >= %d)!",
2758             fp->index, queue, max_agg_queues));
2759
2760    KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2761            ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2762             fp->index, queue));
2763
2764    /* copy the existing mbuf and mapping from the TPA pool */
2765    tmp_bd = tpa_info->bd;
2766
2767    if (tmp_bd.m == NULL) {
2768        uint32_t *tmp;
2769
2770        tmp = (uint32_t *)cqe;
2771
2772        BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d]mbuf not allocated!\n",
2773              fp->index, queue, cons, prod);
2774        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2775            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2776
2777        /* XXX Error handling? */
2778        return;
2779    }
2780
2781    /* change the TPA queue to the start state */
2782    tpa_info->state            = BXE_TPA_STATE_START;
2783    tpa_info->placement_offset = cqe->placement_offset;
2784    tpa_info->parsing_flags    = le16toh(cqe->pars_flags.flags);
2785    tpa_info->vlan_tag         = le16toh(cqe->vlan_tag);
2786    tpa_info->len_on_bd        = le16toh(cqe->len_on_bd);
2787
2788    fp->rx_tpa_queue_used |= (1 << queue);
2789
2790    /*
2791     * If all the buffer descriptors are filled with mbufs then fill in
2792     * the current consumer index with a new BD. Else if a maximum Rx
2793     * buffer limit is imposed then fill in the next producer index.
2794     */
2795    index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2796                prod : cons;
2797
2798    /* move the received mbuf and mapping to TPA pool */
2799    tpa_info->bd = fp->rx_mbuf_chain[cons];
2800
2801    /* release any existing RX BD mbuf mappings */
2802    if (cons != index) {
2803        rx_buf = &fp->rx_mbuf_chain[cons];
2804
2805        if (rx_buf->m_map != NULL) {
2806            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2807                            BUS_DMASYNC_POSTREAD);
2808            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2809        }
2810
2811        /*
2812         * We get here when the maximum number of rx buffers is less than
2813         * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
2814         * it out here without concern of a memory leak.
2815         */
2816        fp->rx_mbuf_chain[cons].m = NULL;
2817    }
2818
2819    /* update the Rx SW BD with the mbuf info from the TPA pool */
2820    fp->rx_mbuf_chain[index] = tmp_bd;
2821
2822    /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2823    rx_bd = &fp->rx_chain[index];
2824    rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2825    rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2826}
2827
2828/*
2829 * When a TPA aggregation is completed, loop through the individual mbufs
2830 * of the aggregation, combining them into a single mbuf which will be sent
2831 * up the stack. Refill all freed SGEs with mbufs as we go along.
2832 */
2833static int
2834bxe_fill_frag_mbuf(struct bxe_softc          *sc,
2835                   struct bxe_fastpath       *fp,
2836                   struct bxe_sw_tpa_info    *tpa_info,
2837                   uint16_t                  queue,
2838                   uint16_t                  pages,
2839                   struct mbuf               *m,
2840			       struct eth_end_agg_rx_cqe *cqe,
2841                   uint16_t                  cqe_idx)
2842{
2843    struct mbuf *m_frag;
2844    uint32_t frag_len, frag_size, i;
2845    uint16_t sge_idx;
2846    int rc = 0;
2847    int j;
2848
2849    frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2850
2851    BLOGD(sc, DBG_LRO,
2852          "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
2853          fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2854
2855    /* make sure the aggregated frame is not too big to handle */
2856    if (pages > 8 * PAGES_PER_SGE) {
2857
2858        uint32_t *tmp = (uint32_t *)cqe;
2859
2860        BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
2861                  "pkt_len=%d len_on_bd=%d frag_size=%d\n",
2862              fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2863              tpa_info->len_on_bd, frag_size);
2864
2865        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2866            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2867
2868        bxe_panic(sc, ("sge page count error\n"));
2869        return (EINVAL);
2870    }
2871
2872    /*
2873     * Scan through the scatter gather list pulling individual mbufs into a
2874     * single mbuf for the host stack.
2875     */
2876    for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
2877        sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2878
2879        /*
2880         * Firmware gives the indices of the SGE as if the ring is an array
2881         * (meaning that the "next" element will consume 2 indices).
2882         */
2883        frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
2884
2885        BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
2886                           "sge_idx=%d frag_size=%d frag_len=%d\n",
2887              fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2888
2889        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2890
2891        /* allocate a new mbuf for the SGE */
2892        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
2893        if (rc) {
2894            /* Leave all remaining SGEs in the ring! */
2895            return (rc);
2896        }
2897
2898        /* update the fragment length */
2899        m_frag->m_len = frag_len;
2900
2901        /* concatenate the fragment to the head mbuf */
2902        m_cat(m, m_frag);
2903        fp->eth_q_stats.mbuf_alloc_sge--;
2904
2905        /* update the TPA mbuf size and remaining fragment size */
2906        m->m_pkthdr.len += frag_len;
2907        frag_size -= frag_len;
2908    }
2909
2910    BLOGD(sc, DBG_LRO,
2911          "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
2912          fp->index, queue, frag_size);
2913
2914    return (rc);
2915}
2916
2917static inline void
2918bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
2919{
2920    int i, j;
2921
2922    for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
2923        int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2924
2925        for (j = 0; j < 2; j++) {
2926            BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2927            idx--;
2928        }
2929    }
2930}
2931
2932static inline void
2933bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
2934{
2935    /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
2936    memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2937
2938    /*
2939     * Clear the two last indices in the page to 1. These are the indices that
2940     * correspond to the "next" element, hence will never be indicated and
2941     * should be removed from the calculations.
2942     */
2943    bxe_clear_sge_mask_next_elems(fp);
2944}
2945
2946static inline void
2947bxe_update_last_max_sge(struct bxe_fastpath *fp,
2948                        uint16_t            idx)
2949{
2950    uint16_t last_max = fp->last_max_sge;
2951
2952    if (SUB_S16(idx, last_max) > 0) {
2953        fp->last_max_sge = idx;
2954    }
2955}
2956
2957static inline void
2958bxe_update_sge_prod(struct bxe_softc          *sc,
2959                    struct bxe_fastpath       *fp,
2960                    uint16_t                  sge_len,
2961                    union eth_sgl_or_raw_data *cqe)
2962{
2963    uint16_t last_max, last_elem, first_elem;
2964    uint16_t delta = 0;
2965    uint16_t i;
2966
2967    if (!sge_len) {
2968        return;
2969    }
2970
2971    /* first mark all used pages */
2972    for (i = 0; i < sge_len; i++) {
2973        BIT_VEC64_CLEAR_BIT(fp->sge_mask,
2974                            RX_SGE(le16toh(cqe->sgl[i])));
2975    }
2976
2977    BLOGD(sc, DBG_LRO,
2978          "fp[%02d] fp_cqe->sgl[%d] = %d\n",
2979          fp->index, sge_len - 1,
2980          le16toh(cqe->sgl[sge_len - 1]));
2981
2982    /* assume that the last SGE index is the biggest */
2983    bxe_update_last_max_sge(fp,
2984                            le16toh(cqe->sgl[sge_len - 1]));
2985
2986    last_max = RX_SGE(fp->last_max_sge);
2987    last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
2988    first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
2989
2990    /* if ring is not full */
2991    if (last_elem + 1 != first_elem) {
2992        last_elem++;
2993    }
2994
2995    /* now update the prod */
2996    for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
2997        if (__predict_true(fp->sge_mask[i])) {
2998            break;
2999        }
3000
3001        fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3002        delta += BIT_VEC64_ELEM_SZ;
3003    }
3004
3005    if (delta > 0) {
3006        fp->rx_sge_prod += delta;
3007        /* clear page-end entries */
3008        bxe_clear_sge_mask_next_elems(fp);
3009    }
3010
3011    BLOGD(sc, DBG_LRO,
3012          "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3013          fp->index, fp->last_max_sge, fp->rx_sge_prod);
3014}
3015
3016/*
3017 * The aggregation on the current TPA queue has completed. Pull the individual
3018 * mbuf fragments together into a single mbuf, perform all necessary checksum
3019 * calculations, and send the resuting mbuf to the stack.
3020 */
3021static void
3022bxe_tpa_stop(struct bxe_softc          *sc,
3023             struct bxe_fastpath       *fp,
3024             struct bxe_sw_tpa_info    *tpa_info,
3025             uint16_t                  queue,
3026             uint16_t                  pages,
3027			 struct eth_end_agg_rx_cqe *cqe,
3028             uint16_t                  cqe_idx)
3029{
3030    struct ifnet *ifp = sc->ifnet;
3031    struct mbuf *m;
3032    int rc = 0;
3033
3034    BLOGD(sc, DBG_LRO,
3035          "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3036          fp->index, queue, tpa_info->placement_offset,
3037          le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3038
3039    m = tpa_info->bd.m;
3040
3041    /* allocate a replacement before modifying existing mbuf */
3042    rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3043    if (rc) {
3044        /* drop the frame and log an error */
3045        fp->eth_q_stats.rx_soft_errors++;
3046        goto bxe_tpa_stop_exit;
3047    }
3048
3049    /* we have a replacement, fixup the current mbuf */
3050    m_adj(m, tpa_info->placement_offset);
3051    m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3052
3053    /* mark the checksums valid (taken care of by the firmware) */
3054    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3055    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3056    m->m_pkthdr.csum_data = 0xffff;
3057    m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3058                               CSUM_IP_VALID   |
3059                               CSUM_DATA_VALID |
3060                               CSUM_PSEUDO_HDR);
3061
3062    /* aggregate all of the SGEs into a single mbuf */
3063    rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3064    if (rc) {
3065        /* drop the packet and log an error */
3066        fp->eth_q_stats.rx_soft_errors++;
3067        m_freem(m);
3068    } else {
3069        if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3070            m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3071            m->m_flags |= M_VLANTAG;
3072        }
3073
3074        /* assign packet to this interface interface */
3075        m->m_pkthdr.rcvif = ifp;
3076
3077#if __FreeBSD_version >= 800000
3078        /* specify what RSS queue was used for this flow */
3079        m->m_pkthdr.flowid = fp->index;
3080        BXE_SET_FLOWID(m);
3081#endif
3082
3083        ifp->if_ipackets++;
3084        fp->eth_q_stats.rx_tpa_pkts++;
3085
3086        /* pass the frame to the stack */
3087        (*ifp->if_input)(ifp, m);
3088    }
3089
3090    /* we passed an mbuf up the stack or dropped the frame */
3091    fp->eth_q_stats.mbuf_alloc_tpa--;
3092
3093bxe_tpa_stop_exit:
3094
3095    fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3096    fp->rx_tpa_queue_used &= ~(1 << queue);
3097}
3098
3099static uint8_t
3100bxe_service_rxsgl(
3101                 struct bxe_fastpath *fp,
3102                 uint16_t len,
3103                 uint16_t lenonbd,
3104                 struct mbuf *m,
3105                 struct eth_fast_path_rx_cqe *cqe_fp)
3106{
3107    struct mbuf *m_frag;
3108    uint16_t frags, frag_len;
3109    uint16_t sge_idx = 0;
3110    uint16_t j;
3111    uint8_t i, rc = 0;
3112    uint32_t frag_size;
3113
3114    /* adjust the mbuf */
3115    m->m_len = lenonbd;
3116
3117    frag_size =  len - lenonbd;
3118    frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3119
3120    for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3121        sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3122
3123        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3124        frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3125        m_frag->m_len = frag_len;
3126
3127       /* allocate a new mbuf for the SGE */
3128        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3129        if (rc) {
3130            /* Leave all remaining SGEs in the ring! */
3131            return (rc);
3132        }
3133        fp->eth_q_stats.mbuf_alloc_sge--;
3134
3135        /* concatenate the fragment to the head mbuf */
3136        m_cat(m, m_frag);
3137
3138        frag_size -= frag_len;
3139    }
3140
3141    bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3142
3143    return rc;
3144}
3145
3146static uint8_t
3147bxe_rxeof(struct bxe_softc    *sc,
3148          struct bxe_fastpath *fp)
3149{
3150    struct ifnet *ifp = sc->ifnet;
3151    uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3152    uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3153    int rx_pkts = 0;
3154    int rc = 0;
3155
3156    BXE_FP_RX_LOCK(fp);
3157
3158    /* CQ "next element" is of the size of the regular element */
3159    hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3160    if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3161        hw_cq_cons++;
3162    }
3163
3164    bd_cons = fp->rx_bd_cons;
3165    bd_prod = fp->rx_bd_prod;
3166    bd_prod_fw = bd_prod;
3167    sw_cq_cons = fp->rx_cq_cons;
3168    sw_cq_prod = fp->rx_cq_prod;
3169
3170    /*
3171     * Memory barrier necessary as speculative reads of the rx
3172     * buffer can be ahead of the index in the status block
3173     */
3174    rmb();
3175
3176    BLOGD(sc, DBG_RX,
3177          "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3178          fp->index, hw_cq_cons, sw_cq_cons);
3179
3180    while (sw_cq_cons != hw_cq_cons) {
3181        struct bxe_sw_rx_bd *rx_buf = NULL;
3182        union eth_rx_cqe *cqe;
3183        struct eth_fast_path_rx_cqe *cqe_fp;
3184        uint8_t cqe_fp_flags;
3185        enum eth_rx_cqe_type cqe_fp_type;
3186        uint16_t len, lenonbd,  pad;
3187        struct mbuf *m = NULL;
3188
3189        comp_ring_cons = RCQ(sw_cq_cons);
3190        bd_prod = RX_BD(bd_prod);
3191        bd_cons = RX_BD(bd_cons);
3192
3193        cqe          = &fp->rcq_chain[comp_ring_cons];
3194        cqe_fp       = &cqe->fast_path_cqe;
3195        cqe_fp_flags = cqe_fp->type_error_flags;
3196        cqe_fp_type  = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3197
3198        BLOGD(sc, DBG_RX,
3199              "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3200              "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3201              "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3202              fp->index,
3203              hw_cq_cons,
3204              sw_cq_cons,
3205              bd_prod,
3206              bd_cons,
3207              CQE_TYPE(cqe_fp_flags),
3208              cqe_fp_flags,
3209              cqe_fp->status_flags,
3210              le32toh(cqe_fp->rss_hash_result),
3211              le16toh(cqe_fp->vlan_tag),
3212              le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3213              le16toh(cqe_fp->len_on_bd));
3214
3215        /* is this a slowpath msg? */
3216        if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3217            bxe_sp_event(sc, fp, cqe);
3218            goto next_cqe;
3219        }
3220
3221        rx_buf = &fp->rx_mbuf_chain[bd_cons];
3222
3223        if (!CQE_TYPE_FAST(cqe_fp_type)) {
3224            struct bxe_sw_tpa_info *tpa_info;
3225            uint16_t frag_size, pages;
3226            uint8_t queue;
3227
3228            if (CQE_TYPE_START(cqe_fp_type)) {
3229                bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3230                              bd_cons, bd_prod, cqe_fp);
3231                m = NULL; /* packet not ready yet */
3232                goto next_rx;
3233            }
3234
3235            KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3236                    ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3237
3238            queue = cqe->end_agg_cqe.queue_index;
3239            tpa_info = &fp->rx_tpa_info[queue];
3240
3241            BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3242                  fp->index, queue);
3243
3244            frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3245                         tpa_info->len_on_bd);
3246            pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3247
3248            bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3249                         &cqe->end_agg_cqe, comp_ring_cons);
3250
3251            bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3252
3253            goto next_cqe;
3254        }
3255
3256        /* non TPA */
3257
3258        /* is this an error packet? */
3259        if (__predict_false(cqe_fp_flags &
3260                            ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3261            BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3262            fp->eth_q_stats.rx_soft_errors++;
3263            goto next_rx;
3264        }
3265
3266        len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3267        lenonbd = le16toh(cqe_fp->len_on_bd);
3268        pad = cqe_fp->placement_offset;
3269
3270        m = rx_buf->m;
3271
3272        if (__predict_false(m == NULL)) {
3273            BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3274                  bd_cons, fp->index);
3275            goto next_rx;
3276        }
3277
3278        /* XXX double copy if packet length under a threshold */
3279
3280        /*
3281         * If all the buffer descriptors are filled with mbufs then fill in
3282         * the current consumer index with a new BD. Else if a maximum Rx
3283         * buffer limit is imposed then fill in the next producer index.
3284         */
3285        rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3286                                  (sc->max_rx_bufs != RX_BD_USABLE) ?
3287                                      bd_prod : bd_cons);
3288        if (rc != 0) {
3289
3290            /* we simply reuse the received mbuf and don't post it to the stack */
3291            m = NULL;
3292
3293            BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3294                  fp->index, rc);
3295            fp->eth_q_stats.rx_soft_errors++;
3296
3297            if (sc->max_rx_bufs != RX_BD_USABLE) {
3298                /* copy this consumer index to the producer index */
3299                memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3300                       sizeof(struct bxe_sw_rx_bd));
3301                memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3302            }
3303
3304            goto next_rx;
3305        }
3306
3307        /* current mbuf was detached from the bd */
3308        fp->eth_q_stats.mbuf_alloc_rx--;
3309
3310        /* we allocated a replacement mbuf, fixup the current one */
3311        m_adj(m, pad);
3312        m->m_pkthdr.len = m->m_len = len;
3313
3314        if ((len > 60) && (len > lenonbd)) {
3315            fp->eth_q_stats.rx_bxe_service_rxsgl++;
3316            rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3317            if (rc)
3318                break;
3319            fp->eth_q_stats.rx_jumbo_sge_pkts++;
3320        } else if (lenonbd < len) {
3321            fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3322        }
3323
3324        /* assign packet to this interface interface */
3325        m->m_pkthdr.rcvif = ifp;
3326
3327        /* assume no hardware checksum has complated */
3328        m->m_pkthdr.csum_flags = 0;
3329
3330        /* validate checksum if offload enabled */
3331        if (ifp->if_capenable & IFCAP_RXCSUM) {
3332            /* check for a valid IP frame */
3333            if (!(cqe->fast_path_cqe.status_flags &
3334                  ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3335                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3336                if (__predict_false(cqe_fp_flags &
3337                                    ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3338                    fp->eth_q_stats.rx_hw_csum_errors++;
3339                } else {
3340                    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3341                    m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3342                }
3343            }
3344
3345            /* check for a valid TCP/UDP frame */
3346            if (!(cqe->fast_path_cqe.status_flags &
3347                  ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3348                if (__predict_false(cqe_fp_flags &
3349                                    ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3350                    fp->eth_q_stats.rx_hw_csum_errors++;
3351                } else {
3352                    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3353                    m->m_pkthdr.csum_data = 0xFFFF;
3354                    m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3355                                               CSUM_PSEUDO_HDR);
3356                }
3357            }
3358        }
3359
3360        /* if there is a VLAN tag then flag that info */
3361        if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3362            m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3363            m->m_flags |= M_VLANTAG;
3364        }
3365
3366#if __FreeBSD_version >= 800000
3367        /* specify what RSS queue was used for this flow */
3368        m->m_pkthdr.flowid = fp->index;
3369        BXE_SET_FLOWID(m);
3370#endif
3371
3372next_rx:
3373
3374        bd_cons    = RX_BD_NEXT(bd_cons);
3375        bd_prod    = RX_BD_NEXT(bd_prod);
3376        bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3377
3378        /* pass the frame to the stack */
3379        if (__predict_true(m != NULL)) {
3380            ifp->if_ipackets++;
3381            rx_pkts++;
3382            (*ifp->if_input)(ifp, m);
3383        }
3384
3385next_cqe:
3386
3387        sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3388        sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3389
3390        /* limit spinning on the queue */
3391        if (rc != 0)
3392            break;
3393
3394        if (rx_pkts == sc->rx_budget) {
3395            fp->eth_q_stats.rx_budget_reached++;
3396            break;
3397        }
3398    } /* while work to do */
3399
3400    fp->rx_bd_cons = bd_cons;
3401    fp->rx_bd_prod = bd_prod_fw;
3402    fp->rx_cq_cons = sw_cq_cons;
3403    fp->rx_cq_prod = sw_cq_prod;
3404
3405    /* Update producers */
3406    bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3407
3408    fp->eth_q_stats.rx_pkts += rx_pkts;
3409    fp->eth_q_stats.rx_calls++;
3410
3411    BXE_FP_RX_UNLOCK(fp);
3412
3413    return (sw_cq_cons != hw_cq_cons);
3414}
3415
3416static uint16_t
3417bxe_free_tx_pkt(struct bxe_softc    *sc,
3418                struct bxe_fastpath *fp,
3419                uint16_t            idx)
3420{
3421    struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3422    struct eth_tx_start_bd *tx_start_bd;
3423    uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3424    uint16_t new_cons;
3425    int nbd;
3426
3427    /* unmap the mbuf from non-paged memory */
3428    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3429
3430    tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3431    nbd = le16toh(tx_start_bd->nbd) - 1;
3432
3433    new_cons = (tx_buf->first_bd + nbd);
3434
3435    /* free the mbuf */
3436    if (__predict_true(tx_buf->m != NULL)) {
3437        m_freem(tx_buf->m);
3438        fp->eth_q_stats.mbuf_alloc_tx--;
3439    } else {
3440        fp->eth_q_stats.tx_chain_lost_mbuf++;
3441    }
3442
3443    tx_buf->m = NULL;
3444    tx_buf->first_bd = 0;
3445
3446    return (new_cons);
3447}
3448
3449/* transmit timeout watchdog */
3450static int
3451bxe_watchdog(struct bxe_softc    *sc,
3452             struct bxe_fastpath *fp)
3453{
3454    BXE_FP_TX_LOCK(fp);
3455
3456    if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3457        BXE_FP_TX_UNLOCK(fp);
3458        return (0);
3459    }
3460
3461    BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3462    if(sc->trigger_grcdump) {
3463         /* taking grcdump */
3464         bxe_grc_dump(sc);
3465    }
3466
3467    BXE_FP_TX_UNLOCK(fp);
3468
3469    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
3470    taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
3471
3472    return (-1);
3473}
3474
3475/* processes transmit completions */
3476static uint8_t
3477bxe_txeof(struct bxe_softc    *sc,
3478          struct bxe_fastpath *fp)
3479{
3480    struct ifnet *ifp = sc->ifnet;
3481    uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3482    uint16_t tx_bd_avail;
3483
3484    BXE_FP_TX_LOCK_ASSERT(fp);
3485
3486    bd_cons = fp->tx_bd_cons;
3487    hw_cons = le16toh(*fp->tx_cons_sb);
3488    sw_cons = fp->tx_pkt_cons;
3489
3490    while (sw_cons != hw_cons) {
3491        pkt_cons = TX_BD(sw_cons);
3492
3493        BLOGD(sc, DBG_TX,
3494              "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3495              fp->index, hw_cons, sw_cons, pkt_cons);
3496
3497        bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3498
3499        sw_cons++;
3500    }
3501
3502    fp->tx_pkt_cons = sw_cons;
3503    fp->tx_bd_cons  = bd_cons;
3504
3505    BLOGD(sc, DBG_TX,
3506          "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3507          fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3508
3509    mb();
3510
3511    tx_bd_avail = bxe_tx_avail(sc, fp);
3512
3513    if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3514        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3515    } else {
3516        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3517    }
3518
3519    if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3520        /* reset the watchdog timer if there are pending transmits */
3521        fp->watchdog_timer = BXE_TX_TIMEOUT;
3522        return (TRUE);
3523    } else {
3524        /* clear watchdog when there are no pending transmits */
3525        fp->watchdog_timer = 0;
3526        return (FALSE);
3527    }
3528}
3529
3530static void
3531bxe_drain_tx_queues(struct bxe_softc *sc)
3532{
3533    struct bxe_fastpath *fp;
3534    int i, count;
3535
3536    /* wait until all TX fastpath tasks have completed */
3537    for (i = 0; i < sc->num_queues; i++) {
3538        fp = &sc->fp[i];
3539
3540        count = 1000;
3541
3542        while (bxe_has_tx_work(fp)) {
3543
3544            BXE_FP_TX_LOCK(fp);
3545            bxe_txeof(sc, fp);
3546            BXE_FP_TX_UNLOCK(fp);
3547
3548            if (count == 0) {
3549                BLOGE(sc, "Timeout waiting for fp[%d] "
3550                          "transmits to complete!\n", i);
3551                bxe_panic(sc, ("tx drain failure\n"));
3552                return;
3553            }
3554
3555            count--;
3556            DELAY(1000);
3557            rmb();
3558        }
3559    }
3560
3561    return;
3562}
3563
3564static int
3565bxe_del_all_macs(struct bxe_softc          *sc,
3566                 struct ecore_vlan_mac_obj *mac_obj,
3567                 int                       mac_type,
3568                 uint8_t                   wait_for_comp)
3569{
3570    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3571    int rc;
3572
3573    /* wait for completion of requested */
3574    if (wait_for_comp) {
3575        bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3576    }
3577
3578    /* Set the mac type of addresses we want to clear */
3579    bxe_set_bit(mac_type, &vlan_mac_flags);
3580
3581    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3582    if (rc < 0) {
3583        BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
3584            rc, mac_type, wait_for_comp);
3585    }
3586
3587    return (rc);
3588}
3589
3590static int
3591bxe_fill_accept_flags(struct bxe_softc *sc,
3592                      uint32_t         rx_mode,
3593                      unsigned long    *rx_accept_flags,
3594                      unsigned long    *tx_accept_flags)
3595{
3596    /* Clear the flags first */
3597    *rx_accept_flags = 0;
3598    *tx_accept_flags = 0;
3599
3600    switch (rx_mode) {
3601    case BXE_RX_MODE_NONE:
3602        /*
3603         * 'drop all' supersedes any accept flags that may have been
3604         * passed to the function.
3605         */
3606        break;
3607
3608    case BXE_RX_MODE_NORMAL:
3609        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3610        bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3611        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3612
3613        /* internal switching mode */
3614        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3615        bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3616        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3617
3618        break;
3619
3620    case BXE_RX_MODE_ALLMULTI:
3621        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3622        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3623        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3624
3625        /* internal switching mode */
3626        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3627        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3628        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3629
3630        break;
3631
3632    case BXE_RX_MODE_PROMISC:
3633        /*
3634         * According to deffinition of SI mode, iface in promisc mode
3635         * should receive matched and unmatched (in resolution of port)
3636         * unicast packets.
3637         */
3638        bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3639        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3640        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3641        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3642
3643        /* internal switching mode */
3644        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3645        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3646
3647        if (IS_MF_SI(sc)) {
3648            bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3649        } else {
3650            bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3651        }
3652
3653        break;
3654
3655    default:
3656        BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
3657        return (-1);
3658    }
3659
3660    /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3661    if (rx_mode != BXE_RX_MODE_NONE) {
3662        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3663        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3664    }
3665
3666    return (0);
3667}
3668
3669static int
3670bxe_set_q_rx_mode(struct bxe_softc *sc,
3671                  uint8_t          cl_id,
3672                  unsigned long    rx_mode_flags,
3673                  unsigned long    rx_accept_flags,
3674                  unsigned long    tx_accept_flags,
3675                  unsigned long    ramrod_flags)
3676{
3677    struct ecore_rx_mode_ramrod_params ramrod_param;
3678    int rc;
3679
3680    memset(&ramrod_param, 0, sizeof(ramrod_param));
3681
3682    /* Prepare ramrod parameters */
3683    ramrod_param.cid = 0;
3684    ramrod_param.cl_id = cl_id;
3685    ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3686    ramrod_param.func_id = SC_FUNC(sc);
3687
3688    ramrod_param.pstate = &sc->sp_state;
3689    ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3690
3691    ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3692    ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3693
3694    bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3695
3696    ramrod_param.ramrod_flags = ramrod_flags;
3697    ramrod_param.rx_mode_flags = rx_mode_flags;
3698
3699    ramrod_param.rx_accept_flags = rx_accept_flags;
3700    ramrod_param.tx_accept_flags = tx_accept_flags;
3701
3702    rc = ecore_config_rx_mode(sc, &ramrod_param);
3703    if (rc < 0) {
3704        BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
3705            "rx_accept_flags 0x%x tx_accept_flags 0x%x "
3706            "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3707            (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
3708            (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
3709        return (rc);
3710    }
3711
3712    return (0);
3713}
3714
3715static int
3716bxe_set_storm_rx_mode(struct bxe_softc *sc)
3717{
3718    unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3719    unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3720    int rc;
3721
3722    rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3723                               &tx_accept_flags);
3724    if (rc) {
3725        return (rc);
3726    }
3727
3728    bxe_set_bit(RAMROD_RX, &ramrod_flags);
3729    bxe_set_bit(RAMROD_TX, &ramrod_flags);
3730
3731    /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
3732    return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3733                              rx_accept_flags, tx_accept_flags,
3734                              ramrod_flags));
3735}
3736
3737/* returns the "mcp load_code" according to global load_count array */
3738static int
3739bxe_nic_load_no_mcp(struct bxe_softc *sc)
3740{
3741    int path = SC_PATH(sc);
3742    int port = SC_PORT(sc);
3743
3744    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3745          path, load_count[path][0], load_count[path][1],
3746          load_count[path][2]);
3747    load_count[path][0]++;
3748    load_count[path][1 + port]++;
3749    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3750          path, load_count[path][0], load_count[path][1],
3751          load_count[path][2]);
3752    if (load_count[path][0] == 1) {
3753        return (FW_MSG_CODE_DRV_LOAD_COMMON);
3754    } else if (load_count[path][1 + port] == 1) {
3755        return (FW_MSG_CODE_DRV_LOAD_PORT);
3756    } else {
3757        return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3758    }
3759}
3760
3761/* returns the "mcp load_code" according to global load_count array */
3762static int
3763bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3764{
3765    int port = SC_PORT(sc);
3766    int path = SC_PATH(sc);
3767
3768    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3769          path, load_count[path][0], load_count[path][1],
3770          load_count[path][2]);
3771    load_count[path][0]--;
3772    load_count[path][1 + port]--;
3773    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3774          path, load_count[path][0], load_count[path][1],
3775          load_count[path][2]);
3776    if (load_count[path][0] == 0) {
3777        return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3778    } else if (load_count[path][1 + port] == 0) {
3779        return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3780    } else {
3781        return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3782    }
3783}
3784
3785/* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3786static uint32_t
3787bxe_send_unload_req(struct bxe_softc *sc,
3788                    int              unload_mode)
3789{
3790    uint32_t reset_code = 0;
3791
3792    /* Select the UNLOAD request mode */
3793    if (unload_mode == UNLOAD_NORMAL) {
3794        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3795    } else {
3796        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3797    }
3798
3799    /* Send the request to the MCP */
3800    if (!BXE_NOMCP(sc)) {
3801        reset_code = bxe_fw_command(sc, reset_code, 0);
3802    } else {
3803        reset_code = bxe_nic_unload_no_mcp(sc);
3804    }
3805
3806    return (reset_code);
3807}
3808
3809/* send UNLOAD_DONE command to the MCP */
3810static void
3811bxe_send_unload_done(struct bxe_softc *sc,
3812                     uint8_t          keep_link)
3813{
3814    uint32_t reset_param =
3815        keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3816
3817    /* Report UNLOAD_DONE to MCP */
3818    if (!BXE_NOMCP(sc)) {
3819        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3820    }
3821}
3822
3823static int
3824bxe_func_wait_started(struct bxe_softc *sc)
3825{
3826    int tout = 50;
3827
3828    if (!sc->port.pmf) {
3829        return (0);
3830    }
3831
3832    /*
3833     * (assumption: No Attention from MCP at this stage)
3834     * PMF probably in the middle of TX disable/enable transaction
3835     * 1. Sync IRS for default SB
3836     * 2. Sync SP queue - this guarantees us that attention handling started
3837     * 3. Wait, that TX disable/enable transaction completes
3838     *
3839     * 1+2 guarantee that if DCBX attention was scheduled it already changed
3840     * pending bit of transaction from STARTED-->TX_STOPPED, if we already
3841     * received completion for the transaction the state is TX_STOPPED.
3842     * State will return to STARTED after completion of TX_STOPPED-->STARTED
3843     * transaction.
3844     */
3845
3846    /* XXX make sure default SB ISR is done */
3847    /* need a way to synchronize an irq (intr_mtx?) */
3848
3849    /* XXX flush any work queues */
3850
3851    while (ecore_func_get_state(sc, &sc->func_obj) !=
3852           ECORE_F_STATE_STARTED && tout--) {
3853        DELAY(20000);
3854    }
3855
3856    if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3857        /*
3858         * Failed to complete the transaction in a "good way"
3859         * Force both transactions with CLR bit.
3860         */
3861        struct ecore_func_state_params func_params = { NULL };
3862
3863        BLOGE(sc, "Unexpected function state! "
3864                  "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3865
3866        func_params.f_obj = &sc->func_obj;
3867        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3868
3869        /* STARTED-->TX_STOPPED */
3870        func_params.cmd = ECORE_F_CMD_TX_STOP;
3871        ecore_func_state_change(sc, &func_params);
3872
3873        /* TX_STOPPED-->STARTED */
3874        func_params.cmd = ECORE_F_CMD_TX_START;
3875        return (ecore_func_state_change(sc, &func_params));
3876    }
3877
3878    return (0);
3879}
3880
3881static int
3882bxe_stop_queue(struct bxe_softc *sc,
3883               int              index)
3884{
3885    struct bxe_fastpath *fp = &sc->fp[index];
3886    struct ecore_queue_state_params q_params = { NULL };
3887    int rc;
3888
3889    BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3890
3891    q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3892    /* We want to wait for completion in this context */
3893    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3894
3895    /* Stop the primary connection: */
3896
3897    /* ...halt the connection */
3898    q_params.cmd = ECORE_Q_CMD_HALT;
3899    rc = ecore_queue_state_change(sc, &q_params);
3900    if (rc) {
3901        return (rc);
3902    }
3903
3904    /* ...terminate the connection */
3905    q_params.cmd = ECORE_Q_CMD_TERMINATE;
3906    memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3907    q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3908    rc = ecore_queue_state_change(sc, &q_params);
3909    if (rc) {
3910        return (rc);
3911    }
3912
3913    /* ...delete cfc entry */
3914    q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3915    memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3916    q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3917    return (ecore_queue_state_change(sc, &q_params));
3918}
3919
3920/* wait for the outstanding SP commands */
3921static inline uint8_t
3922bxe_wait_sp_comp(struct bxe_softc *sc,
3923                 unsigned long    mask)
3924{
3925    unsigned long tmp;
3926    int tout = 5000; /* wait for 5 secs tops */
3927
3928    while (tout--) {
3929        mb();
3930        if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3931            return (TRUE);
3932        }
3933
3934        DELAY(1000);
3935    }
3936
3937    mb();
3938
3939    tmp = atomic_load_acq_long(&sc->sp_state);
3940    if (tmp & mask) {
3941        BLOGE(sc, "Filtering completion timed out: "
3942                  "sp_state 0x%lx, mask 0x%lx\n",
3943              tmp, mask);
3944        return (FALSE);
3945    }
3946
3947    return (FALSE);
3948}
3949
3950static int
3951bxe_func_stop(struct bxe_softc *sc)
3952{
3953    struct ecore_func_state_params func_params = { NULL };
3954    int rc;
3955
3956    /* prepare parameters for function state transitions */
3957    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3958    func_params.f_obj = &sc->func_obj;
3959    func_params.cmd = ECORE_F_CMD_STOP;
3960
3961    /*
3962     * Try to stop the function the 'good way'. If it fails (in case
3963     * of a parity error during bxe_chip_cleanup()) and we are
3964     * not in a debug mode, perform a state transaction in order to
3965     * enable further HW_RESET transaction.
3966     */
3967    rc = ecore_func_state_change(sc, &func_params);
3968    if (rc) {
3969        BLOGE(sc, "FUNC_STOP ramrod failed. "
3970                  "Running a dry transaction (%d)\n", rc);
3971        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3972        return (ecore_func_state_change(sc, &func_params));
3973    }
3974
3975    return (0);
3976}
3977
3978static int
3979bxe_reset_hw(struct bxe_softc *sc,
3980             uint32_t         load_code)
3981{
3982    struct ecore_func_state_params func_params = { NULL };
3983
3984    /* Prepare parameters for function state transitions */
3985    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3986
3987    func_params.f_obj = &sc->func_obj;
3988    func_params.cmd = ECORE_F_CMD_HW_RESET;
3989
3990    func_params.params.hw_init.load_phase = load_code;
3991
3992    return (ecore_func_state_change(sc, &func_params));
3993}
3994
3995static void
3996bxe_int_disable_sync(struct bxe_softc *sc,
3997                     int              disable_hw)
3998{
3999    if (disable_hw) {
4000        /* prevent the HW from sending interrupts */
4001        bxe_int_disable(sc);
4002    }
4003
4004    /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4005    /* make sure all ISRs are done */
4006
4007    /* XXX make sure sp_task is not running */
4008    /* cancel and flush work queues */
4009}
4010
4011static void
4012bxe_chip_cleanup(struct bxe_softc *sc,
4013                 uint32_t         unload_mode,
4014                 uint8_t          keep_link)
4015{
4016    int port = SC_PORT(sc);
4017    struct ecore_mcast_ramrod_params rparam = { NULL };
4018    uint32_t reset_code;
4019    int i, rc = 0;
4020
4021    bxe_drain_tx_queues(sc);
4022
4023    /* give HW time to discard old tx messages */
4024    DELAY(1000);
4025
4026    /* Clean all ETH MACs */
4027    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4028    if (rc < 0) {
4029        BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4030    }
4031
4032    /* Clean up UC list  */
4033    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4034    if (rc < 0) {
4035        BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4036    }
4037
4038    /* Disable LLH */
4039    if (!CHIP_IS_E1(sc)) {
4040        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4041    }
4042
4043    /* Set "drop all" to stop Rx */
4044
4045    /*
4046     * We need to take the BXE_MCAST_LOCK() here in order to prevent
4047     * a race between the completion code and this code.
4048     */
4049    BXE_MCAST_LOCK(sc);
4050
4051    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4052        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4053    } else {
4054        bxe_set_storm_rx_mode(sc);
4055    }
4056
4057    /* Clean up multicast configuration */
4058    rparam.mcast_obj = &sc->mcast_obj;
4059    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4060    if (rc < 0) {
4061        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4062    }
4063
4064    BXE_MCAST_UNLOCK(sc);
4065
4066    // XXX bxe_iov_chip_cleanup(sc);
4067
4068    /*
4069     * Send the UNLOAD_REQUEST to the MCP. This will return if
4070     * this function should perform FUNCTION, PORT, or COMMON HW
4071     * reset.
4072     */
4073    reset_code = bxe_send_unload_req(sc, unload_mode);
4074
4075    /*
4076     * (assumption: No Attention from MCP at this stage)
4077     * PMF probably in the middle of TX disable/enable transaction
4078     */
4079    rc = bxe_func_wait_started(sc);
4080    if (rc) {
4081        BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4082    }
4083
4084    /*
4085     * Close multi and leading connections
4086     * Completions for ramrods are collected in a synchronous way
4087     */
4088    for (i = 0; i < sc->num_queues; i++) {
4089        if (bxe_stop_queue(sc, i)) {
4090            goto unload_error;
4091        }
4092    }
4093
4094    /*
4095     * If SP settings didn't get completed so far - something
4096     * very wrong has happen.
4097     */
4098    if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4099        BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4100    }
4101
4102unload_error:
4103
4104    rc = bxe_func_stop(sc);
4105    if (rc) {
4106        BLOGE(sc, "Function stop failed!(%d)\n", rc);
4107    }
4108
4109    /* disable HW interrupts */
4110    bxe_int_disable_sync(sc, TRUE);
4111
4112    /* detach interrupts */
4113    bxe_interrupt_detach(sc);
4114
4115    /* Reset the chip */
4116    rc = bxe_reset_hw(sc, reset_code);
4117    if (rc) {
4118        BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4119    }
4120
4121    /* Report UNLOAD_DONE to MCP */
4122    bxe_send_unload_done(sc, keep_link);
4123}
4124
4125static void
4126bxe_disable_close_the_gate(struct bxe_softc *sc)
4127{
4128    uint32_t val;
4129    int port = SC_PORT(sc);
4130
4131    BLOGD(sc, DBG_LOAD,
4132          "Disabling 'close the gates'\n");
4133
4134    if (CHIP_IS_E1(sc)) {
4135        uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4136                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
4137        val = REG_RD(sc, addr);
4138        val &= ~(0x300);
4139        REG_WR(sc, addr, val);
4140    } else {
4141        val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4142        val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4143                 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4144        REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4145    }
4146}
4147
4148/*
4149 * Cleans the object that have internal lists without sending
4150 * ramrods. Should be run when interrutps are disabled.
4151 */
4152static void
4153bxe_squeeze_objects(struct bxe_softc *sc)
4154{
4155    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4156    struct ecore_mcast_ramrod_params rparam = { NULL };
4157    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4158    int rc;
4159
4160    /* Cleanup MACs' object first... */
4161
4162    /* Wait for completion of requested */
4163    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4164    /* Perform a dry cleanup */
4165    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4166
4167    /* Clean ETH primary MAC */
4168    bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4169    rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4170                             &ramrod_flags);
4171    if (rc != 0) {
4172        BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4173    }
4174
4175    /* Cleanup UC list */
4176    vlan_mac_flags = 0;
4177    bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4178    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4179                             &ramrod_flags);
4180    if (rc != 0) {
4181        BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4182    }
4183
4184    /* Now clean mcast object... */
4185
4186    rparam.mcast_obj = &sc->mcast_obj;
4187    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4188
4189    /* Add a DEL command... */
4190    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4191    if (rc < 0) {
4192        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4193    }
4194
4195    /* now wait until all pending commands are cleared */
4196
4197    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4198    while (rc != 0) {
4199        if (rc < 0) {
4200            BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4201            return;
4202        }
4203
4204        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4205    }
4206}
4207
4208/* stop the controller */
4209static __noinline int
4210bxe_nic_unload(struct bxe_softc *sc,
4211               uint32_t         unload_mode,
4212               uint8_t          keep_link)
4213{
4214    uint8_t global = FALSE;
4215    uint32_t val;
4216    int i;
4217
4218    BXE_CORE_LOCK_ASSERT(sc);
4219
4220    sc->ifnet->if_drv_flags &= ~IFF_DRV_RUNNING;
4221
4222    for (i = 0; i < sc->num_queues; i++) {
4223        struct bxe_fastpath *fp;
4224
4225        fp = &sc->fp[i];
4226        BXE_FP_TX_LOCK(fp);
4227        BXE_FP_TX_UNLOCK(fp);
4228    }
4229
4230    BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4231
4232    /* mark driver as unloaded in shmem2 */
4233    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4234        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4235        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4236                  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4237    }
4238
4239    if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4240        (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4241        /*
4242         * We can get here if the driver has been unloaded
4243         * during parity error recovery and is either waiting for a
4244         * leader to complete or for other functions to unload and
4245         * then ifconfig down has been issued. In this case we want to
4246         * unload and let other functions to complete a recovery
4247         * process.
4248         */
4249        sc->recovery_state = BXE_RECOVERY_DONE;
4250        sc->is_leader = 0;
4251        bxe_release_leader_lock(sc);
4252        mb();
4253
4254        BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4255        BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x"
4256            " state = 0x%x\n", sc->recovery_state, sc->state);
4257        return (-1);
4258    }
4259
4260    /*
4261     * Nothing to do during unload if previous bxe_nic_load()
4262     * did not completed succesfully - all resourses are released.
4263     */
4264    if ((sc->state == BXE_STATE_CLOSED) ||
4265        (sc->state == BXE_STATE_ERROR)) {
4266        return (0);
4267    }
4268
4269    sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4270    mb();
4271
4272    /* stop tx */
4273    bxe_tx_disable(sc);
4274
4275    sc->rx_mode = BXE_RX_MODE_NONE;
4276    /* XXX set rx mode ??? */
4277
4278    if (IS_PF(sc) && !sc->grcdump_done) {
4279        /* set ALWAYS_ALIVE bit in shmem */
4280        sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4281
4282        bxe_drv_pulse(sc);
4283
4284        bxe_stats_handle(sc, STATS_EVENT_STOP);
4285        bxe_save_statistics(sc);
4286    }
4287
4288    /* wait till consumers catch up with producers in all queues */
4289    bxe_drain_tx_queues(sc);
4290
4291    /* if VF indicate to PF this function is going down (PF will delete sp
4292     * elements and clear initializations
4293     */
4294    if (IS_VF(sc)) {
4295        ; /* bxe_vfpf_close_vf(sc); */
4296    } else if (unload_mode != UNLOAD_RECOVERY) {
4297        /* if this is a normal/close unload need to clean up chip */
4298        if (!sc->grcdump_done)
4299            bxe_chip_cleanup(sc, unload_mode, keep_link);
4300    } else {
4301        /* Send the UNLOAD_REQUEST to the MCP */
4302        bxe_send_unload_req(sc, unload_mode);
4303
4304        /*
4305         * Prevent transactions to host from the functions on the
4306         * engine that doesn't reset global blocks in case of global
4307         * attention once gloabl blocks are reset and gates are opened
4308         * (the engine which leader will perform the recovery
4309         * last).
4310         */
4311        if (!CHIP_IS_E1x(sc)) {
4312            bxe_pf_disable(sc);
4313        }
4314
4315        /* disable HW interrupts */
4316        bxe_int_disable_sync(sc, TRUE);
4317
4318        /* detach interrupts */
4319        bxe_interrupt_detach(sc);
4320
4321        /* Report UNLOAD_DONE to MCP */
4322        bxe_send_unload_done(sc, FALSE);
4323    }
4324
4325    /*
4326     * At this stage no more interrupts will arrive so we may safely clean
4327     * the queue'able objects here in case they failed to get cleaned so far.
4328     */
4329    if (IS_PF(sc)) {
4330        bxe_squeeze_objects(sc);
4331    }
4332
4333    /* There should be no more pending SP commands at this stage */
4334    sc->sp_state = 0;
4335
4336    sc->port.pmf = 0;
4337
4338    bxe_free_fp_buffers(sc);
4339
4340    if (IS_PF(sc)) {
4341        bxe_free_mem(sc);
4342    }
4343
4344    bxe_free_fw_stats_mem(sc);
4345
4346    sc->state = BXE_STATE_CLOSED;
4347
4348    /*
4349     * Check if there are pending parity attentions. If there are - set
4350     * RECOVERY_IN_PROGRESS.
4351     */
4352    if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4353        bxe_set_reset_in_progress(sc);
4354
4355        /* Set RESET_IS_GLOBAL if needed */
4356        if (global) {
4357            bxe_set_reset_global(sc);
4358        }
4359    }
4360
4361    /*
4362     * The last driver must disable a "close the gate" if there is no
4363     * parity attention or "process kill" pending.
4364     */
4365    if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4366        bxe_reset_is_done(sc, SC_PATH(sc))) {
4367        bxe_disable_close_the_gate(sc);
4368    }
4369
4370    BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4371
4372    return (0);
4373}
4374
4375/*
4376 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4377 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4378 */
4379static int
4380bxe_ifmedia_update(struct ifnet *ifp)
4381{
4382    struct bxe_softc *sc = (struct bxe_softc *)ifp->if_softc;
4383    struct ifmedia *ifm;
4384
4385    ifm = &sc->ifmedia;
4386
4387    /* We only support Ethernet media type. */
4388    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4389        return (EINVAL);
4390    }
4391
4392    switch (IFM_SUBTYPE(ifm->ifm_media)) {
4393    case IFM_AUTO:
4394         break;
4395    case IFM_10G_CX4:
4396    case IFM_10G_SR:
4397    case IFM_10G_T:
4398    case IFM_10G_TWINAX:
4399    default:
4400        /* We don't support changing the media type. */
4401        BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4402              IFM_SUBTYPE(ifm->ifm_media));
4403        return (EINVAL);
4404    }
4405
4406    return (0);
4407}
4408
4409/*
4410 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4411 */
4412static void
4413bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
4414{
4415    struct bxe_softc *sc = ifp->if_softc;
4416
4417    /* Report link down if the driver isn't running. */
4418    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4419        ifmr->ifm_active |= IFM_NONE;
4420        return;
4421    }
4422
4423    /* Setup the default interface info. */
4424    ifmr->ifm_status = IFM_AVALID;
4425    ifmr->ifm_active = IFM_ETHER;
4426
4427    if (sc->link_vars.link_up) {
4428        ifmr->ifm_status |= IFM_ACTIVE;
4429    } else {
4430        ifmr->ifm_active |= IFM_NONE;
4431        return;
4432    }
4433
4434    ifmr->ifm_active |= sc->media;
4435
4436    if (sc->link_vars.duplex == DUPLEX_FULL) {
4437        ifmr->ifm_active |= IFM_FDX;
4438    } else {
4439        ifmr->ifm_active |= IFM_HDX;
4440    }
4441}
4442
4443static void
4444bxe_handle_chip_tq(void *context,
4445                   int  pending)
4446{
4447    struct bxe_softc *sc = (struct bxe_softc *)context;
4448    long work = atomic_load_acq_long(&sc->chip_tq_flags);
4449
4450    switch (work)
4451    {
4452    case CHIP_TQ_REINIT:
4453        if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
4454            /* restart the interface */
4455            BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4456            bxe_periodic_stop(sc);
4457            BXE_CORE_LOCK(sc);
4458            bxe_stop_locked(sc);
4459            bxe_init_locked(sc);
4460            BXE_CORE_UNLOCK(sc);
4461        }
4462        break;
4463
4464    default:
4465        break;
4466    }
4467}
4468
4469/*
4470 * Handles any IOCTL calls from the operating system.
4471 *
4472 * Returns:
4473 *   0 = Success, >0 Failure
4474 */
4475static int
4476bxe_ioctl(struct ifnet *ifp,
4477          u_long       command,
4478          caddr_t      data)
4479{
4480    struct bxe_softc *sc = ifp->if_softc;
4481    struct ifreq *ifr = (struct ifreq *)data;
4482    int mask = 0;
4483    int reinit = 0;
4484    int error = 0;
4485
4486    int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4487    int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4488
4489    switch (command)
4490    {
4491    case SIOCSIFMTU:
4492        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4493              ifr->ifr_mtu);
4494
4495        if (sc->mtu == ifr->ifr_mtu) {
4496            /* nothing to change */
4497            break;
4498        }
4499
4500        if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4501            BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4502                  ifr->ifr_mtu, mtu_min, mtu_max);
4503            error = EINVAL;
4504            break;
4505        }
4506
4507        atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4508                             (unsigned long)ifr->ifr_mtu);
4509        atomic_store_rel_long((volatile unsigned long *)&ifp->if_mtu,
4510                              (unsigned long)ifr->ifr_mtu);
4511
4512        reinit = 1;
4513        break;
4514
4515    case SIOCSIFFLAGS:
4516        /* toggle the interface state up or down */
4517        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4518
4519	BXE_CORE_LOCK(sc);
4520        /* check if the interface is up */
4521        if (ifp->if_flags & IFF_UP) {
4522            if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4523                /* set the receive mode flags */
4524                bxe_set_rx_mode(sc);
4525            } else if(sc->state != BXE_STATE_DISABLED) {
4526		bxe_init_locked(sc);
4527            }
4528        } else {
4529            if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4530		bxe_periodic_stop(sc);
4531		bxe_stop_locked(sc);
4532            }
4533        }
4534	BXE_CORE_UNLOCK(sc);
4535
4536        break;
4537
4538    case SIOCADDMULTI:
4539    case SIOCDELMULTI:
4540        /* add/delete multicast addresses */
4541        BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4542
4543        /* check if the interface is up */
4544        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4545            /* set the receive mode flags */
4546	    BXE_CORE_LOCK(sc);
4547            bxe_set_rx_mode(sc);
4548	    BXE_CORE_UNLOCK(sc);
4549        }
4550
4551        break;
4552
4553    case SIOCSIFCAP:
4554        /* find out which capabilities have changed */
4555        mask = (ifr->ifr_reqcap ^ ifp->if_capenable);
4556
4557        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4558              mask);
4559
4560        /* toggle the LRO capabilites enable flag */
4561        if (mask & IFCAP_LRO) {
4562            ifp->if_capenable ^= IFCAP_LRO;
4563            BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4564                  (ifp->if_capenable & IFCAP_LRO) ? "ON" : "OFF");
4565            reinit = 1;
4566        }
4567
4568        /* toggle the TXCSUM checksum capabilites enable flag */
4569        if (mask & IFCAP_TXCSUM) {
4570            ifp->if_capenable ^= IFCAP_TXCSUM;
4571            BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4572                  (ifp->if_capenable & IFCAP_TXCSUM) ? "ON" : "OFF");
4573            if (ifp->if_capenable & IFCAP_TXCSUM) {
4574                ifp->if_hwassist = (CSUM_IP       |
4575                                    CSUM_TCP      |
4576                                    CSUM_UDP      |
4577                                    CSUM_TSO      |
4578                                    CSUM_TCP_IPV6 |
4579                                    CSUM_UDP_IPV6);
4580            } else {
4581                ifp->if_hwassist = 0;
4582            }
4583        }
4584
4585        /* toggle the RXCSUM checksum capabilities enable flag */
4586        if (mask & IFCAP_RXCSUM) {
4587            ifp->if_capenable ^= IFCAP_RXCSUM;
4588            BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4589                  (ifp->if_capenable & IFCAP_RXCSUM) ? "ON" : "OFF");
4590            if (ifp->if_capenable & IFCAP_RXCSUM) {
4591                ifp->if_hwassist = (CSUM_IP       |
4592                                    CSUM_TCP      |
4593                                    CSUM_UDP      |
4594                                    CSUM_TSO      |
4595                                    CSUM_TCP_IPV6 |
4596                                    CSUM_UDP_IPV6);
4597            } else {
4598                ifp->if_hwassist = 0;
4599            }
4600        }
4601
4602        /* toggle TSO4 capabilities enabled flag */
4603        if (mask & IFCAP_TSO4) {
4604            ifp->if_capenable ^= IFCAP_TSO4;
4605            BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4606                  (ifp->if_capenable & IFCAP_TSO4) ? "ON" : "OFF");
4607        }
4608
4609        /* toggle TSO6 capabilities enabled flag */
4610        if (mask & IFCAP_TSO6) {
4611            ifp->if_capenable ^= IFCAP_TSO6;
4612            BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4613                  (ifp->if_capenable & IFCAP_TSO6) ? "ON" : "OFF");
4614        }
4615
4616        /* toggle VLAN_HWTSO capabilities enabled flag */
4617        if (mask & IFCAP_VLAN_HWTSO) {
4618            ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
4619            BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4620                  (ifp->if_capenable & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4621        }
4622
4623        /* toggle VLAN_HWCSUM capabilities enabled flag */
4624        if (mask & IFCAP_VLAN_HWCSUM) {
4625            /* XXX investigate this... */
4626            BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4627            error = EINVAL;
4628        }
4629
4630        /* toggle VLAN_MTU capabilities enable flag */
4631        if (mask & IFCAP_VLAN_MTU) {
4632            /* XXX investigate this... */
4633            BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4634            error = EINVAL;
4635        }
4636
4637        /* toggle VLAN_HWTAGGING capabilities enabled flag */
4638        if (mask & IFCAP_VLAN_HWTAGGING) {
4639            /* XXX investigate this... */
4640            BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4641            error = EINVAL;
4642        }
4643
4644        /* toggle VLAN_HWFILTER capabilities enabled flag */
4645        if (mask & IFCAP_VLAN_HWFILTER) {
4646            /* XXX investigate this... */
4647            BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4648            error = EINVAL;
4649        }
4650
4651        /* XXX not yet...
4652         * IFCAP_WOL_MAGIC
4653         */
4654
4655        break;
4656
4657    case SIOCSIFMEDIA:
4658    case SIOCGIFMEDIA:
4659        /* set/get interface media */
4660        BLOGD(sc, DBG_IOCTL,
4661              "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4662              (command & 0xff));
4663        error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4664        break;
4665
4666    default:
4667        BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4668              (command & 0xff));
4669        error = ether_ioctl(ifp, command, data);
4670        break;
4671    }
4672
4673    if (reinit && (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) {
4674        BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4675              "Re-initializing hardware from IOCTL change\n");
4676	bxe_periodic_stop(sc);
4677	BXE_CORE_LOCK(sc);
4678	bxe_stop_locked(sc);
4679	bxe_init_locked(sc);
4680	BXE_CORE_UNLOCK(sc);
4681    }
4682
4683    return (error);
4684}
4685
4686static __noinline void
4687bxe_dump_mbuf(struct bxe_softc *sc,
4688              struct mbuf      *m,
4689              uint8_t          contents)
4690{
4691    char * type;
4692    int i = 0;
4693
4694    if (!(sc->debug & DBG_MBUF)) {
4695        return;
4696    }
4697
4698    if (m == NULL) {
4699        BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4700        return;
4701    }
4702
4703    while (m) {
4704
4705#if __FreeBSD_version >= 1000000
4706        BLOGD(sc, DBG_MBUF,
4707              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4708              i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4709
4710        if (m->m_flags & M_PKTHDR) {
4711             BLOGD(sc, DBG_MBUF,
4712                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4713                   i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4714                   (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4715        }
4716#else
4717        BLOGD(sc, DBG_MBUF,
4718              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4719              i, m, m->m_len, m->m_flags,
4720              "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data);
4721
4722        if (m->m_flags & M_PKTHDR) {
4723             BLOGD(sc, DBG_MBUF,
4724                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4725                   i, m->m_pkthdr.len, m->m_flags,
4726                   "\20\12M_BCAST\13M_MCAST\14M_FRAG"
4727                   "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG"
4728                   "\22M_PROMISC\23M_NOFREE",
4729                   (int)m->m_pkthdr.csum_flags,
4730                   "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS"
4731                   "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED"
4732                   "\12CSUM_IP_VALID\13CSUM_DATA_VALID"
4733                   "\14CSUM_PSEUDO_HDR");
4734        }
4735#endif /* #if __FreeBSD_version >= 1000000 */
4736
4737        if (m->m_flags & M_EXT) {
4738            switch (m->m_ext.ext_type) {
4739            case EXT_CLUSTER:    type = "EXT_CLUSTER";    break;
4740            case EXT_SFBUF:      type = "EXT_SFBUF";      break;
4741            case EXT_JUMBOP:     type = "EXT_JUMBOP";     break;
4742            case EXT_JUMBO9:     type = "EXT_JUMBO9";     break;
4743            case EXT_JUMBO16:    type = "EXT_JUMBO16";    break;
4744            case EXT_PACKET:     type = "EXT_PACKET";     break;
4745            case EXT_MBUF:       type = "EXT_MBUF";       break;
4746            case EXT_NET_DRV:    type = "EXT_NET_DRV";    break;
4747            case EXT_MOD_TYPE:   type = "EXT_MOD_TYPE";   break;
4748            case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4749            case EXT_EXTREF:     type = "EXT_EXTREF";     break;
4750            default:             type = "UNKNOWN";        break;
4751            }
4752
4753            BLOGD(sc, DBG_MBUF,
4754                  "%02d: - m_ext: %p ext_size=%d type=%s\n",
4755                  i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4756        }
4757
4758        if (contents) {
4759            bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4760        }
4761
4762        m = m->m_next;
4763        i++;
4764    }
4765}
4766
4767/*
4768 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
4769 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
4770 * The window: 3 bds are = 1 for headers BD + 2 for parse BD and last BD
4771 * The headers comes in a seperate bd in FreeBSD so 13-3=10.
4772 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
4773 */
4774static int
4775bxe_chktso_window(struct bxe_softc  *sc,
4776                  int               nsegs,
4777                  bus_dma_segment_t *segs,
4778                  struct mbuf       *m)
4779{
4780    uint32_t num_wnds, wnd_size, wnd_sum;
4781    int32_t frag_idx, wnd_idx;
4782    unsigned short lso_mss;
4783    int defrag;
4784
4785    defrag = 0;
4786    wnd_sum = 0;
4787    wnd_size = 10;
4788    num_wnds = nsegs - wnd_size;
4789    lso_mss = htole16(m->m_pkthdr.tso_segsz);
4790
4791    /*
4792     * Total header lengths Eth+IP+TCP in first FreeBSD mbuf so calculate the
4793     * first window sum of data while skipping the first assuming it is the
4794     * header in FreeBSD.
4795     */
4796    for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4797        wnd_sum += htole16(segs[frag_idx].ds_len);
4798    }
4799
4800    /* check the first 10 bd window size */
4801    if (wnd_sum < lso_mss) {
4802        return (1);
4803    }
4804
4805    /* run through the windows */
4806    for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
4807        /* subtract the first mbuf->m_len of the last wndw(-header) */
4808        wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
4809        /* add the next mbuf len to the len of our new window */
4810        wnd_sum += htole16(segs[frag_idx].ds_len);
4811        if (wnd_sum < lso_mss) {
4812            return (1);
4813        }
4814    }
4815
4816    return (0);
4817}
4818
4819static uint8_t
4820bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4821                    struct mbuf         *m,
4822                    uint32_t            *parsing_data)
4823{
4824    struct ether_vlan_header *eh = NULL;
4825    struct ip *ip4 = NULL;
4826    struct ip6_hdr *ip6 = NULL;
4827    caddr_t ip = NULL;
4828    struct tcphdr *th = NULL;
4829    int e_hlen, ip_hlen, l4_off;
4830    uint16_t proto;
4831
4832    if (m->m_pkthdr.csum_flags == CSUM_IP) {
4833        /* no L4 checksum offload needed */
4834        return (0);
4835    }
4836
4837    /* get the Ethernet header */
4838    eh = mtod(m, struct ether_vlan_header *);
4839
4840    /* handle VLAN encapsulation if present */
4841    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4842        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4843        proto  = ntohs(eh->evl_proto);
4844    } else {
4845        e_hlen = ETHER_HDR_LEN;
4846        proto  = ntohs(eh->evl_encap_proto);
4847    }
4848
4849    switch (proto) {
4850    case ETHERTYPE_IP:
4851        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4852        ip4 = (m->m_len < sizeof(struct ip)) ?
4853                  (struct ip *)m->m_next->m_data :
4854                  (struct ip *)(m->m_data + e_hlen);
4855        /* ip_hl is number of 32-bit words */
4856        ip_hlen = (ip4->ip_hl << 2);
4857        ip = (caddr_t)ip4;
4858        break;
4859    case ETHERTYPE_IPV6:
4860        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4861        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4862                  (struct ip6_hdr *)m->m_next->m_data :
4863                  (struct ip6_hdr *)(m->m_data + e_hlen);
4864        /* XXX cannot support offload with IPv6 extensions */
4865        ip_hlen = sizeof(struct ip6_hdr);
4866        ip = (caddr_t)ip6;
4867        break;
4868    default:
4869        /* We can't offload in this case... */
4870        /* XXX error stat ??? */
4871        return (0);
4872    }
4873
4874    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4875    l4_off = (e_hlen + ip_hlen);
4876
4877    *parsing_data |=
4878        (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
4879         ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
4880
4881    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4882                                  CSUM_TSO |
4883                                  CSUM_TCP_IPV6)) {
4884        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4885        th = (struct tcphdr *)(ip + ip_hlen);
4886        /* th_off is number of 32-bit words */
4887        *parsing_data |= ((th->th_off <<
4888                           ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
4889                          ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
4890        return (l4_off + (th->th_off << 2)); /* entire header length */
4891    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4892                                         CSUM_UDP_IPV6)) {
4893        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4894        return (l4_off + sizeof(struct udphdr)); /* entire header length */
4895    } else {
4896        /* XXX error stat ??? */
4897        return (0);
4898    }
4899}
4900
4901static uint8_t
4902bxe_set_pbd_csum(struct bxe_fastpath        *fp,
4903                 struct mbuf                *m,
4904                 struct eth_tx_parse_bd_e1x *pbd)
4905{
4906    struct ether_vlan_header *eh = NULL;
4907    struct ip *ip4 = NULL;
4908    struct ip6_hdr *ip6 = NULL;
4909    caddr_t ip = NULL;
4910    struct tcphdr *th = NULL;
4911    struct udphdr *uh = NULL;
4912    int e_hlen, ip_hlen;
4913    uint16_t proto;
4914    uint8_t hlen;
4915    uint16_t tmp_csum;
4916    uint32_t *tmp_uh;
4917
4918    /* get the Ethernet header */
4919    eh = mtod(m, struct ether_vlan_header *);
4920
4921    /* handle VLAN encapsulation if present */
4922    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4923        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4924        proto  = ntohs(eh->evl_proto);
4925    } else {
4926        e_hlen = ETHER_HDR_LEN;
4927        proto  = ntohs(eh->evl_encap_proto);
4928    }
4929
4930    switch (proto) {
4931    case ETHERTYPE_IP:
4932        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4933        ip4 = (m->m_len < sizeof(struct ip)) ?
4934                  (struct ip *)m->m_next->m_data :
4935                  (struct ip *)(m->m_data + e_hlen);
4936        /* ip_hl is number of 32-bit words */
4937        ip_hlen = (ip4->ip_hl << 1);
4938        ip = (caddr_t)ip4;
4939        break;
4940    case ETHERTYPE_IPV6:
4941        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4942        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4943                  (struct ip6_hdr *)m->m_next->m_data :
4944                  (struct ip6_hdr *)(m->m_data + e_hlen);
4945        /* XXX cannot support offload with IPv6 extensions */
4946        ip_hlen = (sizeof(struct ip6_hdr) >> 1);
4947        ip = (caddr_t)ip6;
4948        break;
4949    default:
4950        /* We can't offload in this case... */
4951        /* XXX error stat ??? */
4952        return (0);
4953    }
4954
4955    hlen = (e_hlen >> 1);
4956
4957    /* note that rest of global_data is indirectly zeroed here */
4958    if (m->m_flags & M_VLANTAG) {
4959        pbd->global_data =
4960            htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
4961    } else {
4962        pbd->global_data = htole16(hlen);
4963    }
4964
4965    pbd->ip_hlen_w = ip_hlen;
4966
4967    hlen += pbd->ip_hlen_w;
4968
4969    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4970
4971    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4972                                  CSUM_TSO |
4973                                  CSUM_TCP_IPV6)) {
4974        th = (struct tcphdr *)(ip + (ip_hlen << 1));
4975        /* th_off is number of 32-bit words */
4976        hlen += (uint16_t)(th->th_off << 1);
4977    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4978                                         CSUM_UDP_IPV6)) {
4979        uh = (struct udphdr *)(ip + (ip_hlen << 1));
4980        hlen += (sizeof(struct udphdr) / 2);
4981    } else {
4982        /* valid case as only CSUM_IP was set */
4983        return (0);
4984    }
4985
4986    pbd->total_hlen_w = htole16(hlen);
4987
4988    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4989                                  CSUM_TSO |
4990                                  CSUM_TCP_IPV6)) {
4991        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4992        pbd->tcp_pseudo_csum = ntohs(th->th_sum);
4993    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4994                                         CSUM_UDP_IPV6)) {
4995        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4996
4997        /*
4998         * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
4999         * checksums and does not know anything about the UDP header and where
5000         * the checksum field is located. It only knows about TCP. Therefore
5001         * we "lie" to the hardware for outgoing UDP packets w/ checksum
5002         * offload. Since the checksum field offset for TCP is 16 bytes and
5003         * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
5004         * bytes less than the start of the UDP header. This allows the
5005         * hardware to write the checksum in the correct spot. But the
5006         * hardware will compute a checksum which includes the last 10 bytes
5007         * of the IP header. To correct this we tweak the stack computed
5008         * pseudo checksum by folding in the calculation of the inverse
5009         * checksum for those final 10 bytes of the IP header. This allows
5010         * the correct checksum to be computed by the hardware.
5011         */
5012
5013        /* set pointer 10 bytes before UDP header */
5014        tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5015
5016        /* calculate a pseudo header checksum over the first 10 bytes */
5017        tmp_csum = in_pseudo(*tmp_uh,
5018                             *(tmp_uh + 1),
5019                             *(uint16_t *)(tmp_uh + 2));
5020
5021        pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5022    }
5023
5024    return (hlen * 2); /* entire header length, number of bytes */
5025}
5026
5027static void
5028bxe_set_pbd_lso_e2(struct mbuf *m,
5029                   uint32_t    *parsing_data)
5030{
5031    *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5032                       ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5033                      ETH_TX_PARSE_BD_E2_LSO_MSS);
5034
5035    /* XXX test for IPv6 with extension header... */
5036}
5037
5038static void
5039bxe_set_pbd_lso(struct mbuf                *m,
5040                struct eth_tx_parse_bd_e1x *pbd)
5041{
5042    struct ether_vlan_header *eh = NULL;
5043    struct ip *ip = NULL;
5044    struct tcphdr *th = NULL;
5045    int e_hlen;
5046
5047    /* get the Ethernet header */
5048    eh = mtod(m, struct ether_vlan_header *);
5049
5050    /* handle VLAN encapsulation if present */
5051    e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5052                 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5053
5054    /* get the IP and TCP header, with LSO entire header in first mbuf */
5055    /* XXX assuming IPv4 */
5056    ip = (struct ip *)(m->m_data + e_hlen);
5057    th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5058
5059    pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5060    pbd->tcp_send_seq = ntohl(th->th_seq);
5061    pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5062
5063#if 1
5064        /* XXX IPv4 */
5065        pbd->ip_id = ntohs(ip->ip_id);
5066        pbd->tcp_pseudo_csum =
5067            ntohs(in_pseudo(ip->ip_src.s_addr,
5068                            ip->ip_dst.s_addr,
5069                            htons(IPPROTO_TCP)));
5070#else
5071        /* XXX IPv6 */
5072        pbd->tcp_pseudo_csum =
5073            ntohs(in_pseudo(&ip6->ip6_src,
5074                            &ip6->ip6_dst,
5075                            htons(IPPROTO_TCP)));
5076#endif
5077
5078    pbd->global_data |=
5079        htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5080}
5081
5082/*
5083 * Encapsulte an mbuf cluster into the tx bd chain and makes the memory
5084 * visible to the controller.
5085 *
5086 * If an mbuf is submitted to this routine and cannot be given to the
5087 * controller (e.g. it has too many fragments) then the function may free
5088 * the mbuf and return to the caller.
5089 *
5090 * Returns:
5091 *   0 = Success, !0 = Failure
5092 *   Note the side effect that an mbuf may be freed if it causes a problem.
5093 */
5094static int
5095bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5096{
5097    bus_dma_segment_t segs[32];
5098    struct mbuf *m0;
5099    struct bxe_sw_tx_bd *tx_buf;
5100    struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5101    struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5102    /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5103    struct eth_tx_bd *tx_data_bd;
5104    struct eth_tx_bd *tx_total_pkt_size_bd;
5105    struct eth_tx_start_bd *tx_start_bd;
5106    uint16_t bd_prod, pkt_prod, total_pkt_size;
5107    uint8_t mac_type;
5108    int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5109    struct bxe_softc *sc;
5110    uint16_t tx_bd_avail;
5111    struct ether_vlan_header *eh;
5112    uint32_t pbd_e2_parsing_data = 0;
5113    uint8_t hlen = 0;
5114    int tmp_bd;
5115    int i;
5116
5117    sc = fp->sc;
5118
5119#if __FreeBSD_version >= 800000
5120    M_ASSERTPKTHDR(*m_head);
5121#endif /* #if __FreeBSD_version >= 800000 */
5122
5123    m0 = *m_head;
5124    rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5125    tx_start_bd = NULL;
5126    tx_data_bd = NULL;
5127    tx_total_pkt_size_bd = NULL;
5128
5129    /* get the H/W pointer for packets and BDs */
5130    pkt_prod = fp->tx_pkt_prod;
5131    bd_prod = fp->tx_bd_prod;
5132
5133    mac_type = UNICAST_ADDRESS;
5134
5135    /* map the mbuf into the next open DMAable memory */
5136    tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5137    error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5138                                    tx_buf->m_map, m0,
5139                                    segs, &nsegs, BUS_DMA_NOWAIT);
5140
5141    /* mapping errors */
5142    if(__predict_false(error != 0)) {
5143        fp->eth_q_stats.tx_dma_mapping_failure++;
5144        if (error == ENOMEM) {
5145            /* resource issue, try again later */
5146            rc = ENOMEM;
5147        } else if (error == EFBIG) {
5148            /* possibly recoverable with defragmentation */
5149            fp->eth_q_stats.mbuf_defrag_attempts++;
5150            m0 = m_defrag(*m_head, M_DONTWAIT);
5151            if (m0 == NULL) {
5152                fp->eth_q_stats.mbuf_defrag_failures++;
5153                rc = ENOBUFS;
5154            } else {
5155                /* defrag successful, try mapping again */
5156                *m_head = m0;
5157                error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5158                                                tx_buf->m_map, m0,
5159                                                segs, &nsegs, BUS_DMA_NOWAIT);
5160                if (error) {
5161                    fp->eth_q_stats.tx_dma_mapping_failure++;
5162                    rc = error;
5163                }
5164            }
5165        } else {
5166            /* unknown, unrecoverable mapping error */
5167            BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5168            bxe_dump_mbuf(sc, m0, FALSE);
5169            rc = error;
5170        }
5171
5172        goto bxe_tx_encap_continue;
5173    }
5174
5175    tx_bd_avail = bxe_tx_avail(sc, fp);
5176
5177    /* make sure there is enough room in the send queue */
5178    if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5179        /* Recoverable, try again later. */
5180        fp->eth_q_stats.tx_hw_queue_full++;
5181        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5182        rc = ENOMEM;
5183        goto bxe_tx_encap_continue;
5184    }
5185
5186    /* capture the current H/W TX chain high watermark */
5187    if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5188                        (TX_BD_USABLE - tx_bd_avail))) {
5189        fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5190    }
5191
5192    /* make sure it fits in the packet window */
5193    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5194        /*
5195         * The mbuf may be to big for the controller to handle. If the frame
5196         * is a TSO frame we'll need to do an additional check.
5197         */
5198        if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5199            if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5200                goto bxe_tx_encap_continue; /* OK to send */
5201            } else {
5202                fp->eth_q_stats.tx_window_violation_tso++;
5203            }
5204        } else {
5205            fp->eth_q_stats.tx_window_violation_std++;
5206        }
5207
5208        /* lets try to defragment this mbuf and remap it */
5209        fp->eth_q_stats.mbuf_defrag_attempts++;
5210        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5211
5212        m0 = m_defrag(*m_head, M_DONTWAIT);
5213        if (m0 == NULL) {
5214            fp->eth_q_stats.mbuf_defrag_failures++;
5215            /* Ugh, just drop the frame... :( */
5216            rc = ENOBUFS;
5217        } else {
5218            /* defrag successful, try mapping again */
5219            *m_head = m0;
5220            error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5221                                            tx_buf->m_map, m0,
5222                                            segs, &nsegs, BUS_DMA_NOWAIT);
5223            if (error) {
5224                fp->eth_q_stats.tx_dma_mapping_failure++;
5225                /* No sense in trying to defrag/copy chain, drop it. :( */
5226                rc = error;
5227            }
5228            else {
5229                /* if the chain is still too long then drop it */
5230                if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5231                    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5232                    rc = ENODEV;
5233                }
5234            }
5235        }
5236    }
5237
5238bxe_tx_encap_continue:
5239
5240    /* Check for errors */
5241    if (rc) {
5242        if (rc == ENOMEM) {
5243            /* recoverable try again later  */
5244        } else {
5245            fp->eth_q_stats.tx_soft_errors++;
5246            fp->eth_q_stats.mbuf_alloc_tx--;
5247            m_freem(*m_head);
5248            *m_head = NULL;
5249        }
5250
5251        return (rc);
5252    }
5253
5254    /* set flag according to packet type (UNICAST_ADDRESS is default) */
5255    if (m0->m_flags & M_BCAST) {
5256        mac_type = BROADCAST_ADDRESS;
5257    } else if (m0->m_flags & M_MCAST) {
5258        mac_type = MULTICAST_ADDRESS;
5259    }
5260
5261    /* store the mbuf into the mbuf ring */
5262    tx_buf->m        = m0;
5263    tx_buf->first_bd = fp->tx_bd_prod;
5264    tx_buf->flags    = 0;
5265
5266    /* prepare the first transmit (start) BD for the mbuf */
5267    tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5268
5269    BLOGD(sc, DBG_TX,
5270          "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5271          pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5272
5273    tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5274    tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5275    tx_start_bd->nbytes  = htole16(segs[0].ds_len);
5276    total_pkt_size += tx_start_bd->nbytes;
5277    tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5278
5279    tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5280
5281    /* all frames have at least Start BD + Parsing BD */
5282    nbds = nsegs + 1;
5283    tx_start_bd->nbd = htole16(nbds);
5284
5285    if (m0->m_flags & M_VLANTAG) {
5286        tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5287        tx_start_bd->bd_flags.as_bitfield |=
5288            (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5289    } else {
5290        /* vf tx, start bd must hold the ethertype for fw to enforce it */
5291        if (IS_VF(sc)) {
5292            /* map ethernet header to find type and header length */
5293            eh = mtod(m0, struct ether_vlan_header *);
5294            tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5295        } else {
5296            /* used by FW for packet accounting */
5297            tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5298        }
5299    }
5300
5301    /*
5302     * add a parsing BD from the chain. The parsing BD is always added
5303     * though it is only used for TSO and chksum
5304     */
5305    bd_prod = TX_BD_NEXT(bd_prod);
5306
5307    if (m0->m_pkthdr.csum_flags) {
5308        if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5309            fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5310            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5311        }
5312
5313        if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5314            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5315                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5316        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5317            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6   |
5318                                                  ETH_TX_BD_FLAGS_IS_UDP |
5319                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5320        } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5321                   (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5322            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5323        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5324            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5325                                                  ETH_TX_BD_FLAGS_IS_UDP);
5326        }
5327    }
5328
5329    if (!CHIP_IS_E1x(sc)) {
5330        pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5331        memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5332
5333        if (m0->m_pkthdr.csum_flags) {
5334            hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5335        }
5336
5337        SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5338                 mac_type);
5339    } else {
5340        uint16_t global_data = 0;
5341
5342        pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5343        memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5344
5345        if (m0->m_pkthdr.csum_flags) {
5346            hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5347        }
5348
5349        SET_FLAG(global_data,
5350                 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5351        pbd_e1x->global_data |= htole16(global_data);
5352    }
5353
5354    /* setup the parsing BD with TSO specific info */
5355    if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5356        fp->eth_q_stats.tx_ofld_frames_lso++;
5357        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5358
5359        if (__predict_false(tx_start_bd->nbytes > hlen)) {
5360            fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5361
5362            /* split the first BD into header/data making the fw job easy */
5363            nbds++;
5364            tx_start_bd->nbd = htole16(nbds);
5365            tx_start_bd->nbytes = htole16(hlen);
5366
5367            bd_prod = TX_BD_NEXT(bd_prod);
5368
5369            /* new transmit BD after the tx_parse_bd */
5370            tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5371            tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5372            tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5373            tx_data_bd->nbytes  = htole16(segs[0].ds_len - hlen);
5374            if (tx_total_pkt_size_bd == NULL) {
5375                tx_total_pkt_size_bd = tx_data_bd;
5376            }
5377
5378            BLOGD(sc, DBG_TX,
5379                  "TSO split header size is %d (%x:%x) nbds %d\n",
5380                  le16toh(tx_start_bd->nbytes),
5381                  le32toh(tx_start_bd->addr_hi),
5382                  le32toh(tx_start_bd->addr_lo),
5383                  nbds);
5384        }
5385
5386        if (!CHIP_IS_E1x(sc)) {
5387            bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5388        } else {
5389            bxe_set_pbd_lso(m0, pbd_e1x);
5390        }
5391    }
5392
5393    if (pbd_e2_parsing_data) {
5394        pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5395    }
5396
5397    /* prepare remaining BDs, start tx bd contains first seg/frag */
5398    for (i = 1; i < nsegs ; i++) {
5399        bd_prod = TX_BD_NEXT(bd_prod);
5400        tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5401        tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5402        tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5403        tx_data_bd->nbytes  = htole16(segs[i].ds_len);
5404        if (tx_total_pkt_size_bd == NULL) {
5405            tx_total_pkt_size_bd = tx_data_bd;
5406        }
5407        total_pkt_size += tx_data_bd->nbytes;
5408    }
5409
5410    BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5411
5412    if (tx_total_pkt_size_bd != NULL) {
5413        tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5414    }
5415
5416    if (__predict_false(sc->debug & DBG_TX)) {
5417        tmp_bd = tx_buf->first_bd;
5418        for (i = 0; i < nbds; i++)
5419        {
5420            if (i == 0) {
5421                BLOGD(sc, DBG_TX,
5422                      "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5423                      "bd_flags=0x%x hdr_nbds=%d\n",
5424                      tx_start_bd,
5425                      tmp_bd,
5426                      le16toh(tx_start_bd->nbd),
5427                      le16toh(tx_start_bd->vlan_or_ethertype),
5428                      tx_start_bd->bd_flags.as_bitfield,
5429                      (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5430            } else if (i == 1) {
5431                if (pbd_e1x) {
5432                    BLOGD(sc, DBG_TX,
5433                          "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5434                          "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5435                          "tcp_seq=%u total_hlen_w=%u\n",
5436                          pbd_e1x,
5437                          tmp_bd,
5438                          pbd_e1x->global_data,
5439                          pbd_e1x->ip_hlen_w,
5440                          pbd_e1x->ip_id,
5441                          pbd_e1x->lso_mss,
5442                          pbd_e1x->tcp_flags,
5443                          pbd_e1x->tcp_pseudo_csum,
5444                          pbd_e1x->tcp_send_seq,
5445                          le16toh(pbd_e1x->total_hlen_w));
5446                } else { /* if (pbd_e2) */
5447                    BLOGD(sc, DBG_TX,
5448                          "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5449                          "src=%02x:%02x:%02x parsing_data=0x%x\n",
5450                          pbd_e2,
5451                          tmp_bd,
5452                          pbd_e2->data.mac_addr.dst_hi,
5453                          pbd_e2->data.mac_addr.dst_mid,
5454                          pbd_e2->data.mac_addr.dst_lo,
5455                          pbd_e2->data.mac_addr.src_hi,
5456                          pbd_e2->data.mac_addr.src_mid,
5457                          pbd_e2->data.mac_addr.src_lo,
5458                          pbd_e2->parsing_data);
5459                }
5460            }
5461
5462            if (i != 1) { /* skip parse db as it doesn't hold data */
5463                tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5464                BLOGD(sc, DBG_TX,
5465                      "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5466                      tx_data_bd,
5467                      tmp_bd,
5468                      le16toh(tx_data_bd->nbytes),
5469                      le32toh(tx_data_bd->addr_hi),
5470                      le32toh(tx_data_bd->addr_lo));
5471            }
5472
5473            tmp_bd = TX_BD_NEXT(tmp_bd);
5474        }
5475    }
5476
5477    BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5478
5479    /* update TX BD producer index value for next TX */
5480    bd_prod = TX_BD_NEXT(bd_prod);
5481
5482    /*
5483     * If the chain of tx_bd's describing this frame is adjacent to or spans
5484     * an eth_tx_next_bd element then we need to increment the nbds value.
5485     */
5486    if (TX_BD_IDX(bd_prod) < nbds) {
5487        nbds++;
5488    }
5489
5490    /* don't allow reordering of writes for nbd and packets */
5491    mb();
5492
5493    fp->tx_db.data.prod += nbds;
5494
5495    /* producer points to the next free tx_bd at this point */
5496    fp->tx_pkt_prod++;
5497    fp->tx_bd_prod = bd_prod;
5498
5499    DOORBELL(sc, fp->index, fp->tx_db.raw);
5500
5501    fp->eth_q_stats.tx_pkts++;
5502
5503    /* Prevent speculative reads from getting ahead of the status block. */
5504    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5505                      0, 0, BUS_SPACE_BARRIER_READ);
5506
5507    /* Prevent speculative reads from getting ahead of the doorbell. */
5508    bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5509                      0, 0, BUS_SPACE_BARRIER_READ);
5510
5511    return (0);
5512}
5513
5514static void
5515bxe_tx_start_locked(struct bxe_softc    *sc,
5516                    struct ifnet        *ifp,
5517                    struct bxe_fastpath *fp)
5518{
5519    struct mbuf *m = NULL;
5520    int tx_count = 0;
5521    uint16_t tx_bd_avail;
5522
5523    BXE_FP_TX_LOCK_ASSERT(fp);
5524
5525    /* keep adding entries while there are frames to send */
5526    while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
5527
5528        /*
5529         * check for any frames to send
5530         * dequeue can still be NULL even if queue is not empty
5531         */
5532        IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
5533        if (__predict_false(m == NULL)) {
5534            break;
5535        }
5536
5537        /* the mbuf now belongs to us */
5538        fp->eth_q_stats.mbuf_alloc_tx++;
5539
5540        /*
5541         * Put the frame into the transmit ring. If we don't have room,
5542         * place the mbuf back at the head of the TX queue, set the
5543         * OACTIVE flag, and wait for the NIC to drain the chain.
5544         */
5545        if (__predict_false(bxe_tx_encap(fp, &m))) {
5546            fp->eth_q_stats.tx_encap_failures++;
5547            if (m != NULL) {
5548                /* mark the TX queue as full and return the frame */
5549                ifp->if_drv_flags |= IFF_DRV_OACTIVE;
5550                IFQ_DRV_PREPEND(&ifp->if_snd, m);
5551                fp->eth_q_stats.mbuf_alloc_tx--;
5552                fp->eth_q_stats.tx_queue_xoff++;
5553            }
5554
5555            /* stop looking for more work */
5556            break;
5557        }
5558
5559        /* the frame was enqueued successfully */
5560        tx_count++;
5561
5562        /* send a copy of the frame to any BPF listeners. */
5563        BPF_MTAP(ifp, m);
5564
5565        tx_bd_avail = bxe_tx_avail(sc, fp);
5566
5567        /* handle any completions if we're running low */
5568        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5569            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5570            bxe_txeof(sc, fp);
5571            if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
5572                break;
5573            }
5574        }
5575    }
5576
5577    /* all TX packets were dequeued and/or the tx ring is full */
5578    if (tx_count > 0) {
5579        /* reset the TX watchdog timeout timer */
5580        fp->watchdog_timer = BXE_TX_TIMEOUT;
5581    }
5582}
5583
5584/* Legacy (non-RSS) dispatch routine */
5585static void
5586bxe_tx_start(struct ifnet *ifp)
5587{
5588    struct bxe_softc *sc;
5589    struct bxe_fastpath *fp;
5590
5591    sc = ifp->if_softc;
5592
5593    if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
5594        BLOGW(sc, "Interface not running, ignoring transmit request\n");
5595        return;
5596    }
5597
5598    if (!sc->link_vars.link_up) {
5599        BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5600        return;
5601    }
5602
5603    fp = &sc->fp[0];
5604
5605    if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
5606        fp->eth_q_stats.tx_queue_full_return++;
5607        return;
5608    }
5609
5610    BXE_FP_TX_LOCK(fp);
5611    bxe_tx_start_locked(sc, ifp, fp);
5612    BXE_FP_TX_UNLOCK(fp);
5613}
5614
5615#if __FreeBSD_version >= 800000
5616
5617static int
5618bxe_tx_mq_start_locked(struct bxe_softc    *sc,
5619                       struct ifnet        *ifp,
5620                       struct bxe_fastpath *fp,
5621                       struct mbuf         *m)
5622{
5623    struct buf_ring *tx_br = fp->tx_br;
5624    struct mbuf *next;
5625    int depth, rc, tx_count;
5626    uint16_t tx_bd_avail;
5627
5628    rc = tx_count = 0;
5629
5630    BXE_FP_TX_LOCK_ASSERT(fp);
5631
5632    if (!tx_br) {
5633        BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5634        return (EINVAL);
5635    }
5636
5637    if (!sc->link_vars.link_up ||
5638        (ifp->if_drv_flags &
5639        (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) {
5640        rc = drbr_enqueue(ifp, tx_br, m);
5641        goto bxe_tx_mq_start_locked_exit;
5642    }
5643
5644    /* fetch the depth of the driver queue */
5645    depth = drbr_inuse(ifp, tx_br);
5646    if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5647        fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5648    }
5649
5650    if (m == NULL) {
5651        /* no new work, check for pending frames */
5652        next = drbr_dequeue(ifp, tx_br);
5653    } else if (drbr_needs_enqueue(ifp, tx_br)) {
5654        /* have both new and pending work, maintain packet order */
5655        rc = drbr_enqueue(ifp, tx_br, m);
5656        if (rc != 0) {
5657            fp->eth_q_stats.tx_soft_errors++;
5658            goto bxe_tx_mq_start_locked_exit;
5659        }
5660        next = drbr_dequeue(ifp, tx_br);
5661    } else {
5662        /* new work only and nothing pending */
5663        next = m;
5664    }
5665
5666    /* keep adding entries while there are frames to send */
5667    while (next != NULL) {
5668
5669        /* the mbuf now belongs to us */
5670        fp->eth_q_stats.mbuf_alloc_tx++;
5671
5672        /*
5673         * Put the frame into the transmit ring. If we don't have room,
5674         * place the mbuf back at the head of the TX queue, set the
5675         * OACTIVE flag, and wait for the NIC to drain the chain.
5676         */
5677        rc = bxe_tx_encap(fp, &next);
5678        if (__predict_false(rc != 0)) {
5679            fp->eth_q_stats.tx_encap_failures++;
5680            if (next != NULL) {
5681                /* mark the TX queue as full and save the frame */
5682                ifp->if_drv_flags |= IFF_DRV_OACTIVE;
5683                /* XXX this may reorder the frame */
5684                rc = drbr_enqueue(ifp, tx_br, next);
5685                fp->eth_q_stats.mbuf_alloc_tx--;
5686                fp->eth_q_stats.tx_frames_deferred++;
5687            }
5688
5689            /* stop looking for more work */
5690            break;
5691        }
5692
5693        /* the transmit frame was enqueued successfully */
5694        tx_count++;
5695
5696        /* send a copy of the frame to any BPF listeners */
5697        BPF_MTAP(ifp, next);
5698
5699        tx_bd_avail = bxe_tx_avail(sc, fp);
5700
5701        /* handle any completions if we're running low */
5702        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5703            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5704            bxe_txeof(sc, fp);
5705            if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
5706                break;
5707            }
5708        }
5709
5710        next = drbr_dequeue(ifp, tx_br);
5711    }
5712
5713    /* all TX packets were dequeued and/or the tx ring is full */
5714    if (tx_count > 0) {
5715        /* reset the TX watchdog timeout timer */
5716        fp->watchdog_timer = BXE_TX_TIMEOUT;
5717    }
5718
5719bxe_tx_mq_start_locked_exit:
5720
5721    return (rc);
5722}
5723
5724/* Multiqueue (TSS) dispatch routine. */
5725static int
5726bxe_tx_mq_start(struct ifnet *ifp,
5727                struct mbuf  *m)
5728{
5729    struct bxe_softc *sc = ifp->if_softc;
5730    struct bxe_fastpath *fp;
5731    int fp_index, rc;
5732
5733    fp_index = 0; /* default is the first queue */
5734
5735    /* check if flowid is set */
5736
5737    if (BXE_VALID_FLOWID(m))
5738        fp_index = (m->m_pkthdr.flowid % sc->num_queues);
5739
5740    fp = &sc->fp[fp_index];
5741
5742    if (BXE_FP_TX_TRYLOCK(fp)) {
5743        rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
5744        BXE_FP_TX_UNLOCK(fp);
5745    } else
5746        rc = drbr_enqueue(ifp, fp->tx_br, m);
5747
5748    return (rc);
5749}
5750
5751static void
5752bxe_mq_flush(struct ifnet *ifp)
5753{
5754    struct bxe_softc *sc = ifp->if_softc;
5755    struct bxe_fastpath *fp;
5756    struct mbuf *m;
5757    int i;
5758
5759    for (i = 0; i < sc->num_queues; i++) {
5760        fp = &sc->fp[i];
5761
5762        if (fp->state != BXE_FP_STATE_OPEN) {
5763            BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
5764                  fp->index, fp->state);
5765            continue;
5766        }
5767
5768        if (fp->tx_br != NULL) {
5769            BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5770            BXE_FP_TX_LOCK(fp);
5771            while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5772                m_freem(m);
5773            }
5774            BXE_FP_TX_UNLOCK(fp);
5775        }
5776    }
5777
5778    if_qflush(ifp);
5779}
5780
5781#endif /* FreeBSD_version >= 800000 */
5782
5783static uint16_t
5784bxe_cid_ilt_lines(struct bxe_softc *sc)
5785{
5786    if (IS_SRIOV(sc)) {
5787        return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
5788    }
5789    return (L2_ILT_LINES(sc));
5790}
5791
5792static void
5793bxe_ilt_set_info(struct bxe_softc *sc)
5794{
5795    struct ilt_client_info *ilt_client;
5796    struct ecore_ilt *ilt = sc->ilt;
5797    uint16_t line = 0;
5798
5799    ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5800    BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5801
5802    /* CDU */
5803    ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5804    ilt_client->client_num = ILT_CLIENT_CDU;
5805    ilt_client->page_size = CDU_ILT_PAGE_SZ;
5806    ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5807    ilt_client->start = line;
5808    line += bxe_cid_ilt_lines(sc);
5809
5810    if (CNIC_SUPPORT(sc)) {
5811        line += CNIC_ILT_LINES;
5812    }
5813
5814    ilt_client->end = (line - 1);
5815
5816    BLOGD(sc, DBG_LOAD,
5817          "ilt client[CDU]: start %d, end %d, "
5818          "psz 0x%x, flags 0x%x, hw psz %d\n",
5819          ilt_client->start, ilt_client->end,
5820          ilt_client->page_size,
5821          ilt_client->flags,
5822          ilog2(ilt_client->page_size >> 12));
5823
5824    /* QM */
5825    if (QM_INIT(sc->qm_cid_count)) {
5826        ilt_client = &ilt->clients[ILT_CLIENT_QM];
5827        ilt_client->client_num = ILT_CLIENT_QM;
5828        ilt_client->page_size = QM_ILT_PAGE_SZ;
5829        ilt_client->flags = 0;
5830        ilt_client->start = line;
5831
5832        /* 4 bytes for each cid */
5833        line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5834                             QM_ILT_PAGE_SZ);
5835
5836        ilt_client->end = (line - 1);
5837
5838        BLOGD(sc, DBG_LOAD,
5839              "ilt client[QM]: start %d, end %d, "
5840              "psz 0x%x, flags 0x%x, hw psz %d\n",
5841              ilt_client->start, ilt_client->end,
5842              ilt_client->page_size, ilt_client->flags,
5843              ilog2(ilt_client->page_size >> 12));
5844    }
5845
5846    if (CNIC_SUPPORT(sc)) {
5847        /* SRC */
5848        ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5849        ilt_client->client_num = ILT_CLIENT_SRC;
5850        ilt_client->page_size = SRC_ILT_PAGE_SZ;
5851        ilt_client->flags = 0;
5852        ilt_client->start = line;
5853        line += SRC_ILT_LINES;
5854        ilt_client->end = (line - 1);
5855
5856        BLOGD(sc, DBG_LOAD,
5857              "ilt client[SRC]: start %d, end %d, "
5858              "psz 0x%x, flags 0x%x, hw psz %d\n",
5859              ilt_client->start, ilt_client->end,
5860              ilt_client->page_size, ilt_client->flags,
5861              ilog2(ilt_client->page_size >> 12));
5862
5863        /* TM */
5864        ilt_client = &ilt->clients[ILT_CLIENT_TM];
5865        ilt_client->client_num = ILT_CLIENT_TM;
5866        ilt_client->page_size = TM_ILT_PAGE_SZ;
5867        ilt_client->flags = 0;
5868        ilt_client->start = line;
5869        line += TM_ILT_LINES;
5870        ilt_client->end = (line - 1);
5871
5872        BLOGD(sc, DBG_LOAD,
5873              "ilt client[TM]: start %d, end %d, "
5874              "psz 0x%x, flags 0x%x, hw psz %d\n",
5875              ilt_client->start, ilt_client->end,
5876              ilt_client->page_size, ilt_client->flags,
5877              ilog2(ilt_client->page_size >> 12));
5878    }
5879
5880    KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
5881}
5882
5883static void
5884bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
5885{
5886    int i;
5887    uint32_t rx_buf_size;
5888
5889    rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
5890
5891    for (i = 0; i < sc->num_queues; i++) {
5892        if(rx_buf_size <= MCLBYTES){
5893            sc->fp[i].rx_buf_size = rx_buf_size;
5894            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5895        }else if (rx_buf_size <= MJUMPAGESIZE){
5896            sc->fp[i].rx_buf_size = rx_buf_size;
5897            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5898        }else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)){
5899            sc->fp[i].rx_buf_size = MCLBYTES;
5900            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5901        }else if (rx_buf_size <= (2 * MJUMPAGESIZE)){
5902            sc->fp[i].rx_buf_size = MJUMPAGESIZE;
5903            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5904        }else {
5905            sc->fp[i].rx_buf_size = MCLBYTES;
5906            sc->fp[i].mbuf_alloc_size = MCLBYTES;
5907        }
5908    }
5909}
5910
5911static int
5912bxe_alloc_ilt_mem(struct bxe_softc *sc)
5913{
5914    int rc = 0;
5915
5916    if ((sc->ilt =
5917         (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
5918                                    M_BXE_ILT,
5919                                    (M_NOWAIT | M_ZERO))) == NULL) {
5920        rc = 1;
5921    }
5922
5923    return (rc);
5924}
5925
5926static int
5927bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
5928{
5929    int rc = 0;
5930
5931    if ((sc->ilt->lines =
5932         (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
5933                                    M_BXE_ILT,
5934                                    (M_NOWAIT | M_ZERO))) == NULL) {
5935        rc = 1;
5936    }
5937
5938    return (rc);
5939}
5940
5941static void
5942bxe_free_ilt_mem(struct bxe_softc *sc)
5943{
5944    if (sc->ilt != NULL) {
5945        free(sc->ilt, M_BXE_ILT);
5946        sc->ilt = NULL;
5947    }
5948}
5949
5950static void
5951bxe_free_ilt_lines_mem(struct bxe_softc *sc)
5952{
5953    if (sc->ilt->lines != NULL) {
5954        free(sc->ilt->lines, M_BXE_ILT);
5955        sc->ilt->lines = NULL;
5956    }
5957}
5958
5959static void
5960bxe_free_mem(struct bxe_softc *sc)
5961{
5962    int i;
5963
5964    for (i = 0; i < L2_ILT_LINES(sc); i++) {
5965        bxe_dma_free(sc, &sc->context[i].vcxt_dma);
5966        sc->context[i].vcxt = NULL;
5967        sc->context[i].size = 0;
5968    }
5969
5970    ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
5971
5972    bxe_free_ilt_lines_mem(sc);
5973
5974}
5975
5976static int
5977bxe_alloc_mem(struct bxe_softc *sc)
5978{
5979    int context_size;
5980    int allocated;
5981    int i;
5982
5983    /*
5984     * Allocate memory for CDU context:
5985     * This memory is allocated separately and not in the generic ILT
5986     * functions because CDU differs in few aspects:
5987     * 1. There can be multiple entities allocating memory for context -
5988     * regular L2, CNIC, and SRIOV drivers. Each separately controls
5989     * its own ILT lines.
5990     * 2. Since CDU page-size is not a single 4KB page (which is the case
5991     * for the other ILT clients), to be efficient we want to support
5992     * allocation of sub-page-size in the last entry.
5993     * 3. Context pointers are used by the driver to pass to FW / update
5994     * the context (for the other ILT clients the pointers are used just to
5995     * free the memory during unload).
5996     */
5997    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
5998    for (i = 0, allocated = 0; allocated < context_size; i++) {
5999        sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6000                                  (context_size - allocated));
6001
6002        if (bxe_dma_alloc(sc, sc->context[i].size,
6003                          &sc->context[i].vcxt_dma,
6004                          "cdu context") != 0) {
6005            bxe_free_mem(sc);
6006            return (-1);
6007        }
6008
6009        sc->context[i].vcxt =
6010            (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6011
6012        allocated += sc->context[i].size;
6013    }
6014
6015    bxe_alloc_ilt_lines_mem(sc);
6016
6017    BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6018          sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6019    {
6020        for (i = 0; i < 4; i++) {
6021            BLOGD(sc, DBG_LOAD,
6022                  "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6023                  i,
6024                  sc->ilt->clients[i].page_size,
6025                  sc->ilt->clients[i].start,
6026                  sc->ilt->clients[i].end,
6027                  sc->ilt->clients[i].client_num,
6028                  sc->ilt->clients[i].flags);
6029        }
6030    }
6031    if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6032        BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6033        bxe_free_mem(sc);
6034        return (-1);
6035    }
6036
6037    return (0);
6038}
6039
6040static void
6041bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6042{
6043    struct bxe_softc *sc;
6044    int i;
6045
6046    sc = fp->sc;
6047
6048    if (fp->rx_mbuf_tag == NULL) {
6049        return;
6050    }
6051
6052    /* free all mbufs and unload all maps */
6053    for (i = 0; i < RX_BD_TOTAL; i++) {
6054        if (fp->rx_mbuf_chain[i].m_map != NULL) {
6055            bus_dmamap_sync(fp->rx_mbuf_tag,
6056                            fp->rx_mbuf_chain[i].m_map,
6057                            BUS_DMASYNC_POSTREAD);
6058            bus_dmamap_unload(fp->rx_mbuf_tag,
6059                              fp->rx_mbuf_chain[i].m_map);
6060        }
6061
6062        if (fp->rx_mbuf_chain[i].m != NULL) {
6063            m_freem(fp->rx_mbuf_chain[i].m);
6064            fp->rx_mbuf_chain[i].m = NULL;
6065            fp->eth_q_stats.mbuf_alloc_rx--;
6066        }
6067    }
6068}
6069
6070static void
6071bxe_free_tpa_pool(struct bxe_fastpath *fp)
6072{
6073    struct bxe_softc *sc;
6074    int i, max_agg_queues;
6075
6076    sc = fp->sc;
6077
6078    if (fp->rx_mbuf_tag == NULL) {
6079        return;
6080    }
6081
6082    max_agg_queues = MAX_AGG_QS(sc);
6083
6084    /* release all mbufs and unload all DMA maps in the TPA pool */
6085    for (i = 0; i < max_agg_queues; i++) {
6086        if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6087            bus_dmamap_sync(fp->rx_mbuf_tag,
6088                            fp->rx_tpa_info[i].bd.m_map,
6089                            BUS_DMASYNC_POSTREAD);
6090            bus_dmamap_unload(fp->rx_mbuf_tag,
6091                              fp->rx_tpa_info[i].bd.m_map);
6092        }
6093
6094        if (fp->rx_tpa_info[i].bd.m != NULL) {
6095            m_freem(fp->rx_tpa_info[i].bd.m);
6096            fp->rx_tpa_info[i].bd.m = NULL;
6097            fp->eth_q_stats.mbuf_alloc_tpa--;
6098        }
6099    }
6100}
6101
6102static void
6103bxe_free_sge_chain(struct bxe_fastpath *fp)
6104{
6105    struct bxe_softc *sc;
6106    int i;
6107
6108    sc = fp->sc;
6109
6110    if (fp->rx_sge_mbuf_tag == NULL) {
6111        return;
6112    }
6113
6114    /* rree all mbufs and unload all maps */
6115    for (i = 0; i < RX_SGE_TOTAL; i++) {
6116        if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6117            bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6118                            fp->rx_sge_mbuf_chain[i].m_map,
6119                            BUS_DMASYNC_POSTREAD);
6120            bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6121                              fp->rx_sge_mbuf_chain[i].m_map);
6122        }
6123
6124        if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6125            m_freem(fp->rx_sge_mbuf_chain[i].m);
6126            fp->rx_sge_mbuf_chain[i].m = NULL;
6127            fp->eth_q_stats.mbuf_alloc_sge--;
6128        }
6129    }
6130}
6131
6132static void
6133bxe_free_fp_buffers(struct bxe_softc *sc)
6134{
6135    struct bxe_fastpath *fp;
6136    int i;
6137
6138    for (i = 0; i < sc->num_queues; i++) {
6139        fp = &sc->fp[i];
6140
6141#if __FreeBSD_version >= 800000
6142        if (fp->tx_br != NULL) {
6143            /* just in case bxe_mq_flush() wasn't called */
6144            if (mtx_initialized(&fp->tx_mtx)) {
6145                struct mbuf *m;
6146
6147                BXE_FP_TX_LOCK(fp);
6148                while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6149                    m_freem(m);
6150                BXE_FP_TX_UNLOCK(fp);
6151            }
6152        }
6153#endif
6154
6155        /* free all RX buffers */
6156        bxe_free_rx_bd_chain(fp);
6157        bxe_free_tpa_pool(fp);
6158        bxe_free_sge_chain(fp);
6159
6160        if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6161            BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6162                  fp->eth_q_stats.mbuf_alloc_rx);
6163        }
6164
6165        if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6166            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6167                  fp->eth_q_stats.mbuf_alloc_sge);
6168        }
6169
6170        if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6171            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6172                  fp->eth_q_stats.mbuf_alloc_tpa);
6173        }
6174
6175        if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6176            BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6177                  fp->eth_q_stats.mbuf_alloc_tx);
6178        }
6179
6180        /* XXX verify all mbufs were reclaimed */
6181    }
6182}
6183
6184static int
6185bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6186                     uint16_t            prev_index,
6187                     uint16_t            index)
6188{
6189    struct bxe_sw_rx_bd *rx_buf;
6190    struct eth_rx_bd *rx_bd;
6191    bus_dma_segment_t segs[1];
6192    bus_dmamap_t map;
6193    struct mbuf *m;
6194    int nsegs, rc;
6195
6196    rc = 0;
6197
6198    /* allocate the new RX BD mbuf */
6199    m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6200    if (__predict_false(m == NULL)) {
6201        fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6202        return (ENOBUFS);
6203    }
6204
6205    fp->eth_q_stats.mbuf_alloc_rx++;
6206
6207    /* initialize the mbuf buffer length */
6208    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6209
6210    /* map the mbuf into non-paged pool */
6211    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6212                                 fp->rx_mbuf_spare_map,
6213                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6214    if (__predict_false(rc != 0)) {
6215        fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6216        m_freem(m);
6217        fp->eth_q_stats.mbuf_alloc_rx--;
6218        return (rc);
6219    }
6220
6221    /* all mbufs must map to a single segment */
6222    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6223
6224    /* release any existing RX BD mbuf mappings */
6225
6226    if (prev_index != index) {
6227        rx_buf = &fp->rx_mbuf_chain[prev_index];
6228
6229        if (rx_buf->m_map != NULL) {
6230            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6231                            BUS_DMASYNC_POSTREAD);
6232            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6233        }
6234
6235        /*
6236         * We only get here from bxe_rxeof() when the maximum number
6237         * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6238         * holds the mbuf in the prev_index so it's OK to NULL it out
6239         * here without concern of a memory leak.
6240         */
6241        fp->rx_mbuf_chain[prev_index].m = NULL;
6242    }
6243
6244    rx_buf = &fp->rx_mbuf_chain[index];
6245
6246    if (rx_buf->m_map != NULL) {
6247        bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6248                        BUS_DMASYNC_POSTREAD);
6249        bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6250    }
6251
6252    /* save the mbuf and mapping info for a future packet */
6253    map = (prev_index != index) ?
6254              fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6255    rx_buf->m_map = fp->rx_mbuf_spare_map;
6256    fp->rx_mbuf_spare_map = map;
6257    bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6258                    BUS_DMASYNC_PREREAD);
6259    rx_buf->m = m;
6260
6261    rx_bd = &fp->rx_chain[index];
6262    rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6263    rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6264
6265    return (rc);
6266}
6267
6268static int
6269bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6270                      int                 queue)
6271{
6272    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6273    bus_dma_segment_t segs[1];
6274    bus_dmamap_t map;
6275    struct mbuf *m;
6276    int nsegs;
6277    int rc = 0;
6278
6279    /* allocate the new TPA mbuf */
6280    m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6281    if (__predict_false(m == NULL)) {
6282        fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6283        return (ENOBUFS);
6284    }
6285
6286    fp->eth_q_stats.mbuf_alloc_tpa++;
6287
6288    /* initialize the mbuf buffer length */
6289    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6290
6291    /* map the mbuf into non-paged pool */
6292    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6293                                 fp->rx_tpa_info_mbuf_spare_map,
6294                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6295    if (__predict_false(rc != 0)) {
6296        fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6297        m_free(m);
6298        fp->eth_q_stats.mbuf_alloc_tpa--;
6299        return (rc);
6300    }
6301
6302    /* all mbufs must map to a single segment */
6303    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6304
6305    /* release any existing TPA mbuf mapping */
6306    if (tpa_info->bd.m_map != NULL) {
6307        bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6308                        BUS_DMASYNC_POSTREAD);
6309        bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6310    }
6311
6312    /* save the mbuf and mapping info for the TPA mbuf */
6313    map = tpa_info->bd.m_map;
6314    tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6315    fp->rx_tpa_info_mbuf_spare_map = map;
6316    bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6317                    BUS_DMASYNC_PREREAD);
6318    tpa_info->bd.m = m;
6319    tpa_info->seg = segs[0];
6320
6321    return (rc);
6322}
6323
6324/*
6325 * Allocate an mbuf and assign it to the receive scatter gather chain. The
6326 * caller must take care to save a copy of the existing mbuf in the SG mbuf
6327 * chain.
6328 */
6329static int
6330bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6331                      uint16_t            index)
6332{
6333    struct bxe_sw_rx_bd *sge_buf;
6334    struct eth_rx_sge *sge;
6335    bus_dma_segment_t segs[1];
6336    bus_dmamap_t map;
6337    struct mbuf *m;
6338    int nsegs;
6339    int rc = 0;
6340
6341    /* allocate a new SGE mbuf */
6342    m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6343    if (__predict_false(m == NULL)) {
6344        fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6345        return (ENOMEM);
6346    }
6347
6348    fp->eth_q_stats.mbuf_alloc_sge++;
6349
6350    /* initialize the mbuf buffer length */
6351    m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6352
6353    /* map the SGE mbuf into non-paged pool */
6354    rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6355                                 fp->rx_sge_mbuf_spare_map,
6356                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6357    if (__predict_false(rc != 0)) {
6358        fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6359        m_freem(m);
6360        fp->eth_q_stats.mbuf_alloc_sge--;
6361        return (rc);
6362    }
6363
6364    /* all mbufs must map to a single segment */
6365    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6366
6367    sge_buf = &fp->rx_sge_mbuf_chain[index];
6368
6369    /* release any existing SGE mbuf mapping */
6370    if (sge_buf->m_map != NULL) {
6371        bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6372                        BUS_DMASYNC_POSTREAD);
6373        bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6374    }
6375
6376    /* save the mbuf and mapping info for a future packet */
6377    map = sge_buf->m_map;
6378    sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6379    fp->rx_sge_mbuf_spare_map = map;
6380    bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6381                    BUS_DMASYNC_PREREAD);
6382    sge_buf->m = m;
6383
6384    sge = &fp->rx_sge_chain[index];
6385    sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6386    sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6387
6388    return (rc);
6389}
6390
6391static __noinline int
6392bxe_alloc_fp_buffers(struct bxe_softc *sc)
6393{
6394    struct bxe_fastpath *fp;
6395    int i, j, rc = 0;
6396    int ring_prod, cqe_ring_prod;
6397    int max_agg_queues;
6398
6399    for (i = 0; i < sc->num_queues; i++) {
6400        fp = &sc->fp[i];
6401
6402        ring_prod = cqe_ring_prod = 0;
6403        fp->rx_bd_cons = 0;
6404        fp->rx_cq_cons = 0;
6405
6406        /* allocate buffers for the RX BDs in RX BD chain */
6407        for (j = 0; j < sc->max_rx_bufs; j++) {
6408            rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6409            if (rc != 0) {
6410                BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6411                      i, rc);
6412                goto bxe_alloc_fp_buffers_error;
6413            }
6414
6415            ring_prod     = RX_BD_NEXT(ring_prod);
6416            cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6417        }
6418
6419        fp->rx_bd_prod = ring_prod;
6420        fp->rx_cq_prod = cqe_ring_prod;
6421        fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6422
6423        max_agg_queues = MAX_AGG_QS(sc);
6424
6425        fp->tpa_enable = TRUE;
6426
6427        /* fill the TPA pool */
6428        for (j = 0; j < max_agg_queues; j++) {
6429            rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6430            if (rc != 0) {
6431                BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6432                          i, j);
6433                fp->tpa_enable = FALSE;
6434                goto bxe_alloc_fp_buffers_error;
6435            }
6436
6437            fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6438        }
6439
6440        if (fp->tpa_enable) {
6441            /* fill the RX SGE chain */
6442            ring_prod = 0;
6443            for (j = 0; j < RX_SGE_USABLE; j++) {
6444                rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6445                if (rc != 0) {
6446                    BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6447                              i, ring_prod);
6448                    fp->tpa_enable = FALSE;
6449                    ring_prod = 0;
6450                    goto bxe_alloc_fp_buffers_error;
6451                }
6452
6453                ring_prod = RX_SGE_NEXT(ring_prod);
6454            }
6455
6456            fp->rx_sge_prod = ring_prod;
6457        }
6458    }
6459
6460    return (0);
6461
6462bxe_alloc_fp_buffers_error:
6463
6464    /* unwind what was already allocated */
6465    bxe_free_rx_bd_chain(fp);
6466    bxe_free_tpa_pool(fp);
6467    bxe_free_sge_chain(fp);
6468
6469    return (ENOBUFS);
6470}
6471
6472static void
6473bxe_free_fw_stats_mem(struct bxe_softc *sc)
6474{
6475    bxe_dma_free(sc, &sc->fw_stats_dma);
6476
6477    sc->fw_stats_num = 0;
6478
6479    sc->fw_stats_req_size = 0;
6480    sc->fw_stats_req = NULL;
6481    sc->fw_stats_req_mapping = 0;
6482
6483    sc->fw_stats_data_size = 0;
6484    sc->fw_stats_data = NULL;
6485    sc->fw_stats_data_mapping = 0;
6486}
6487
6488static int
6489bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6490{
6491    uint8_t num_queue_stats;
6492    int num_groups;
6493
6494    /* number of queues for statistics is number of eth queues */
6495    num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6496
6497    /*
6498     * Total number of FW statistics requests =
6499     *   1 for port stats + 1 for PF stats + num of queues
6500     */
6501    sc->fw_stats_num = (2 + num_queue_stats);
6502
6503    /*
6504     * Request is built from stats_query_header and an array of
6505     * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6506     * rules. The real number or requests is configured in the
6507     * stats_query_header.
6508     */
6509    num_groups =
6510        ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6511         ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
6512
6513    BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6514          sc->fw_stats_num, num_groups);
6515
6516    sc->fw_stats_req_size =
6517        (sizeof(struct stats_query_header) +
6518         (num_groups * sizeof(struct stats_query_cmd_group)));
6519
6520    /*
6521     * Data for statistics requests + stats_counter.
6522     * stats_counter holds per-STORM counters that are incremented when
6523     * STORM has finished with the current request. Memory for FCoE
6524     * offloaded statistics are counted anyway, even if they will not be sent.
6525     * VF stats are not accounted for here as the data of VF stats is stored
6526     * in memory allocated by the VF, not here.
6527     */
6528    sc->fw_stats_data_size =
6529        (sizeof(struct stats_counter) +
6530         sizeof(struct per_port_stats) +
6531         sizeof(struct per_pf_stats) +
6532         /* sizeof(struct fcoe_statistics_params) + */
6533         (sizeof(struct per_queue_stats) * num_queue_stats));
6534
6535    if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6536                      &sc->fw_stats_dma, "fw stats") != 0) {
6537        bxe_free_fw_stats_mem(sc);
6538        return (-1);
6539    }
6540
6541    /* set up the shortcuts */
6542
6543    sc->fw_stats_req =
6544        (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6545    sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6546
6547    sc->fw_stats_data =
6548        (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6549                                     sc->fw_stats_req_size);
6550    sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6551                                 sc->fw_stats_req_size);
6552
6553    BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6554          (uintmax_t)sc->fw_stats_req_mapping);
6555
6556    BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6557          (uintmax_t)sc->fw_stats_data_mapping);
6558
6559    return (0);
6560}
6561
6562/*
6563 * Bits map:
6564 * 0-7  - Engine0 load counter.
6565 * 8-15 - Engine1 load counter.
6566 * 16   - Engine0 RESET_IN_PROGRESS bit.
6567 * 17   - Engine1 RESET_IN_PROGRESS bit.
6568 * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
6569 *        function on the engine
6570 * 19   - Engine1 ONE_IS_LOADED.
6571 * 20   - Chip reset flow bit. When set none-leader must wait for both engines
6572 *        leader to complete (check for both RESET_IN_PROGRESS bits and not
6573 *        for just the one belonging to its engine).
6574 */
6575#define BXE_RECOVERY_GLOB_REG     MISC_REG_GENERIC_POR_1
6576#define BXE_PATH0_LOAD_CNT_MASK   0x000000ff
6577#define BXE_PATH0_LOAD_CNT_SHIFT  0
6578#define BXE_PATH1_LOAD_CNT_MASK   0x0000ff00
6579#define BXE_PATH1_LOAD_CNT_SHIFT  8
6580#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6581#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6582#define BXE_GLOBAL_RESET_BIT      0x00040000
6583
6584/* set the GLOBAL_RESET bit, should be run under rtnl lock */
6585static void
6586bxe_set_reset_global(struct bxe_softc *sc)
6587{
6588    uint32_t val;
6589    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6590    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6591    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6592    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6593}
6594
6595/* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6596static void
6597bxe_clear_reset_global(struct bxe_softc *sc)
6598{
6599    uint32_t val;
6600    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6601    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6602    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6603    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6604}
6605
6606/* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6607static uint8_t
6608bxe_reset_is_global(struct bxe_softc *sc)
6609{
6610    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6611    BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6612    return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6613}
6614
6615/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6616static void
6617bxe_set_reset_done(struct bxe_softc *sc)
6618{
6619    uint32_t val;
6620    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6621                                 BXE_PATH0_RST_IN_PROG_BIT;
6622
6623    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6624
6625    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6626    /* Clear the bit */
6627    val &= ~bit;
6628    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6629
6630    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6631}
6632
6633/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
6634static void
6635bxe_set_reset_in_progress(struct bxe_softc *sc)
6636{
6637    uint32_t val;
6638    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6639                                 BXE_PATH0_RST_IN_PROG_BIT;
6640
6641    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6642
6643    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6644    /* Set the bit */
6645    val |= bit;
6646    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6647
6648    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6649}
6650
6651/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
6652static uint8_t
6653bxe_reset_is_done(struct bxe_softc *sc,
6654                  int              engine)
6655{
6656    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6657    uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
6658                            BXE_PATH0_RST_IN_PROG_BIT;
6659
6660    /* return false if bit is set */
6661    return (val & bit) ? FALSE : TRUE;
6662}
6663
6664/* get the load status for an engine, should be run under rtnl lock */
6665static uint8_t
6666bxe_get_load_status(struct bxe_softc *sc,
6667                    int              engine)
6668{
6669    uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
6670                             BXE_PATH0_LOAD_CNT_MASK;
6671    uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
6672                              BXE_PATH0_LOAD_CNT_SHIFT;
6673    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6674
6675    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6676
6677    val = ((val & mask) >> shift);
6678
6679    BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
6680
6681    return (val != 0);
6682}
6683
6684/* set pf load mark */
6685/* XXX needs to be under rtnl lock */
6686static void
6687bxe_set_pf_load(struct bxe_softc *sc)
6688{
6689    uint32_t val;
6690    uint32_t val1;
6691    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6692                                  BXE_PATH0_LOAD_CNT_MASK;
6693    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6694                                   BXE_PATH0_LOAD_CNT_SHIFT;
6695
6696    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6697
6698    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6699    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6700
6701    /* get the current counter value */
6702    val1 = ((val & mask) >> shift);
6703
6704    /* set bit of this PF */
6705    val1 |= (1 << SC_ABS_FUNC(sc));
6706
6707    /* clear the old value */
6708    val &= ~mask;
6709
6710    /* set the new one */
6711    val |= ((val1 << shift) & mask);
6712
6713    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6714
6715    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6716}
6717
6718/* clear pf load mark */
6719/* XXX needs to be under rtnl lock */
6720static uint8_t
6721bxe_clear_pf_load(struct bxe_softc *sc)
6722{
6723    uint32_t val1, val;
6724    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6725                                  BXE_PATH0_LOAD_CNT_MASK;
6726    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6727                                   BXE_PATH0_LOAD_CNT_SHIFT;
6728
6729    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6730    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6731    BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
6732
6733    /* get the current counter value */
6734    val1 = (val & mask) >> shift;
6735
6736    /* clear bit of that PF */
6737    val1 &= ~(1 << SC_ABS_FUNC(sc));
6738
6739    /* clear the old value */
6740    val &= ~mask;
6741
6742    /* set the new one */
6743    val |= ((val1 << shift) & mask);
6744
6745    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6746    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6747    return (val1 != 0);
6748}
6749
6750/* send load requrest to mcp and analyze response */
6751static int
6752bxe_nic_load_request(struct bxe_softc *sc,
6753                     uint32_t         *load_code)
6754{
6755    /* init fw_seq */
6756    sc->fw_seq =
6757        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
6758         DRV_MSG_SEQ_NUMBER_MASK);
6759
6760    BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6761
6762    /* get the current FW pulse sequence */
6763    sc->fw_drv_pulse_wr_seq =
6764        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
6765         DRV_PULSE_SEQ_MASK);
6766
6767    BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
6768          sc->fw_drv_pulse_wr_seq);
6769
6770    /* load request */
6771    (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
6772                                  DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
6773
6774    /* if the MCP fails to respond we must abort */
6775    if (!(*load_code)) {
6776        BLOGE(sc, "MCP response failure!\n");
6777        return (-1);
6778    }
6779
6780    /* if MCP refused then must abort */
6781    if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6782        BLOGE(sc, "MCP refused load request\n");
6783        return (-1);
6784    }
6785
6786    return (0);
6787}
6788
6789/*
6790 * Check whether another PF has already loaded FW to chip. In virtualized
6791 * environments a pf from anoth VM may have already initialized the device
6792 * including loading FW.
6793 */
6794static int
6795bxe_nic_load_analyze_req(struct bxe_softc *sc,
6796                         uint32_t         load_code)
6797{
6798    uint32_t my_fw, loaded_fw;
6799
6800    /* is another pf loaded on this engine? */
6801    if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
6802        (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
6803        /* build my FW version dword */
6804        my_fw = (BCM_5710_FW_MAJOR_VERSION +
6805                 (BCM_5710_FW_MINOR_VERSION << 8 ) +
6806                 (BCM_5710_FW_REVISION_VERSION << 16) +
6807                 (BCM_5710_FW_ENGINEERING_VERSION << 24));
6808
6809        /* read loaded FW from chip */
6810        loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
6811        BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
6812              loaded_fw, my_fw);
6813
6814        /* abort nic load if version mismatch */
6815        if (my_fw != loaded_fw) {
6816            BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
6817                  loaded_fw, my_fw);
6818            return (-1);
6819        }
6820    }
6821
6822    return (0);
6823}
6824
6825/* mark PMF if applicable */
6826static void
6827bxe_nic_load_pmf(struct bxe_softc *sc,
6828                 uint32_t         load_code)
6829{
6830    uint32_t ncsi_oem_data_addr;
6831
6832    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6833        (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
6834        (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
6835        /*
6836         * Barrier here for ordering between the writing to sc->port.pmf here
6837         * and reading it from the periodic task.
6838         */
6839        sc->port.pmf = 1;
6840        mb();
6841    } else {
6842        sc->port.pmf = 0;
6843    }
6844
6845    BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
6846
6847    /* XXX needed? */
6848    if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
6849        if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
6850            ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
6851            if (ncsi_oem_data_addr) {
6852                REG_WR(sc,
6853                       (ncsi_oem_data_addr +
6854                        offsetof(struct glob_ncsi_oem_data, driver_version)),
6855                       0);
6856            }
6857        }
6858    }
6859}
6860
6861static void
6862bxe_read_mf_cfg(struct bxe_softc *sc)
6863{
6864    int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
6865    int abs_func;
6866    int vn;
6867
6868    if (BXE_NOMCP(sc)) {
6869        return; /* what should be the default bvalue in this case */
6870    }
6871
6872    /*
6873     * The formula for computing the absolute function number is...
6874     * For 2 port configuration (4 functions per port):
6875     *   abs_func = 2 * vn + SC_PORT + SC_PATH
6876     * For 4 port configuration (2 functions per port):
6877     *   abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
6878     */
6879    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
6880        abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
6881        if (abs_func >= E1H_FUNC_MAX) {
6882            break;
6883        }
6884        sc->devinfo.mf_info.mf_config[vn] =
6885            MFCFG_RD(sc, func_mf_config[abs_func].config);
6886    }
6887
6888    if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
6889        FUNC_MF_CFG_FUNC_DISABLED) {
6890        BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
6891        sc->flags |= BXE_MF_FUNC_DIS;
6892    } else {
6893        BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
6894        sc->flags &= ~BXE_MF_FUNC_DIS;
6895    }
6896}
6897
6898/* acquire split MCP access lock register */
6899static int bxe_acquire_alr(struct bxe_softc *sc)
6900{
6901    uint32_t j, val;
6902
6903    for (j = 0; j < 1000; j++) {
6904        val = (1UL << 31);
6905        REG_WR(sc, GRCBASE_MCP + 0x9c, val);
6906        val = REG_RD(sc, GRCBASE_MCP + 0x9c);
6907        if (val & (1L << 31))
6908            break;
6909
6910        DELAY(5000);
6911    }
6912
6913    if (!(val & (1L << 31))) {
6914        BLOGE(sc, "Cannot acquire MCP access lock register\n");
6915        return (-1);
6916    }
6917
6918    return (0);
6919}
6920
6921/* release split MCP access lock register */
6922static void bxe_release_alr(struct bxe_softc *sc)
6923{
6924    REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
6925}
6926
6927static void
6928bxe_fan_failure(struct bxe_softc *sc)
6929{
6930    int port = SC_PORT(sc);
6931    uint32_t ext_phy_config;
6932
6933    /* mark the failure */
6934    ext_phy_config =
6935        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
6936
6937    ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6938    ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
6939    SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
6940             ext_phy_config);
6941
6942    /* log the failure */
6943    BLOGW(sc, "Fan Failure has caused the driver to shutdown "
6944              "the card to prevent permanent damage. "
6945              "Please contact OEM Support for assistance\n");
6946
6947    /* XXX */
6948#if 1
6949    bxe_panic(sc, ("Schedule task to handle fan failure\n"));
6950#else
6951    /*
6952     * Schedule device reset (unload)
6953     * This is due to some boards consuming sufficient power when driver is
6954     * up to overheat if fan fails.
6955     */
6956    bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
6957    schedule_delayed_work(&sc->sp_rtnl_task, 0);
6958#endif
6959}
6960
6961/* this function is called upon a link interrupt */
6962static void
6963bxe_link_attn(struct bxe_softc *sc)
6964{
6965    uint32_t pause_enabled = 0;
6966    struct host_port_stats *pstats;
6967    int cmng_fns;
6968
6969    /* Make sure that we are synced with the current statistics */
6970    bxe_stats_handle(sc, STATS_EVENT_STOP);
6971
6972    elink_link_update(&sc->link_params, &sc->link_vars);
6973
6974    if (sc->link_vars.link_up) {
6975
6976        /* dropless flow control */
6977        if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
6978            pause_enabled = 0;
6979
6980            if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
6981                pause_enabled = 1;
6982            }
6983
6984            REG_WR(sc,
6985                   (BAR_USTRORM_INTMEM +
6986                    USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
6987                   pause_enabled);
6988        }
6989
6990        if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
6991            pstats = BXE_SP(sc, port_stats);
6992            /* reset old mac stats */
6993            memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
6994        }
6995
6996        if (sc->state == BXE_STATE_OPEN) {
6997            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
6998        }
6999    }
7000
7001    if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7002        cmng_fns = bxe_get_cmng_fns_mode(sc);
7003
7004        if (cmng_fns != CMNG_FNS_NONE) {
7005            bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7006            storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7007        } else {
7008            /* rate shaping and fairness are disabled */
7009            BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7010        }
7011    }
7012
7013    bxe_link_report_locked(sc);
7014
7015    if (IS_MF(sc)) {
7016        ; // XXX bxe_link_sync_notify(sc);
7017    }
7018}
7019
7020static void
7021bxe_attn_int_asserted(struct bxe_softc *sc,
7022                      uint32_t         asserted)
7023{
7024    int port = SC_PORT(sc);
7025    uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7026                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
7027    uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7028                                        NIG_REG_MASK_INTERRUPT_PORT0;
7029    uint32_t aeu_mask;
7030    uint32_t nig_mask = 0;
7031    uint32_t reg_addr;
7032    uint32_t igu_acked;
7033    uint32_t cnt;
7034
7035    if (sc->attn_state & asserted) {
7036        BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7037    }
7038
7039    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7040
7041    aeu_mask = REG_RD(sc, aeu_addr);
7042
7043    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7044          aeu_mask, asserted);
7045
7046    aeu_mask &= ~(asserted & 0x3ff);
7047
7048    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7049
7050    REG_WR(sc, aeu_addr, aeu_mask);
7051
7052    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7053
7054    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7055    sc->attn_state |= asserted;
7056    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7057
7058    if (asserted & ATTN_HARD_WIRED_MASK) {
7059        if (asserted & ATTN_NIG_FOR_FUNC) {
7060
7061	    bxe_acquire_phy_lock(sc);
7062            /* save nig interrupt mask */
7063            nig_mask = REG_RD(sc, nig_int_mask_addr);
7064
7065            /* If nig_mask is not set, no need to call the update function */
7066            if (nig_mask) {
7067                REG_WR(sc, nig_int_mask_addr, 0);
7068
7069                bxe_link_attn(sc);
7070            }
7071
7072            /* handle unicore attn? */
7073        }
7074
7075        if (asserted & ATTN_SW_TIMER_4_FUNC) {
7076            BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7077        }
7078
7079        if (asserted & GPIO_2_FUNC) {
7080            BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7081        }
7082
7083        if (asserted & GPIO_3_FUNC) {
7084            BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7085        }
7086
7087        if (asserted & GPIO_4_FUNC) {
7088            BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7089        }
7090
7091        if (port == 0) {
7092            if (asserted & ATTN_GENERAL_ATTN_1) {
7093                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7094                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7095            }
7096            if (asserted & ATTN_GENERAL_ATTN_2) {
7097                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7098                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7099            }
7100            if (asserted & ATTN_GENERAL_ATTN_3) {
7101                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7102                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7103            }
7104        } else {
7105            if (asserted & ATTN_GENERAL_ATTN_4) {
7106                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7107                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7108            }
7109            if (asserted & ATTN_GENERAL_ATTN_5) {
7110                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7111                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7112            }
7113            if (asserted & ATTN_GENERAL_ATTN_6) {
7114                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7115                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7116            }
7117        }
7118    } /* hardwired */
7119
7120    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7121        reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7122    } else {
7123        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7124    }
7125
7126    BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7127          asserted,
7128          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7129    REG_WR(sc, reg_addr, asserted);
7130
7131    /* now set back the mask */
7132    if (asserted & ATTN_NIG_FOR_FUNC) {
7133        /*
7134         * Verify that IGU ack through BAR was written before restoring
7135         * NIG mask. This loop should exit after 2-3 iterations max.
7136         */
7137        if (sc->devinfo.int_block != INT_BLOCK_HC) {
7138            cnt = 0;
7139
7140            do {
7141                igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7142            } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7143                     (++cnt < MAX_IGU_ATTN_ACK_TO));
7144
7145            if (!igu_acked) {
7146                BLOGE(sc, "Failed to verify IGU ack on time\n");
7147            }
7148
7149            mb();
7150        }
7151
7152        REG_WR(sc, nig_int_mask_addr, nig_mask);
7153
7154	bxe_release_phy_lock(sc);
7155    }
7156}
7157
7158static void
7159bxe_print_next_block(struct bxe_softc *sc,
7160                     int              idx,
7161                     const char       *blk)
7162{
7163    BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7164}
7165
7166static int
7167bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7168                              uint32_t         sig,
7169                              int              par_num,
7170                              uint8_t          print)
7171{
7172    uint32_t cur_bit = 0;
7173    int i = 0;
7174
7175    for (i = 0; sig; i++) {
7176        cur_bit = ((uint32_t)0x1 << i);
7177        if (sig & cur_bit) {
7178            switch (cur_bit) {
7179            case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7180                if (print)
7181                    bxe_print_next_block(sc, par_num++, "BRB");
7182                break;
7183            case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7184                if (print)
7185                    bxe_print_next_block(sc, par_num++, "PARSER");
7186                break;
7187            case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7188                if (print)
7189                    bxe_print_next_block(sc, par_num++, "TSDM");
7190                break;
7191            case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7192                if (print)
7193                    bxe_print_next_block(sc, par_num++, "SEARCHER");
7194                break;
7195            case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7196                if (print)
7197                    bxe_print_next_block(sc, par_num++, "TCM");
7198                break;
7199            case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7200                if (print)
7201                    bxe_print_next_block(sc, par_num++, "TSEMI");
7202                break;
7203            case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7204                if (print)
7205                    bxe_print_next_block(sc, par_num++, "XPB");
7206                break;
7207            }
7208
7209            /* Clear the bit */
7210            sig &= ~cur_bit;
7211        }
7212    }
7213
7214    return (par_num);
7215}
7216
7217static int
7218bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7219                              uint32_t         sig,
7220                              int              par_num,
7221                              uint8_t          *global,
7222                              uint8_t          print)
7223{
7224    int i = 0;
7225    uint32_t cur_bit = 0;
7226    for (i = 0; sig; i++) {
7227        cur_bit = ((uint32_t)0x1 << i);
7228        if (sig & cur_bit) {
7229            switch (cur_bit) {
7230            case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7231                if (print)
7232                    bxe_print_next_block(sc, par_num++, "PBF");
7233                break;
7234            case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7235                if (print)
7236                    bxe_print_next_block(sc, par_num++, "QM");
7237                break;
7238            case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7239                if (print)
7240                    bxe_print_next_block(sc, par_num++, "TM");
7241                break;
7242            case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7243                if (print)
7244                    bxe_print_next_block(sc, par_num++, "XSDM");
7245                break;
7246            case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7247                if (print)
7248                    bxe_print_next_block(sc, par_num++, "XCM");
7249                break;
7250            case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7251                if (print)
7252                    bxe_print_next_block(sc, par_num++, "XSEMI");
7253                break;
7254            case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7255                if (print)
7256                    bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7257                break;
7258            case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7259                if (print)
7260                    bxe_print_next_block(sc, par_num++, "NIG");
7261                break;
7262            case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7263                if (print)
7264                    bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7265                *global = TRUE;
7266                break;
7267            case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7268                if (print)
7269                    bxe_print_next_block(sc, par_num++, "DEBUG");
7270                break;
7271            case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7272                if (print)
7273                    bxe_print_next_block(sc, par_num++, "USDM");
7274                break;
7275            case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7276                if (print)
7277                    bxe_print_next_block(sc, par_num++, "UCM");
7278                break;
7279            case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7280                if (print)
7281                    bxe_print_next_block(sc, par_num++, "USEMI");
7282                break;
7283            case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7284                if (print)
7285                    bxe_print_next_block(sc, par_num++, "UPB");
7286                break;
7287            case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7288                if (print)
7289                    bxe_print_next_block(sc, par_num++, "CSDM");
7290                break;
7291            case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7292                if (print)
7293                    bxe_print_next_block(sc, par_num++, "CCM");
7294                break;
7295            }
7296
7297            /* Clear the bit */
7298            sig &= ~cur_bit;
7299        }
7300    }
7301
7302    return (par_num);
7303}
7304
7305static int
7306bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7307                              uint32_t         sig,
7308                              int              par_num,
7309                              uint8_t          print)
7310{
7311    uint32_t cur_bit = 0;
7312    int i = 0;
7313
7314    for (i = 0; sig; i++) {
7315        cur_bit = ((uint32_t)0x1 << i);
7316        if (sig & cur_bit) {
7317            switch (cur_bit) {
7318            case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7319                if (print)
7320                    bxe_print_next_block(sc, par_num++, "CSEMI");
7321                break;
7322            case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7323                if (print)
7324                    bxe_print_next_block(sc, par_num++, "PXP");
7325                break;
7326            case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7327                if (print)
7328                    bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7329                break;
7330            case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7331                if (print)
7332                    bxe_print_next_block(sc, par_num++, "CFC");
7333                break;
7334            case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7335                if (print)
7336                    bxe_print_next_block(sc, par_num++, "CDU");
7337                break;
7338            case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7339                if (print)
7340                    bxe_print_next_block(sc, par_num++, "DMAE");
7341                break;
7342            case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7343                if (print)
7344                    bxe_print_next_block(sc, par_num++, "IGU");
7345                break;
7346            case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7347                if (print)
7348                    bxe_print_next_block(sc, par_num++, "MISC");
7349                break;
7350            }
7351
7352            /* Clear the bit */
7353            sig &= ~cur_bit;
7354        }
7355    }
7356
7357    return (par_num);
7358}
7359
7360static int
7361bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7362                              uint32_t         sig,
7363                              int              par_num,
7364                              uint8_t          *global,
7365                              uint8_t          print)
7366{
7367    uint32_t cur_bit = 0;
7368    int i = 0;
7369
7370    for (i = 0; sig; i++) {
7371        cur_bit = ((uint32_t)0x1 << i);
7372        if (sig & cur_bit) {
7373            switch (cur_bit) {
7374            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7375                if (print)
7376                    bxe_print_next_block(sc, par_num++, "MCP ROM");
7377                *global = TRUE;
7378                break;
7379            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7380                if (print)
7381                    bxe_print_next_block(sc, par_num++,
7382                              "MCP UMP RX");
7383                *global = TRUE;
7384                break;
7385            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7386                if (print)
7387                    bxe_print_next_block(sc, par_num++,
7388                              "MCP UMP TX");
7389                *global = TRUE;
7390                break;
7391            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7392                if (print)
7393                    bxe_print_next_block(sc, par_num++,
7394                              "MCP SCPAD");
7395                *global = TRUE;
7396                break;
7397            }
7398
7399            /* Clear the bit */
7400            sig &= ~cur_bit;
7401        }
7402    }
7403
7404    return (par_num);
7405}
7406
7407static int
7408bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7409                              uint32_t         sig,
7410                              int              par_num,
7411                              uint8_t          print)
7412{
7413    uint32_t cur_bit = 0;
7414    int i = 0;
7415
7416    for (i = 0; sig; i++) {
7417        cur_bit = ((uint32_t)0x1 << i);
7418        if (sig & cur_bit) {
7419            switch (cur_bit) {
7420            case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7421                if (print)
7422                    bxe_print_next_block(sc, par_num++, "PGLUE_B");
7423                break;
7424            case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7425                if (print)
7426                    bxe_print_next_block(sc, par_num++, "ATC");
7427                break;
7428            }
7429
7430            /* Clear the bit */
7431            sig &= ~cur_bit;
7432        }
7433    }
7434
7435    return (par_num);
7436}
7437
7438static uint8_t
7439bxe_parity_attn(struct bxe_softc *sc,
7440                uint8_t          *global,
7441                uint8_t          print,
7442                uint32_t         *sig)
7443{
7444    int par_num = 0;
7445
7446    if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7447        (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7448        (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7449        (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7450        (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7451        BLOGE(sc, "Parity error: HW block parity attention:\n"
7452                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7453              (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7454              (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7455              (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7456              (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7457              (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7458
7459        if (print)
7460            BLOGI(sc, "Parity errors detected in blocks: ");
7461
7462        par_num =
7463            bxe_check_blocks_with_parity0(sc, sig[0] &
7464                                          HW_PRTY_ASSERT_SET_0,
7465                                          par_num, print);
7466        par_num =
7467            bxe_check_blocks_with_parity1(sc, sig[1] &
7468                                          HW_PRTY_ASSERT_SET_1,
7469                                          par_num, global, print);
7470        par_num =
7471            bxe_check_blocks_with_parity2(sc, sig[2] &
7472                                          HW_PRTY_ASSERT_SET_2,
7473                                          par_num, print);
7474        par_num =
7475            bxe_check_blocks_with_parity3(sc, sig[3] &
7476                                          HW_PRTY_ASSERT_SET_3,
7477                                          par_num, global, print);
7478        par_num =
7479            bxe_check_blocks_with_parity4(sc, sig[4] &
7480                                          HW_PRTY_ASSERT_SET_4,
7481                                          par_num, print);
7482
7483        if (print)
7484            BLOGI(sc, "\n");
7485
7486        return (TRUE);
7487    }
7488
7489    return (FALSE);
7490}
7491
7492static uint8_t
7493bxe_chk_parity_attn(struct bxe_softc *sc,
7494                    uint8_t          *global,
7495                    uint8_t          print)
7496{
7497    struct attn_route attn = { {0} };
7498    int port = SC_PORT(sc);
7499
7500    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7501    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7502    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7503    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7504
7505    /*
7506     * Since MCP attentions can't be disabled inside the block, we need to
7507     * read AEU registers to see whether they're currently disabled
7508     */
7509    attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
7510                                      : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
7511                         MISC_AEU_ENABLE_MCP_PRTY_BITS) |
7512                        ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
7513
7514
7515    if (!CHIP_IS_E1x(sc))
7516        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7517
7518    return (bxe_parity_attn(sc, global, print, attn.sig));
7519}
7520
7521static void
7522bxe_attn_int_deasserted4(struct bxe_softc *sc,
7523                         uint32_t         attn)
7524{
7525    uint32_t val;
7526
7527    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7528        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7529        BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7530        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7531            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7532        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7533            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7534        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7535            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7536        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7537            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7538        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7539            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7540        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7541            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7542        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7543            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7544        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7545            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7546        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7547            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7548    }
7549
7550    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7551        val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7552        BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7553        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7554            BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7555        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7556            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7557        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7558            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7559        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7560            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7561        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7562            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7563        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7564            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7565    }
7566
7567    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7568                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7569        BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7570              (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7571                                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7572    }
7573}
7574
7575static void
7576bxe_e1h_disable(struct bxe_softc *sc)
7577{
7578    int port = SC_PORT(sc);
7579
7580    bxe_tx_disable(sc);
7581
7582    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7583}
7584
7585static void
7586bxe_e1h_enable(struct bxe_softc *sc)
7587{
7588    int port = SC_PORT(sc);
7589
7590    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7591
7592    // XXX bxe_tx_enable(sc);
7593}
7594
7595/*
7596 * called due to MCP event (on pmf):
7597 *   reread new bandwidth configuration
7598 *   configure FW
7599 *   notify others function about the change
7600 */
7601static void
7602bxe_config_mf_bw(struct bxe_softc *sc)
7603{
7604    if (sc->link_vars.link_up) {
7605        bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
7606        // XXX bxe_link_sync_notify(sc);
7607    }
7608
7609    storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7610}
7611
7612static void
7613bxe_set_mf_bw(struct bxe_softc *sc)
7614{
7615    bxe_config_mf_bw(sc);
7616    bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
7617}
7618
7619static void
7620bxe_handle_eee_event(struct bxe_softc *sc)
7621{
7622    BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7623    bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
7624}
7625
7626#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
7627
7628static void
7629bxe_drv_info_ether_stat(struct bxe_softc *sc)
7630{
7631    struct eth_stats_info *ether_stat =
7632        &sc->sp->drv_info_to_mcp.ether_stat;
7633
7634    strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7635            ETH_STAT_INFO_VERSION_LEN);
7636
7637    /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
7638    sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7639                                          DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
7640                                          ether_stat->mac_local + MAC_PAD,
7641                                          MAC_PAD, ETH_ALEN);
7642
7643    ether_stat->mtu_size = sc->mtu;
7644
7645    ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
7646    if (sc->ifnet->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) {
7647        ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
7648    }
7649
7650    // XXX ether_stat->feature_flags |= ???;
7651
7652    ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
7653
7654    ether_stat->txq_size = sc->tx_ring_size;
7655    ether_stat->rxq_size = sc->rx_ring_size;
7656}
7657
7658static void
7659bxe_handle_drv_info_req(struct bxe_softc *sc)
7660{
7661    enum drv_info_opcode op_code;
7662    uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
7663
7664    /* if drv_info version supported by MFW doesn't match - send NACK */
7665    if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
7666        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7667        return;
7668    }
7669
7670    op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
7671               DRV_INFO_CONTROL_OP_CODE_SHIFT);
7672
7673    memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
7674
7675    switch (op_code) {
7676    case ETH_STATS_OPCODE:
7677        bxe_drv_info_ether_stat(sc);
7678        break;
7679    case FCOE_STATS_OPCODE:
7680    case ISCSI_STATS_OPCODE:
7681    default:
7682        /* if op code isn't supported - send NACK */
7683        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7684        return;
7685    }
7686
7687    /*
7688     * If we got drv_info attn from MFW then these fields are defined in
7689     * shmem2 for sure
7690     */
7691    SHMEM2_WR(sc, drv_info_host_addr_lo,
7692              U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7693    SHMEM2_WR(sc, drv_info_host_addr_hi,
7694              U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7695
7696    bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
7697}
7698
7699static void
7700bxe_dcc_event(struct bxe_softc *sc,
7701              uint32_t         dcc_event)
7702{
7703    BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
7704
7705    if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
7706        /*
7707         * This is the only place besides the function initialization
7708         * where the sc->flags can change so it is done without any
7709         * locks
7710         */
7711        if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
7712            BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
7713            sc->flags |= BXE_MF_FUNC_DIS;
7714            bxe_e1h_disable(sc);
7715        } else {
7716            BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
7717            sc->flags &= ~BXE_MF_FUNC_DIS;
7718            bxe_e1h_enable(sc);
7719        }
7720        dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
7721    }
7722
7723    if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
7724        bxe_config_mf_bw(sc);
7725        dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
7726    }
7727
7728    /* Report results to MCP */
7729    if (dcc_event)
7730        bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
7731    else
7732        bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
7733}
7734
7735static void
7736bxe_pmf_update(struct bxe_softc *sc)
7737{
7738    int port = SC_PORT(sc);
7739    uint32_t val;
7740
7741    sc->port.pmf = 1;
7742    BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
7743
7744    /*
7745     * We need the mb() to ensure the ordering between the writing to
7746     * sc->port.pmf here and reading it from the bxe_periodic_task().
7747     */
7748    mb();
7749
7750    /* queue a periodic task */
7751    // XXX schedule task...
7752
7753    // XXX bxe_dcbx_pmf_update(sc);
7754
7755    /* enable nig attention */
7756    val = (0xff0f | (1 << (SC_VN(sc) + 4)));
7757    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7758        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
7759        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
7760    } else if (!CHIP_IS_E1x(sc)) {
7761        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
7762        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
7763    }
7764
7765    bxe_stats_handle(sc, STATS_EVENT_PMF);
7766}
7767
7768static int
7769bxe_mc_assert(struct bxe_softc *sc)
7770{
7771    char last_idx;
7772    int i, rc = 0;
7773    uint32_t row0, row1, row2, row3;
7774
7775    /* XSTORM */
7776    last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
7777    if (last_idx)
7778        BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7779
7780    /* print the asserts */
7781    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7782
7783        row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
7784        row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
7785        row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
7786        row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
7787
7788        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7789            BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7790                  i, row3, row2, row1, row0);
7791            rc++;
7792        } else {
7793            break;
7794        }
7795    }
7796
7797    /* TSTORM */
7798    last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
7799    if (last_idx) {
7800        BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7801    }
7802
7803    /* print the asserts */
7804    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7805
7806        row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
7807        row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
7808        row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
7809        row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
7810
7811        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7812            BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7813                  i, row3, row2, row1, row0);
7814            rc++;
7815        } else {
7816            break;
7817        }
7818    }
7819
7820    /* CSTORM */
7821    last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
7822    if (last_idx) {
7823        BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7824    }
7825
7826    /* print the asserts */
7827    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7828
7829        row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
7830        row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
7831        row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
7832        row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
7833
7834        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7835            BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7836                  i, row3, row2, row1, row0);
7837            rc++;
7838        } else {
7839            break;
7840        }
7841    }
7842
7843    /* USTORM */
7844    last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
7845    if (last_idx) {
7846        BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7847    }
7848
7849    /* print the asserts */
7850    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7851
7852        row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
7853        row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
7854        row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
7855        row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
7856
7857        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7858            BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7859                  i, row3, row2, row1, row0);
7860            rc++;
7861        } else {
7862            break;
7863        }
7864    }
7865
7866    return (rc);
7867}
7868
7869static void
7870bxe_attn_int_deasserted3(struct bxe_softc *sc,
7871                         uint32_t         attn)
7872{
7873    int func = SC_FUNC(sc);
7874    uint32_t val;
7875
7876    if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
7877
7878        if (attn & BXE_PMF_LINK_ASSERT(sc)) {
7879
7880            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7881            bxe_read_mf_cfg(sc);
7882            sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
7883                MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
7884            val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
7885
7886            if (val & DRV_STATUS_DCC_EVENT_MASK)
7887                bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
7888
7889            if (val & DRV_STATUS_SET_MF_BW)
7890                bxe_set_mf_bw(sc);
7891
7892            if (val & DRV_STATUS_DRV_INFO_REQ)
7893                bxe_handle_drv_info_req(sc);
7894
7895            if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
7896                bxe_pmf_update(sc);
7897
7898            if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
7899                bxe_handle_eee_event(sc);
7900
7901            if (sc->link_vars.periodic_flags &
7902                ELINK_PERIODIC_FLAGS_LINK_EVENT) {
7903                /* sync with link */
7904		bxe_acquire_phy_lock(sc);
7905                sc->link_vars.periodic_flags &=
7906                    ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
7907		bxe_release_phy_lock(sc);
7908                if (IS_MF(sc))
7909                    ; // XXX bxe_link_sync_notify(sc);
7910                bxe_link_report(sc);
7911            }
7912
7913            /*
7914             * Always call it here: bxe_link_report() will
7915             * prevent the link indication duplication.
7916             */
7917            bxe_link_status_update(sc);
7918
7919        } else if (attn & BXE_MC_ASSERT_BITS) {
7920
7921            BLOGE(sc, "MC assert!\n");
7922            bxe_mc_assert(sc);
7923            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
7924            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
7925            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
7926            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
7927            bxe_panic(sc, ("MC assert!\n"));
7928
7929        } else if (attn & BXE_MCP_ASSERT) {
7930
7931            BLOGE(sc, "MCP assert!\n");
7932            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
7933            // XXX bxe_fw_dump(sc);
7934
7935        } else {
7936            BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
7937        }
7938    }
7939
7940    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
7941        BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
7942        if (attn & BXE_GRC_TIMEOUT) {
7943            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
7944            BLOGE(sc, "GRC time-out 0x%08x\n", val);
7945        }
7946        if (attn & BXE_GRC_RSV) {
7947            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
7948            BLOGE(sc, "GRC reserved 0x%08x\n", val);
7949        }
7950        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
7951    }
7952}
7953
7954static void
7955bxe_attn_int_deasserted2(struct bxe_softc *sc,
7956                         uint32_t         attn)
7957{
7958    int port = SC_PORT(sc);
7959    int reg_offset;
7960    uint32_t val0, mask0, val1, mask1;
7961    uint32_t val;
7962
7963    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
7964        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
7965        BLOGE(sc, "CFC hw attention 0x%08x\n", val);
7966        /* CFC error attention */
7967        if (val & 0x2) {
7968            BLOGE(sc, "FATAL error from CFC\n");
7969        }
7970    }
7971
7972    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
7973        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
7974        BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
7975        /* RQ_USDMDP_FIFO_OVERFLOW */
7976        if (val & 0x18000) {
7977            BLOGE(sc, "FATAL error from PXP\n");
7978        }
7979
7980        if (!CHIP_IS_E1x(sc)) {
7981            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
7982            BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
7983        }
7984    }
7985
7986#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
7987#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
7988
7989    if (attn & AEU_PXP2_HW_INT_BIT) {
7990        /*  CQ47854 workaround do not panic on
7991         *  PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
7992         */
7993        if (!CHIP_IS_E1x(sc)) {
7994            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
7995            val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
7996            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
7997            val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
7998            /*
7999             * If the olny PXP2_EOP_ERROR_BIT is set in
8000             * STS0 and STS1 - clear it
8001             *
8002             * probably we lose additional attentions between
8003             * STS0 and STS_CLR0, in this case user will not
8004             * be notified about them
8005             */
8006            if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8007                !(val1 & mask1))
8008                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8009
8010            /* print the register, since no one can restore it */
8011            BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8012
8013            /*
8014             * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
8015             * then notify
8016             */
8017            if (val0 & PXP2_EOP_ERROR_BIT) {
8018                BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
8019
8020                /*
8021                 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8022                 * set then clear attention from PXP2 block without panic
8023                 */
8024                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8025                    ((val1 & mask1) == 0))
8026                    attn &= ~AEU_PXP2_HW_INT_BIT;
8027            }
8028        }
8029    }
8030
8031    if (attn & HW_INTERRUT_ASSERT_SET_2) {
8032        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8033                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8034
8035        val = REG_RD(sc, reg_offset);
8036        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8037        REG_WR(sc, reg_offset, val);
8038
8039        BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8040              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
8041        bxe_panic(sc, ("HW block attention set2\n"));
8042    }
8043}
8044
8045static void
8046bxe_attn_int_deasserted1(struct bxe_softc *sc,
8047                         uint32_t         attn)
8048{
8049    int port = SC_PORT(sc);
8050    int reg_offset;
8051    uint32_t val;
8052
8053    if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8054        val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8055        BLOGE(sc, "DB hw attention 0x%08x\n", val);
8056        /* DORQ discard attention */
8057        if (val & 0x2) {
8058            BLOGE(sc, "FATAL error from DORQ\n");
8059        }
8060    }
8061
8062    if (attn & HW_INTERRUT_ASSERT_SET_1) {
8063        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8064                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8065
8066        val = REG_RD(sc, reg_offset);
8067        val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8068        REG_WR(sc, reg_offset, val);
8069
8070        BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8071              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8072        bxe_panic(sc, ("HW block attention set1\n"));
8073    }
8074}
8075
8076static void
8077bxe_attn_int_deasserted0(struct bxe_softc *sc,
8078                         uint32_t         attn)
8079{
8080    int port = SC_PORT(sc);
8081    int reg_offset;
8082    uint32_t val;
8083
8084    reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8085                          MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8086
8087    if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8088        val = REG_RD(sc, reg_offset);
8089        val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8090        REG_WR(sc, reg_offset, val);
8091
8092        BLOGW(sc, "SPIO5 hw attention\n");
8093
8094        /* Fan failure attention */
8095        elink_hw_reset_phy(&sc->link_params);
8096        bxe_fan_failure(sc);
8097    }
8098
8099    if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8100	bxe_acquire_phy_lock(sc);
8101        elink_handle_module_detect_int(&sc->link_params);
8102	bxe_release_phy_lock(sc);
8103    }
8104
8105    if (attn & HW_INTERRUT_ASSERT_SET_0) {
8106        val = REG_RD(sc, reg_offset);
8107        val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8108        REG_WR(sc, reg_offset, val);
8109
8110        bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n",
8111                       (attn & HW_INTERRUT_ASSERT_SET_0)));
8112    }
8113}
8114
8115static void
8116bxe_attn_int_deasserted(struct bxe_softc *sc,
8117                        uint32_t         deasserted)
8118{
8119    struct attn_route attn;
8120    struct attn_route *group_mask;
8121    int port = SC_PORT(sc);
8122    int index;
8123    uint32_t reg_addr;
8124    uint32_t val;
8125    uint32_t aeu_mask;
8126    uint8_t global = FALSE;
8127
8128    /*
8129     * Need to take HW lock because MCP or other port might also
8130     * try to handle this event.
8131     */
8132    bxe_acquire_alr(sc);
8133
8134    if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8135        /* XXX
8136         * In case of parity errors don't handle attentions so that
8137         * other function would "see" parity errors.
8138         */
8139        sc->recovery_state = BXE_RECOVERY_INIT;
8140        // XXX schedule a recovery task...
8141        /* disable HW interrupts */
8142        bxe_int_disable(sc);
8143        bxe_release_alr(sc);
8144        return;
8145    }
8146
8147    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8148    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8149    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8150    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8151    if (!CHIP_IS_E1x(sc)) {
8152        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8153    } else {
8154        attn.sig[4] = 0;
8155    }
8156
8157    BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8158          attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8159
8160    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8161        if (deasserted & (1 << index)) {
8162            group_mask = &sc->attn_group[index];
8163
8164            BLOGD(sc, DBG_INTR,
8165                  "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8166                  group_mask->sig[0], group_mask->sig[1],
8167                  group_mask->sig[2], group_mask->sig[3],
8168                  group_mask->sig[4]);
8169
8170            bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8171            bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8172            bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8173            bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8174            bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8175        }
8176    }
8177
8178    bxe_release_alr(sc);
8179
8180    if (sc->devinfo.int_block == INT_BLOCK_HC) {
8181        reg_addr = (HC_REG_COMMAND_REG + port*32 +
8182                    COMMAND_REG_ATTN_BITS_CLR);
8183    } else {
8184        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8185    }
8186
8187    val = ~deasserted;
8188    BLOGD(sc, DBG_INTR,
8189          "about to mask 0x%08x at %s addr 0x%08x\n", val,
8190          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8191    REG_WR(sc, reg_addr, val);
8192
8193    if (~sc->attn_state & deasserted) {
8194        BLOGE(sc, "IGU error\n");
8195    }
8196
8197    reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8198                      MISC_REG_AEU_MASK_ATTN_FUNC_0;
8199
8200    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8201
8202    aeu_mask = REG_RD(sc, reg_addr);
8203
8204    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8205          aeu_mask, deasserted);
8206    aeu_mask |= (deasserted & 0x3ff);
8207    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8208
8209    REG_WR(sc, reg_addr, aeu_mask);
8210    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8211
8212    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8213    sc->attn_state &= ~deasserted;
8214    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8215}
8216
8217static void
8218bxe_attn_int(struct bxe_softc *sc)
8219{
8220    /* read local copy of bits */
8221    uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8222    uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8223    uint32_t attn_state = sc->attn_state;
8224
8225    /* look for changed bits */
8226    uint32_t asserted   =  attn_bits & ~attn_ack & ~attn_state;
8227    uint32_t deasserted = ~attn_bits &  attn_ack &  attn_state;
8228
8229    BLOGD(sc, DBG_INTR,
8230          "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8231          attn_bits, attn_ack, asserted, deasserted);
8232
8233    if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8234        BLOGE(sc, "BAD attention state\n");
8235    }
8236
8237    /* handle bits that were raised */
8238    if (asserted) {
8239        bxe_attn_int_asserted(sc, asserted);
8240    }
8241
8242    if (deasserted) {
8243        bxe_attn_int_deasserted(sc, deasserted);
8244    }
8245}
8246
8247static uint16_t
8248bxe_update_dsb_idx(struct bxe_softc *sc)
8249{
8250    struct host_sp_status_block *def_sb = sc->def_sb;
8251    uint16_t rc = 0;
8252
8253    mb(); /* status block is written to by the chip */
8254
8255    if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8256        sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8257        rc |= BXE_DEF_SB_ATT_IDX;
8258    }
8259
8260    if (sc->def_idx != def_sb->sp_sb.running_index) {
8261        sc->def_idx = def_sb->sp_sb.running_index;
8262        rc |= BXE_DEF_SB_IDX;
8263    }
8264
8265    mb();
8266
8267    return (rc);
8268}
8269
8270static inline struct ecore_queue_sp_obj *
8271bxe_cid_to_q_obj(struct bxe_softc *sc,
8272                 uint32_t         cid)
8273{
8274    BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8275    return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8276}
8277
8278static void
8279bxe_handle_mcast_eqe(struct bxe_softc *sc)
8280{
8281    struct ecore_mcast_ramrod_params rparam;
8282    int rc;
8283
8284    memset(&rparam, 0, sizeof(rparam));
8285
8286    rparam.mcast_obj = &sc->mcast_obj;
8287
8288    BXE_MCAST_LOCK(sc);
8289
8290    /* clear pending state for the last command */
8291    sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8292
8293    /* if there are pending mcast commands - send them */
8294    if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8295        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8296        if (rc < 0) {
8297            BLOGD(sc, DBG_SP,
8298                "ERROR: Failed to send pending mcast commands (%d)\n", rc);
8299        }
8300    }
8301
8302    BXE_MCAST_UNLOCK(sc);
8303}
8304
8305static void
8306bxe_handle_classification_eqe(struct bxe_softc      *sc,
8307                              union event_ring_elem *elem)
8308{
8309    unsigned long ramrod_flags = 0;
8310    int rc = 0;
8311    uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8312    struct ecore_vlan_mac_obj *vlan_mac_obj;
8313
8314    /* always push next commands out, don't wait here */
8315    bit_set(&ramrod_flags, RAMROD_CONT);
8316
8317    switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8318    case ECORE_FILTER_MAC_PENDING:
8319        BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8320        vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8321        break;
8322
8323    case ECORE_FILTER_MCAST_PENDING:
8324        BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8325        /*
8326         * This is only relevant for 57710 where multicast MACs are
8327         * configured as unicast MACs using the same ramrod.
8328         */
8329        bxe_handle_mcast_eqe(sc);
8330        return;
8331
8332    default:
8333        BLOGE(sc, "Unsupported classification command: %d\n",
8334              elem->message.data.eth_event.echo);
8335        return;
8336    }
8337
8338    rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8339
8340    if (rc < 0) {
8341        BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8342    } else if (rc > 0) {
8343        BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8344    }
8345}
8346
8347static void
8348bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
8349                       union event_ring_elem *elem)
8350{
8351    bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8352
8353    /* send rx_mode command again if was requested */
8354    if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8355                               &sc->sp_state)) {
8356        bxe_set_storm_rx_mode(sc);
8357    }
8358}
8359
8360static void
8361bxe_update_eq_prod(struct bxe_softc *sc,
8362                   uint16_t         prod)
8363{
8364    storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8365    wmb(); /* keep prod updates ordered */
8366}
8367
8368static void
8369bxe_eq_int(struct bxe_softc *sc)
8370{
8371    uint16_t hw_cons, sw_cons, sw_prod;
8372    union event_ring_elem *elem;
8373    uint8_t echo;
8374    uint32_t cid;
8375    uint8_t opcode;
8376    int spqe_cnt = 0;
8377    struct ecore_queue_sp_obj *q_obj;
8378    struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8379    struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8380
8381    hw_cons = le16toh(*sc->eq_cons_sb);
8382
8383    /*
8384     * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
8385     * when we get to the next-page we need to adjust so the loop
8386     * condition below will be met. The next element is the size of a
8387     * regular element and hence incrementing by 1
8388     */
8389    if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
8390        hw_cons++;
8391    }
8392
8393    /*
8394     * This function may never run in parallel with itself for a
8395     * specific sc and no need for a read memory barrier here.
8396     */
8397    sw_cons = sc->eq_cons;
8398    sw_prod = sc->eq_prod;
8399
8400    BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8401          hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8402
8403    for (;
8404         sw_cons != hw_cons;
8405         sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8406
8407        elem = &sc->eq[EQ_DESC(sw_cons)];
8408
8409        /* elem CID originates from FW, actually LE */
8410        cid = SW_CID(elem->message.data.cfc_del_event.cid);
8411        opcode = elem->message.opcode;
8412
8413        /* handle eq element */
8414        switch (opcode) {
8415
8416        case EVENT_RING_OPCODE_STAT_QUERY:
8417            BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8418                  sc->stats_comp++);
8419            /* nothing to do with stats comp */
8420            goto next_spqe;
8421
8422        case EVENT_RING_OPCODE_CFC_DEL:
8423            /* handle according to cid range */
8424            /* we may want to verify here that the sc state is HALTING */
8425            BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8426            q_obj = bxe_cid_to_q_obj(sc, cid);
8427            if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8428                break;
8429            }
8430            goto next_spqe;
8431
8432        case EVENT_RING_OPCODE_STOP_TRAFFIC:
8433            BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8434            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8435                break;
8436            }
8437            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8438            goto next_spqe;
8439
8440        case EVENT_RING_OPCODE_START_TRAFFIC:
8441            BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8442            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8443                break;
8444            }
8445            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8446            goto next_spqe;
8447
8448        case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8449            echo = elem->message.data.function_update_event.echo;
8450            if (echo == SWITCH_UPDATE) {
8451                BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8452                if (f_obj->complete_cmd(sc, f_obj,
8453                                        ECORE_F_CMD_SWITCH_UPDATE)) {
8454                    break;
8455                }
8456            }
8457            else {
8458                BLOGD(sc, DBG_SP,
8459                      "AFEX: ramrod completed FUNCTION_UPDATE\n");
8460            }
8461            goto next_spqe;
8462
8463        case EVENT_RING_OPCODE_FORWARD_SETUP:
8464            q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8465            if (q_obj->complete_cmd(sc, q_obj,
8466                                    ECORE_Q_CMD_SETUP_TX_ONLY)) {
8467                break;
8468            }
8469            goto next_spqe;
8470
8471        case EVENT_RING_OPCODE_FUNCTION_START:
8472            BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8473            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8474                break;
8475            }
8476            goto next_spqe;
8477
8478        case EVENT_RING_OPCODE_FUNCTION_STOP:
8479            BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8480            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8481                break;
8482            }
8483            goto next_spqe;
8484        }
8485
8486        switch (opcode | sc->state) {
8487        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8488        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8489            cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8490            BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8491            rss_raw->clear_pending(rss_raw);
8492            break;
8493
8494        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8495        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8496        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8497        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8498        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8499        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8500            BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8501            bxe_handle_classification_eqe(sc, elem);
8502            break;
8503
8504        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8505        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8506        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8507            BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8508            bxe_handle_mcast_eqe(sc);
8509            break;
8510
8511        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8512        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8513        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8514            BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8515            bxe_handle_rx_mode_eqe(sc, elem);
8516            break;
8517
8518        default:
8519            /* unknown event log error and continue */
8520            BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8521                  elem->message.opcode, sc->state);
8522        }
8523
8524next_spqe:
8525        spqe_cnt++;
8526    } /* for */
8527
8528    mb();
8529    atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8530
8531    sc->eq_cons = sw_cons;
8532    sc->eq_prod = sw_prod;
8533
8534    /* make sure that above mem writes were issued towards the memory */
8535    wmb();
8536
8537    /* update producer */
8538    bxe_update_eq_prod(sc, sc->eq_prod);
8539}
8540
8541static void
8542bxe_handle_sp_tq(void *context,
8543                 int  pending)
8544{
8545    struct bxe_softc *sc = (struct bxe_softc *)context;
8546    uint16_t status;
8547
8548    BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8549
8550    /* what work needs to be performed? */
8551    status = bxe_update_dsb_idx(sc);
8552
8553    BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
8554
8555    /* HW attentions */
8556    if (status & BXE_DEF_SB_ATT_IDX) {
8557        BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8558        bxe_attn_int(sc);
8559        status &= ~BXE_DEF_SB_ATT_IDX;
8560    }
8561
8562    /* SP events: STAT_QUERY and others */
8563    if (status & BXE_DEF_SB_IDX) {
8564        /* handle EQ completions */
8565        BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8566        bxe_eq_int(sc);
8567        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8568                   le16toh(sc->def_idx), IGU_INT_NOP, 1);
8569        status &= ~BXE_DEF_SB_IDX;
8570    }
8571
8572    /* if status is non zero then something went wrong */
8573    if (__predict_false(status)) {
8574        BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
8575    }
8576
8577    /* ack status block only if something was actually handled */
8578    bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8579               le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
8580
8581    /*
8582     * Must be called after the EQ processing (since eq leads to sriov
8583     * ramrod completion flows).
8584     * This flow may have been scheduled by the arrival of a ramrod
8585     * completion, or by the sriov code rescheduling itself.
8586     */
8587    // XXX bxe_iov_sp_task(sc);
8588
8589}
8590
8591static void
8592bxe_handle_fp_tq(void *context,
8593                 int  pending)
8594{
8595    struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
8596    struct bxe_softc *sc = fp->sc;
8597    uint8_t more_tx = FALSE;
8598    uint8_t more_rx = FALSE;
8599
8600    BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
8601
8602    /* XXX
8603     * IFF_DRV_RUNNING state can't be checked here since we process
8604     * slowpath events on a client queue during setup. Instead
8605     * we need to add a "process/continue" flag here that the driver
8606     * can use to tell the task here not to do anything.
8607     */
8608#if 0
8609    if (!(sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) {
8610        return;
8611    }
8612#endif
8613
8614    /* update the fastpath index */
8615    bxe_update_fp_sb_idx(fp);
8616
8617    /* XXX add loop here if ever support multiple tx CoS */
8618    /* fp->txdata[cos] */
8619    if (bxe_has_tx_work(fp)) {
8620        BXE_FP_TX_LOCK(fp);
8621        more_tx = bxe_txeof(sc, fp);
8622        BXE_FP_TX_UNLOCK(fp);
8623    }
8624
8625    if (bxe_has_rx_work(fp)) {
8626        more_rx = bxe_rxeof(sc, fp);
8627    }
8628
8629    if (more_rx /*|| more_tx*/) {
8630        /* still more work to do */
8631        taskqueue_enqueue_fast(fp->tq, &fp->tq_task);
8632        return;
8633    }
8634
8635    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8636               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8637}
8638
8639static void
8640bxe_task_fp(struct bxe_fastpath *fp)
8641{
8642    struct bxe_softc *sc = fp->sc;
8643    uint8_t more_tx = FALSE;
8644    uint8_t more_rx = FALSE;
8645
8646    BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
8647
8648    /* update the fastpath index */
8649    bxe_update_fp_sb_idx(fp);
8650
8651    /* XXX add loop here if ever support multiple tx CoS */
8652    /* fp->txdata[cos] */
8653    if (bxe_has_tx_work(fp)) {
8654        BXE_FP_TX_LOCK(fp);
8655        more_tx = bxe_txeof(sc, fp);
8656        BXE_FP_TX_UNLOCK(fp);
8657    }
8658
8659    if (bxe_has_rx_work(fp)) {
8660        more_rx = bxe_rxeof(sc, fp);
8661    }
8662
8663    if (more_rx /*|| more_tx*/) {
8664        /* still more work to do, bail out if this ISR and process later */
8665        taskqueue_enqueue_fast(fp->tq, &fp->tq_task);
8666        return;
8667    }
8668
8669    /*
8670     * Here we write the fastpath index taken before doing any tx or rx work.
8671     * It is very well possible other hw events occurred up to this point and
8672     * they were actually processed accordingly above. Since we're going to
8673     * write an older fastpath index, an interrupt is coming which we might
8674     * not do any work in.
8675     */
8676    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8677               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8678}
8679
8680/*
8681 * Legacy interrupt entry point.
8682 *
8683 * Verifies that the controller generated the interrupt and
8684 * then calls a separate routine to handle the various
8685 * interrupt causes: link, RX, and TX.
8686 */
8687static void
8688bxe_intr_legacy(void *xsc)
8689{
8690    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8691    struct bxe_fastpath *fp;
8692    uint16_t status, mask;
8693    int i;
8694
8695    BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
8696
8697    /*
8698     * 0 for ustorm, 1 for cstorm
8699     * the bits returned from ack_int() are 0-15
8700     * bit 0 = attention status block
8701     * bit 1 = fast path status block
8702     * a mask of 0x2 or more = tx/rx event
8703     * a mask of 1 = slow path event
8704     */
8705
8706    status = bxe_ack_int(sc);
8707
8708    /* the interrupt is not for us */
8709    if (__predict_false(status == 0)) {
8710        BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
8711        return;
8712    }
8713
8714    BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
8715
8716    FOR_EACH_ETH_QUEUE(sc, i) {
8717        fp = &sc->fp[i];
8718        mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
8719        if (status & mask) {
8720            /* acknowledge and disable further fastpath interrupts */
8721            bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8722            bxe_task_fp(fp);
8723            status &= ~mask;
8724        }
8725    }
8726
8727    if (__predict_false(status & 0x1)) {
8728        /* acknowledge and disable further slowpath interrupts */
8729        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8730
8731        /* schedule slowpath handler */
8732        taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);
8733
8734        status &= ~0x1;
8735    }
8736
8737    if (__predict_false(status)) {
8738        BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
8739    }
8740}
8741
8742/* slowpath interrupt entry point */
8743static void
8744bxe_intr_sp(void *xsc)
8745{
8746    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8747
8748    BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
8749
8750    /* acknowledge and disable further slowpath interrupts */
8751    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8752
8753    /* schedule slowpath handler */
8754    taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task);
8755}
8756
8757/* fastpath interrupt entry point */
8758static void
8759bxe_intr_fp(void *xfp)
8760{
8761    struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
8762    struct bxe_softc *sc = fp->sc;
8763
8764    BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
8765
8766    BLOGD(sc, DBG_INTR,
8767          "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
8768          curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
8769
8770    /* acknowledge and disable further fastpath interrupts */
8771    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8772
8773    bxe_task_fp(fp);
8774}
8775
8776/* Release all interrupts allocated by the driver. */
8777static void
8778bxe_interrupt_free(struct bxe_softc *sc)
8779{
8780    int i;
8781
8782    switch (sc->interrupt_mode) {
8783    case INTR_MODE_INTX:
8784        BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
8785        if (sc->intr[0].resource != NULL) {
8786            bus_release_resource(sc->dev,
8787                                 SYS_RES_IRQ,
8788                                 sc->intr[0].rid,
8789                                 sc->intr[0].resource);
8790        }
8791        break;
8792    case INTR_MODE_MSI:
8793        for (i = 0; i < sc->intr_count; i++) {
8794            BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
8795            if (sc->intr[i].resource && sc->intr[i].rid) {
8796                bus_release_resource(sc->dev,
8797                                     SYS_RES_IRQ,
8798                                     sc->intr[i].rid,
8799                                     sc->intr[i].resource);
8800            }
8801        }
8802        pci_release_msi(sc->dev);
8803        break;
8804    case INTR_MODE_MSIX:
8805        for (i = 0; i < sc->intr_count; i++) {
8806            BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
8807            if (sc->intr[i].resource && sc->intr[i].rid) {
8808                bus_release_resource(sc->dev,
8809                                     SYS_RES_IRQ,
8810                                     sc->intr[i].rid,
8811                                     sc->intr[i].resource);
8812            }
8813        }
8814        pci_release_msi(sc->dev);
8815        break;
8816    default:
8817        /* nothing to do as initial allocation failed */
8818        break;
8819    }
8820}
8821
8822/*
8823 * This function determines and allocates the appropriate
8824 * interrupt based on system capabilites and user request.
8825 *
8826 * The user may force a particular interrupt mode, specify
8827 * the number of receive queues, specify the method for
8828 * distribuitng received frames to receive queues, or use
8829 * the default settings which will automatically select the
8830 * best supported combination.  In addition, the OS may or
8831 * may not support certain combinations of these settings.
8832 * This routine attempts to reconcile the settings requested
8833 * by the user with the capabilites available from the system
8834 * to select the optimal combination of features.
8835 *
8836 * Returns:
8837 *   0 = Success, !0 = Failure.
8838 */
8839static int
8840bxe_interrupt_alloc(struct bxe_softc *sc)
8841{
8842    int msix_count = 0;
8843    int msi_count = 0;
8844    int num_requested = 0;
8845    int num_allocated = 0;
8846    int rid, i, j;
8847    int rc;
8848
8849    /* get the number of available MSI/MSI-X interrupts from the OS */
8850    if (sc->interrupt_mode > 0) {
8851        if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
8852            msix_count = pci_msix_count(sc->dev);
8853        }
8854
8855        if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
8856            msi_count = pci_msi_count(sc->dev);
8857        }
8858
8859        BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
8860              msi_count, msix_count);
8861    }
8862
8863    do { /* try allocating MSI-X interrupt resources (at least 2) */
8864        if (sc->interrupt_mode != INTR_MODE_MSIX) {
8865            break;
8866        }
8867
8868        if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
8869            (msix_count < 2)) {
8870            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8871            break;
8872        }
8873
8874        /* ask for the necessary number of MSI-X vectors */
8875        num_requested = min((sc->num_queues + 1), msix_count);
8876
8877        BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
8878
8879        num_allocated = num_requested;
8880        if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
8881            BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
8882            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8883            break;
8884        }
8885
8886        if (num_allocated < 2) { /* possible? */
8887            BLOGE(sc, "MSI-X allocation less than 2!\n");
8888            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8889            pci_release_msi(sc->dev);
8890            break;
8891        }
8892
8893        BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
8894              num_requested, num_allocated);
8895
8896        /* best effort so use the number of vectors allocated to us */
8897        sc->intr_count = num_allocated;
8898        sc->num_queues = num_allocated - 1;
8899
8900        rid = 1; /* initial resource identifier */
8901
8902        /* allocate the MSI-X vectors */
8903        for (i = 0; i < num_allocated; i++) {
8904            sc->intr[i].rid = (rid + i);
8905
8906            if ((sc->intr[i].resource =
8907                 bus_alloc_resource_any(sc->dev,
8908                                        SYS_RES_IRQ,
8909                                        &sc->intr[i].rid,
8910                                        RF_ACTIVE)) == NULL) {
8911                BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
8912                      i, (rid + i));
8913
8914                for (j = (i - 1); j >= 0; j--) {
8915                    bus_release_resource(sc->dev,
8916                                         SYS_RES_IRQ,
8917                                         sc->intr[j].rid,
8918                                         sc->intr[j].resource);
8919                }
8920
8921                sc->intr_count = 0;
8922                sc->num_queues = 0;
8923                sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8924                pci_release_msi(sc->dev);
8925                break;
8926            }
8927
8928            BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
8929        }
8930    } while (0);
8931
8932    do { /* try allocating MSI vector resources (at least 2) */
8933        if (sc->interrupt_mode != INTR_MODE_MSI) {
8934            break;
8935        }
8936
8937        if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
8938            (msi_count < 1)) {
8939            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
8940            break;
8941        }
8942
8943        /* ask for a single MSI vector */
8944        num_requested = 1;
8945
8946        BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
8947
8948        num_allocated = num_requested;
8949        if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
8950            BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
8951            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
8952            break;
8953        }
8954
8955        if (num_allocated != 1) { /* possible? */
8956            BLOGE(sc, "MSI allocation is not 1!\n");
8957            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
8958            pci_release_msi(sc->dev);
8959            break;
8960        }
8961
8962        BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
8963              num_requested, num_allocated);
8964
8965        /* best effort so use the number of vectors allocated to us */
8966        sc->intr_count = num_allocated;
8967        sc->num_queues = num_allocated;
8968
8969        rid = 1; /* initial resource identifier */
8970
8971        sc->intr[0].rid = rid;
8972
8973        if ((sc->intr[0].resource =
8974             bus_alloc_resource_any(sc->dev,
8975                                    SYS_RES_IRQ,
8976                                    &sc->intr[0].rid,
8977                                    RF_ACTIVE)) == NULL) {
8978            BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
8979            sc->intr_count = 0;
8980            sc->num_queues = 0;
8981            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
8982            pci_release_msi(sc->dev);
8983            break;
8984        }
8985
8986        BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
8987    } while (0);
8988
8989    do { /* try allocating INTx vector resources */
8990        if (sc->interrupt_mode != INTR_MODE_INTX) {
8991            break;
8992        }
8993
8994        BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
8995
8996        /* only one vector for INTx */
8997        sc->intr_count = 1;
8998        sc->num_queues = 1;
8999
9000        rid = 0; /* initial resource identifier */
9001
9002        sc->intr[0].rid = rid;
9003
9004        if ((sc->intr[0].resource =
9005             bus_alloc_resource_any(sc->dev,
9006                                    SYS_RES_IRQ,
9007                                    &sc->intr[0].rid,
9008                                    (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9009            BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9010            sc->intr_count = 0;
9011            sc->num_queues = 0;
9012            sc->interrupt_mode = -1; /* Failed! */
9013            break;
9014        }
9015
9016        BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9017    } while (0);
9018
9019    if (sc->interrupt_mode == -1) {
9020        BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9021        rc = 1;
9022    } else {
9023        BLOGD(sc, DBG_LOAD,
9024              "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9025              sc->interrupt_mode, sc->num_queues);
9026        rc = 0;
9027    }
9028
9029    return (rc);
9030}
9031
9032static void
9033bxe_interrupt_detach(struct bxe_softc *sc)
9034{
9035    struct bxe_fastpath *fp;
9036    int i;
9037
9038    /* release interrupt resources */
9039    for (i = 0; i < sc->intr_count; i++) {
9040        if (sc->intr[i].resource && sc->intr[i].tag) {
9041            BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9042            bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9043        }
9044    }
9045
9046    for (i = 0; i < sc->num_queues; i++) {
9047        fp = &sc->fp[i];
9048        if (fp->tq) {
9049            taskqueue_drain(fp->tq, &fp->tq_task);
9050            taskqueue_free(fp->tq);
9051            fp->tq = NULL;
9052        }
9053    }
9054
9055
9056    if (sc->sp_tq) {
9057        taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9058        taskqueue_free(sc->sp_tq);
9059        sc->sp_tq = NULL;
9060    }
9061}
9062
9063/*
9064 * Enables interrupts and attach to the ISR.
9065 *
9066 * When using multiple MSI/MSI-X vectors the first vector
9067 * is used for slowpath operations while all remaining
9068 * vectors are used for fastpath operations.  If only a
9069 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9070 * ISR must look for both slowpath and fastpath completions.
9071 */
9072static int
9073bxe_interrupt_attach(struct bxe_softc *sc)
9074{
9075    struct bxe_fastpath *fp;
9076    int rc = 0;
9077    int i;
9078
9079    snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9080             "bxe%d_sp_tq", sc->unit);
9081    TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9082    sc->sp_tq = taskqueue_create_fast(sc->sp_tq_name, M_NOWAIT,
9083                                      taskqueue_thread_enqueue,
9084                                      &sc->sp_tq);
9085    taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9086                            "%s", sc->sp_tq_name);
9087
9088
9089    for (i = 0; i < sc->num_queues; i++) {
9090        fp = &sc->fp[i];
9091        snprintf(fp->tq_name, sizeof(fp->tq_name),
9092                 "bxe%d_fp%d_tq", sc->unit, i);
9093        TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9094        fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT,
9095                                       taskqueue_thread_enqueue,
9096                                       &fp->tq);
9097        taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9098                                "%s", fp->tq_name);
9099    }
9100
9101    /* setup interrupt handlers */
9102    if (sc->interrupt_mode == INTR_MODE_MSIX) {
9103        BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9104
9105        /*
9106         * Setup the interrupt handler. Note that we pass the driver instance
9107         * to the interrupt handler for the slowpath.
9108         */
9109        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9110                                 (INTR_TYPE_NET | INTR_MPSAFE),
9111                                 NULL, bxe_intr_sp, sc,
9112                                 &sc->intr[0].tag)) != 0) {
9113            BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9114            goto bxe_interrupt_attach_exit;
9115        }
9116
9117        bus_describe_intr(sc->dev, sc->intr[0].resource,
9118                          sc->intr[0].tag, "sp");
9119
9120        /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9121
9122        /* initialize the fastpath vectors (note the first was used for sp) */
9123        for (i = 0; i < sc->num_queues; i++) {
9124            fp = &sc->fp[i];
9125            BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9126
9127            /*
9128             * Setup the interrupt handler. Note that we pass the
9129             * fastpath context to the interrupt handler in this
9130             * case.
9131             */
9132            if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9133                                     (INTR_TYPE_NET | INTR_MPSAFE),
9134                                     NULL, bxe_intr_fp, fp,
9135                                     &sc->intr[i + 1].tag)) != 0) {
9136                BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9137                      (i + 1), rc);
9138                goto bxe_interrupt_attach_exit;
9139            }
9140
9141            bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9142                              sc->intr[i + 1].tag, "fp%02d", i);
9143
9144            /* bind the fastpath instance to a cpu */
9145            if (sc->num_queues > 1) {
9146                bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9147            }
9148
9149            fp->state = BXE_FP_STATE_IRQ;
9150        }
9151    } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9152        BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9153
9154        /*
9155         * Setup the interrupt handler. Note that we pass the
9156         * driver instance to the interrupt handler which
9157         * will handle both the slowpath and fastpath.
9158         */
9159        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9160                                 (INTR_TYPE_NET | INTR_MPSAFE),
9161                                 NULL, bxe_intr_legacy, sc,
9162                                 &sc->intr[0].tag)) != 0) {
9163            BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9164            goto bxe_interrupt_attach_exit;
9165        }
9166
9167    } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9168        BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9169
9170        /*
9171         * Setup the interrupt handler. Note that we pass the
9172         * driver instance to the interrupt handler which
9173         * will handle both the slowpath and fastpath.
9174         */
9175        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9176                                 (INTR_TYPE_NET | INTR_MPSAFE),
9177                                 NULL, bxe_intr_legacy, sc,
9178                                 &sc->intr[0].tag)) != 0) {
9179            BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9180            goto bxe_interrupt_attach_exit;
9181        }
9182    }
9183
9184bxe_interrupt_attach_exit:
9185
9186    return (rc);
9187}
9188
9189static int  bxe_init_hw_common_chip(struct bxe_softc *sc);
9190static int  bxe_init_hw_common(struct bxe_softc *sc);
9191static int  bxe_init_hw_port(struct bxe_softc *sc);
9192static int  bxe_init_hw_func(struct bxe_softc *sc);
9193static void bxe_reset_common(struct bxe_softc *sc);
9194static void bxe_reset_port(struct bxe_softc *sc);
9195static void bxe_reset_func(struct bxe_softc *sc);
9196static int  bxe_gunzip_init(struct bxe_softc *sc);
9197static void bxe_gunzip_end(struct bxe_softc *sc);
9198static int  bxe_init_firmware(struct bxe_softc *sc);
9199static void bxe_release_firmware(struct bxe_softc *sc);
9200
9201static struct
9202ecore_func_sp_drv_ops bxe_func_sp_drv = {
9203    .init_hw_cmn_chip = bxe_init_hw_common_chip,
9204    .init_hw_cmn      = bxe_init_hw_common,
9205    .init_hw_port     = bxe_init_hw_port,
9206    .init_hw_func     = bxe_init_hw_func,
9207
9208    .reset_hw_cmn     = bxe_reset_common,
9209    .reset_hw_port    = bxe_reset_port,
9210    .reset_hw_func    = bxe_reset_func,
9211
9212    .gunzip_init      = bxe_gunzip_init,
9213    .gunzip_end       = bxe_gunzip_end,
9214
9215    .init_fw          = bxe_init_firmware,
9216    .release_fw       = bxe_release_firmware,
9217};
9218
9219static void
9220bxe_init_func_obj(struct bxe_softc *sc)
9221{
9222    sc->dmae_ready = 0;
9223
9224    ecore_init_func_obj(sc,
9225                        &sc->func_obj,
9226                        BXE_SP(sc, func_rdata),
9227                        BXE_SP_MAPPING(sc, func_rdata),
9228                        BXE_SP(sc, func_afex_rdata),
9229                        BXE_SP_MAPPING(sc, func_afex_rdata),
9230                        &bxe_func_sp_drv);
9231}
9232
9233static int
9234bxe_init_hw(struct bxe_softc *sc,
9235            uint32_t         load_code)
9236{
9237    struct ecore_func_state_params func_params = { NULL };
9238    int rc;
9239
9240    /* prepare the parameters for function state transitions */
9241    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9242
9243    func_params.f_obj = &sc->func_obj;
9244    func_params.cmd = ECORE_F_CMD_HW_INIT;
9245
9246    func_params.params.hw_init.load_phase = load_code;
9247
9248    /*
9249     * Via a plethora of function pointers, we will eventually reach
9250     * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9251     */
9252    rc = ecore_func_state_change(sc, &func_params);
9253
9254    return (rc);
9255}
9256
9257static void
9258bxe_fill(struct bxe_softc *sc,
9259         uint32_t         addr,
9260         int              fill,
9261         uint32_t         len)
9262{
9263    uint32_t i;
9264
9265    if (!(len % 4) && !(addr % 4)) {
9266        for (i = 0; i < len; i += 4) {
9267            REG_WR(sc, (addr + i), fill);
9268        }
9269    } else {
9270        for (i = 0; i < len; i++) {
9271            REG_WR8(sc, (addr + i), fill);
9272        }
9273    }
9274}
9275
9276/* writes FP SP data to FW - data_size in dwords */
9277static void
9278bxe_wr_fp_sb_data(struct bxe_softc *sc,
9279                  int              fw_sb_id,
9280                  uint32_t         *sb_data_p,
9281                  uint32_t         data_size)
9282{
9283    int index;
9284
9285    for (index = 0; index < data_size; index++) {
9286        REG_WR(sc,
9287               (BAR_CSTRORM_INTMEM +
9288                CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9289                (sizeof(uint32_t) * index)),
9290               *(sb_data_p + index));
9291    }
9292}
9293
9294static void
9295bxe_zero_fp_sb(struct bxe_softc *sc,
9296               int              fw_sb_id)
9297{
9298    struct hc_status_block_data_e2 sb_data_e2;
9299    struct hc_status_block_data_e1x sb_data_e1x;
9300    uint32_t *sb_data_p;
9301    uint32_t data_size = 0;
9302
9303    if (!CHIP_IS_E1x(sc)) {
9304        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9305        sb_data_e2.common.state = SB_DISABLED;
9306        sb_data_e2.common.p_func.vf_valid = FALSE;
9307        sb_data_p = (uint32_t *)&sb_data_e2;
9308        data_size = (sizeof(struct hc_status_block_data_e2) /
9309                     sizeof(uint32_t));
9310    } else {
9311        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9312        sb_data_e1x.common.state = SB_DISABLED;
9313        sb_data_e1x.common.p_func.vf_valid = FALSE;
9314        sb_data_p = (uint32_t *)&sb_data_e1x;
9315        data_size = (sizeof(struct hc_status_block_data_e1x) /
9316                     sizeof(uint32_t));
9317    }
9318
9319    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9320
9321    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9322             0, CSTORM_STATUS_BLOCK_SIZE);
9323    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9324             0, CSTORM_SYNC_BLOCK_SIZE);
9325}
9326
9327static void
9328bxe_wr_sp_sb_data(struct bxe_softc               *sc,
9329                  struct hc_sp_status_block_data *sp_sb_data)
9330{
9331    int i;
9332
9333    for (i = 0;
9334         i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9335         i++) {
9336        REG_WR(sc,
9337               (BAR_CSTRORM_INTMEM +
9338                CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9339                (i * sizeof(uint32_t))),
9340               *((uint32_t *)sp_sb_data + i));
9341    }
9342}
9343
9344static void
9345bxe_zero_sp_sb(struct bxe_softc *sc)
9346{
9347    struct hc_sp_status_block_data sp_sb_data;
9348
9349    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9350
9351    sp_sb_data.state           = SB_DISABLED;
9352    sp_sb_data.p_func.vf_valid = FALSE;
9353
9354    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9355
9356    bxe_fill(sc,
9357             (BAR_CSTRORM_INTMEM +
9358              CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9359              0, CSTORM_SP_STATUS_BLOCK_SIZE);
9360    bxe_fill(sc,
9361             (BAR_CSTRORM_INTMEM +
9362              CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9363              0, CSTORM_SP_SYNC_BLOCK_SIZE);
9364}
9365
9366static void
9367bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9368                             int                       igu_sb_id,
9369                             int                       igu_seg_id)
9370{
9371    hc_sm->igu_sb_id      = igu_sb_id;
9372    hc_sm->igu_seg_id     = igu_seg_id;
9373    hc_sm->timer_value    = 0xFF;
9374    hc_sm->time_to_expire = 0xFFFFFFFF;
9375}
9376
9377static void
9378bxe_map_sb_state_machines(struct hc_index_data *index_data)
9379{
9380    /* zero out state machine indices */
9381
9382    /* rx indices */
9383    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9384
9385    /* tx indices */
9386    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags      &= ~HC_INDEX_DATA_SM_ID;
9387    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9388    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9389    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9390
9391    /* map indices */
9392
9393    /* rx indices */
9394    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9395        (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9396
9397    /* tx indices */
9398    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9399        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9400    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9401        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9402    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9403        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9404    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9405        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9406}
9407
9408static void
9409bxe_init_sb(struct bxe_softc *sc,
9410            bus_addr_t       busaddr,
9411            int              vfid,
9412            uint8_t          vf_valid,
9413            int              fw_sb_id,
9414            int              igu_sb_id)
9415{
9416    struct hc_status_block_data_e2  sb_data_e2;
9417    struct hc_status_block_data_e1x sb_data_e1x;
9418    struct hc_status_block_sm       *hc_sm_p;
9419    uint32_t *sb_data_p;
9420    int igu_seg_id;
9421    int data_size;
9422
9423    if (CHIP_INT_MODE_IS_BC(sc)) {
9424        igu_seg_id = HC_SEG_ACCESS_NORM;
9425    } else {
9426        igu_seg_id = IGU_SEG_ACCESS_NORM;
9427    }
9428
9429    bxe_zero_fp_sb(sc, fw_sb_id);
9430
9431    if (!CHIP_IS_E1x(sc)) {
9432        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9433        sb_data_e2.common.state = SB_ENABLED;
9434        sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9435        sb_data_e2.common.p_func.vf_id = vfid;
9436        sb_data_e2.common.p_func.vf_valid = vf_valid;
9437        sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9438        sb_data_e2.common.same_igu_sb_1b = TRUE;
9439        sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9440        sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9441        hc_sm_p = sb_data_e2.common.state_machine;
9442        sb_data_p = (uint32_t *)&sb_data_e2;
9443        data_size = (sizeof(struct hc_status_block_data_e2) /
9444                     sizeof(uint32_t));
9445        bxe_map_sb_state_machines(sb_data_e2.index_data);
9446    } else {
9447        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9448        sb_data_e1x.common.state = SB_ENABLED;
9449        sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9450        sb_data_e1x.common.p_func.vf_id = 0xff;
9451        sb_data_e1x.common.p_func.vf_valid = FALSE;
9452        sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9453        sb_data_e1x.common.same_igu_sb_1b = TRUE;
9454        sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9455        sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9456        hc_sm_p = sb_data_e1x.common.state_machine;
9457        sb_data_p = (uint32_t *)&sb_data_e1x;
9458        data_size = (sizeof(struct hc_status_block_data_e1x) /
9459                     sizeof(uint32_t));
9460        bxe_map_sb_state_machines(sb_data_e1x.index_data);
9461    }
9462
9463    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9464    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9465
9466    BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9467
9468    /* write indices to HW - PCI guarantees endianity of regpairs */
9469    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9470}
9471
9472static inline uint8_t
9473bxe_fp_qzone_id(struct bxe_fastpath *fp)
9474{
9475    if (CHIP_IS_E1x(fp->sc)) {
9476        return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9477    } else {
9478        return (fp->cl_id);
9479    }
9480}
9481
9482static inline uint32_t
9483bxe_rx_ustorm_prods_offset(struct bxe_softc    *sc,
9484                           struct bxe_fastpath *fp)
9485{
9486    uint32_t offset = BAR_USTRORM_INTMEM;
9487
9488    if (!CHIP_IS_E1x(sc)) {
9489        offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9490    } else {
9491        offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9492    }
9493
9494    return (offset);
9495}
9496
9497static void
9498bxe_init_eth_fp(struct bxe_softc *sc,
9499                int              idx)
9500{
9501    struct bxe_fastpath *fp = &sc->fp[idx];
9502    uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
9503    unsigned long q_type = 0;
9504    int cos;
9505
9506    fp->sc    = sc;
9507    fp->index = idx;
9508
9509    fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9510    fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9511
9512    fp->cl_id = (CHIP_IS_E1x(sc)) ?
9513                    (SC_L_ID(sc) + idx) :
9514                    /* want client ID same as IGU SB ID for non-E1 */
9515                    fp->igu_sb_id;
9516    fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9517
9518    /* setup sb indices */
9519    if (!CHIP_IS_E1x(sc)) {
9520        fp->sb_index_values  = fp->status_block.e2_sb->sb.index_values;
9521        fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9522    } else {
9523        fp->sb_index_values  = fp->status_block.e1x_sb->sb.index_values;
9524        fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9525    }
9526
9527    /* init shortcut */
9528    fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9529
9530    fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9531
9532    /*
9533     * XXX If multiple CoS is ever supported then each fastpath structure
9534     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
9535     */
9536    for (cos = 0; cos < sc->max_cos; cos++) {
9537        cids[cos] = idx;
9538    }
9539    fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9540
9541    /* nothing more for a VF to do */
9542    if (IS_VF(sc)) {
9543        return;
9544    }
9545
9546    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9547                fp->fw_sb_id, fp->igu_sb_id);
9548
9549    bxe_update_fp_sb_idx(fp);
9550
9551    /* Configure Queue State object */
9552    bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
9553    bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
9554
9555    ecore_init_queue_obj(sc,
9556                         &sc->sp_objs[idx].q_obj,
9557                         fp->cl_id,
9558                         cids,
9559                         sc->max_cos,
9560                         SC_FUNC(sc),
9561                         BXE_SP(sc, q_rdata),
9562                         BXE_SP_MAPPING(sc, q_rdata),
9563                         q_type);
9564
9565    /* configure classification DBs */
9566    ecore_init_mac_obj(sc,
9567                       &sc->sp_objs[idx].mac_obj,
9568                       fp->cl_id,
9569                       idx,
9570                       SC_FUNC(sc),
9571                       BXE_SP(sc, mac_rdata),
9572                       BXE_SP_MAPPING(sc, mac_rdata),
9573                       ECORE_FILTER_MAC_PENDING,
9574                       &sc->sp_state,
9575                       ECORE_OBJ_TYPE_RX_TX,
9576                       &sc->macs_pool);
9577
9578    BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
9579          idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9580}
9581
9582static inline void
9583bxe_update_rx_prod(struct bxe_softc    *sc,
9584                   struct bxe_fastpath *fp,
9585                   uint16_t            rx_bd_prod,
9586                   uint16_t            rx_cq_prod,
9587                   uint16_t            rx_sge_prod)
9588{
9589    struct ustorm_eth_rx_producers rx_prods = { 0 };
9590    uint32_t i;
9591
9592    /* update producers */
9593    rx_prods.bd_prod  = rx_bd_prod;
9594    rx_prods.cqe_prod = rx_cq_prod;
9595    rx_prods.sge_prod = rx_sge_prod;
9596
9597    /*
9598     * Make sure that the BD and SGE data is updated before updating the
9599     * producers since FW might read the BD/SGE right after the producer
9600     * is updated.
9601     * This is only applicable for weak-ordered memory model archs such
9602     * as IA-64. The following barrier is also mandatory since FW will
9603     * assumes BDs must have buffers.
9604     */
9605    wmb();
9606
9607    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
9608        REG_WR(sc,
9609               (fp->ustorm_rx_prods_offset + (i * 4)),
9610               ((uint32_t *)&rx_prods)[i]);
9611    }
9612
9613    wmb(); /* keep prod updates ordered */
9614
9615    BLOGD(sc, DBG_RX,
9616          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
9617          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
9618}
9619
9620static void
9621bxe_init_rx_rings(struct bxe_softc *sc)
9622{
9623    struct bxe_fastpath *fp;
9624    int i;
9625
9626    for (i = 0; i < sc->num_queues; i++) {
9627        fp = &sc->fp[i];
9628
9629        fp->rx_bd_cons = 0;
9630
9631        /*
9632         * Activate the BD ring...
9633         * Warning, this will generate an interrupt (to the TSTORM)
9634         * so this can only be done after the chip is initialized
9635         */
9636        bxe_update_rx_prod(sc, fp,
9637                           fp->rx_bd_prod,
9638                           fp->rx_cq_prod,
9639                           fp->rx_sge_prod);
9640
9641        if (i != 0) {
9642            continue;
9643        }
9644
9645        if (CHIP_IS_E1(sc)) {
9646            REG_WR(sc,
9647                   (BAR_USTRORM_INTMEM +
9648                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
9649                   U64_LO(fp->rcq_dma.paddr));
9650            REG_WR(sc,
9651                   (BAR_USTRORM_INTMEM +
9652                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
9653                   U64_HI(fp->rcq_dma.paddr));
9654        }
9655    }
9656}
9657
9658static void
9659bxe_init_tx_ring_one(struct bxe_fastpath *fp)
9660{
9661    SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
9662    fp->tx_db.data.zero_fill1 = 0;
9663    fp->tx_db.data.prod = 0;
9664
9665    fp->tx_pkt_prod = 0;
9666    fp->tx_pkt_cons = 0;
9667    fp->tx_bd_prod = 0;
9668    fp->tx_bd_cons = 0;
9669    fp->eth_q_stats.tx_pkts = 0;
9670}
9671
9672static inline void
9673bxe_init_tx_rings(struct bxe_softc *sc)
9674{
9675    int i;
9676
9677    for (i = 0; i < sc->num_queues; i++) {
9678        bxe_init_tx_ring_one(&sc->fp[i]);
9679    }
9680}
9681
9682static void
9683bxe_init_def_sb(struct bxe_softc *sc)
9684{
9685    struct host_sp_status_block *def_sb = sc->def_sb;
9686    bus_addr_t mapping = sc->def_sb_dma.paddr;
9687    int igu_sp_sb_index;
9688    int igu_seg_id;
9689    int port = SC_PORT(sc);
9690    int func = SC_FUNC(sc);
9691    int reg_offset, reg_offset_en5;
9692    uint64_t section;
9693    int index, sindex;
9694    struct hc_sp_status_block_data sp_sb_data;
9695
9696    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9697
9698    if (CHIP_INT_MODE_IS_BC(sc)) {
9699        igu_sp_sb_index = DEF_SB_IGU_ID;
9700        igu_seg_id = HC_SEG_ACCESS_DEF;
9701    } else {
9702        igu_sp_sb_index = sc->igu_dsb_id;
9703        igu_seg_id = IGU_SEG_ACCESS_DEF;
9704    }
9705
9706    /* attentions */
9707    section = ((uint64_t)mapping +
9708               offsetof(struct host_sp_status_block, atten_status_block));
9709    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9710    sc->attn_state = 0;
9711
9712    reg_offset = (port) ?
9713                     MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
9714                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
9715    reg_offset_en5 = (port) ?
9716                         MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
9717                         MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
9718
9719    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
9720        /* take care of sig[0]..sig[4] */
9721        for (sindex = 0; sindex < 4; sindex++) {
9722            sc->attn_group[index].sig[sindex] =
9723                REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
9724        }
9725
9726        if (!CHIP_IS_E1x(sc)) {
9727            /*
9728             * enable5 is separate from the rest of the registers,
9729             * and the address skip is 4 and not 16 between the
9730             * different groups
9731             */
9732            sc->attn_group[index].sig[4] =
9733                REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
9734        } else {
9735            sc->attn_group[index].sig[4] = 0;
9736        }
9737    }
9738
9739    if (sc->devinfo.int_block == INT_BLOCK_HC) {
9740        reg_offset = (port) ?
9741                         HC_REG_ATTN_MSG1_ADDR_L :
9742                         HC_REG_ATTN_MSG0_ADDR_L;
9743        REG_WR(sc, reg_offset, U64_LO(section));
9744        REG_WR(sc, (reg_offset + 4), U64_HI(section));
9745    } else if (!CHIP_IS_E1x(sc)) {
9746        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
9747        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
9748    }
9749
9750    section = ((uint64_t)mapping +
9751               offsetof(struct host_sp_status_block, sp_sb));
9752
9753    bxe_zero_sp_sb(sc);
9754
9755    /* PCI guarantees endianity of regpair */
9756    sp_sb_data.state           = SB_ENABLED;
9757    sp_sb_data.host_sb_addr.lo = U64_LO(section);
9758    sp_sb_data.host_sb_addr.hi = U64_HI(section);
9759    sp_sb_data.igu_sb_id       = igu_sp_sb_index;
9760    sp_sb_data.igu_seg_id      = igu_seg_id;
9761    sp_sb_data.p_func.pf_id    = func;
9762    sp_sb_data.p_func.vnic_id  = SC_VN(sc);
9763    sp_sb_data.p_func.vf_id    = 0xff;
9764
9765    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9766
9767    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9768}
9769
9770static void
9771bxe_init_sp_ring(struct bxe_softc *sc)
9772{
9773    atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9774    sc->spq_prod_idx = 0;
9775    sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9776    sc->spq_prod_bd = sc->spq;
9777    sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9778}
9779
9780static void
9781bxe_init_eq_ring(struct bxe_softc *sc)
9782{
9783    union event_ring_elem *elem;
9784    int i;
9785
9786    for (i = 1; i <= NUM_EQ_PAGES; i++) {
9787        elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9788
9789        elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9790                                                 BCM_PAGE_SIZE *
9791                                                 (i % NUM_EQ_PAGES)));
9792        elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9793                                                 BCM_PAGE_SIZE *
9794                                                 (i % NUM_EQ_PAGES)));
9795    }
9796
9797    sc->eq_cons    = 0;
9798    sc->eq_prod    = NUM_EQ_DESC;
9799    sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9800
9801    atomic_store_rel_long(&sc->eq_spq_left,
9802                          (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
9803                               NUM_EQ_DESC) - 1));
9804}
9805
9806static void
9807bxe_init_internal_common(struct bxe_softc *sc)
9808{
9809    int i;
9810
9811    /*
9812     * Zero this manually as its initialization is currently missing
9813     * in the initTool.
9814     */
9815    for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
9816        REG_WR(sc,
9817               (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
9818               0);
9819    }
9820
9821    if (!CHIP_IS_E1x(sc)) {
9822        REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
9823                CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
9824    }
9825}
9826
9827static void
9828bxe_init_internal(struct bxe_softc *sc,
9829                  uint32_t         load_code)
9830{
9831    switch (load_code) {
9832    case FW_MSG_CODE_DRV_LOAD_COMMON:
9833    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
9834        bxe_init_internal_common(sc);
9835        /* no break */
9836
9837    case FW_MSG_CODE_DRV_LOAD_PORT:
9838        /* nothing to do */
9839        /* no break */
9840
9841    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
9842        /* internal memory per function is initialized inside bxe_pf_init */
9843        break;
9844
9845    default:
9846        BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
9847        break;
9848    }
9849}
9850
9851static void
9852storm_memset_func_cfg(struct bxe_softc                         *sc,
9853                      struct tstorm_eth_function_common_config *tcfg,
9854                      uint16_t                                  abs_fid)
9855{
9856    uint32_t addr;
9857    size_t size;
9858
9859    addr = (BAR_TSTRORM_INTMEM +
9860            TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
9861    size = sizeof(struct tstorm_eth_function_common_config);
9862    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
9863}
9864
9865static void
9866bxe_func_init(struct bxe_softc            *sc,
9867              struct bxe_func_init_params *p)
9868{
9869    struct tstorm_eth_function_common_config tcfg = { 0 };
9870
9871    if (CHIP_IS_E1x(sc)) {
9872        storm_memset_func_cfg(sc, &tcfg, p->func_id);
9873    }
9874
9875    /* Enable the function in the FW */
9876    storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
9877    storm_memset_func_en(sc, p->func_id, 1);
9878
9879    /* spq */
9880    if (p->func_flgs & FUNC_FLG_SPQ) {
9881        storm_memset_spq_addr(sc, p->spq_map, p->func_id);
9882        REG_WR(sc,
9883               (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
9884               p->spq_prod);
9885    }
9886}
9887
9888/*
9889 * Calculates the sum of vn_min_rates.
9890 * It's needed for further normalizing of the min_rates.
9891 * Returns:
9892 *   sum of vn_min_rates.
9893 *     or
9894 *   0 - if all the min_rates are 0.
9895 * In the later case fainess algorithm should be deactivated.
9896 * If all min rates are not zero then those that are zeroes will be set to 1.
9897 */
9898static void
9899bxe_calc_vn_min(struct bxe_softc       *sc,
9900                struct cmng_init_input *input)
9901{
9902    uint32_t vn_cfg;
9903    uint32_t vn_min_rate;
9904    int all_zero = 1;
9905    int vn;
9906
9907    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
9908        vn_cfg = sc->devinfo.mf_info.mf_config[vn];
9909        vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
9910                        FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
9911
9912        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
9913            /* skip hidden VNs */
9914            vn_min_rate = 0;
9915        } else if (!vn_min_rate) {
9916            /* If min rate is zero - set it to 100 */
9917            vn_min_rate = DEF_MIN_RATE;
9918        } else {
9919            all_zero = 0;
9920        }
9921
9922        input->vnic_min_rate[vn] = vn_min_rate;
9923    }
9924
9925    /* if ETS or all min rates are zeros - disable fairness */
9926    if (BXE_IS_ETS_ENABLED(sc)) {
9927        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
9928        BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
9929    } else if (all_zero) {
9930        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
9931        BLOGD(sc, DBG_LOAD,
9932              "Fariness disabled (all MIN values are zeroes)\n");
9933    } else {
9934        input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
9935    }
9936}
9937
9938static inline uint16_t
9939bxe_extract_max_cfg(struct bxe_softc *sc,
9940                    uint32_t         mf_cfg)
9941{
9942    uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
9943                        FUNC_MF_CFG_MAX_BW_SHIFT);
9944
9945    if (!max_cfg) {
9946        BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
9947        max_cfg = 100;
9948    }
9949
9950    return (max_cfg);
9951}
9952
9953static void
9954bxe_calc_vn_max(struct bxe_softc       *sc,
9955                int                    vn,
9956                struct cmng_init_input *input)
9957{
9958    uint16_t vn_max_rate;
9959    uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
9960    uint32_t max_cfg;
9961
9962    if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
9963        vn_max_rate = 0;
9964    } else {
9965        max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
9966
9967        if (IS_MF_SI(sc)) {
9968            /* max_cfg in percents of linkspeed */
9969            vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
9970        } else { /* SD modes */
9971            /* max_cfg is absolute in 100Mb units */
9972            vn_max_rate = (max_cfg * 100);
9973        }
9974    }
9975
9976    BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
9977
9978    input->vnic_max_rate[vn] = vn_max_rate;
9979}
9980
9981static void
9982bxe_cmng_fns_init(struct bxe_softc *sc,
9983                  uint8_t          read_cfg,
9984                  uint8_t          cmng_type)
9985{
9986    struct cmng_init_input input;
9987    int vn;
9988
9989    memset(&input, 0, sizeof(struct cmng_init_input));
9990
9991    input.port_rate = sc->link_vars.line_speed;
9992
9993    if (cmng_type == CMNG_FNS_MINMAX) {
9994        /* read mf conf from shmem */
9995        if (read_cfg) {
9996            bxe_read_mf_cfg(sc);
9997        }
9998
9999        /* get VN min rate and enable fairness if not 0 */
10000        bxe_calc_vn_min(sc, &input);
10001
10002        /* get VN max rate */
10003        if (sc->port.pmf) {
10004            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10005                bxe_calc_vn_max(sc, vn, &input);
10006            }
10007        }
10008
10009        /* always enable rate shaping and fairness */
10010        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10011
10012        ecore_init_cmng(&input, &sc->cmng);
10013        return;
10014    }
10015
10016    /* rate shaping and fairness are disabled */
10017    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10018}
10019
10020static int
10021bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10022{
10023    if (CHIP_REV_IS_SLOW(sc)) {
10024        return (CMNG_FNS_NONE);
10025    }
10026
10027    if (IS_MF(sc)) {
10028        return (CMNG_FNS_MINMAX);
10029    }
10030
10031    return (CMNG_FNS_NONE);
10032}
10033
10034static void
10035storm_memset_cmng(struct bxe_softc *sc,
10036                  struct cmng_init *cmng,
10037                  uint8_t          port)
10038{
10039    int vn;
10040    int func;
10041    uint32_t addr;
10042    size_t size;
10043
10044    addr = (BAR_XSTRORM_INTMEM +
10045            XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10046    size = sizeof(struct cmng_struct_per_port);
10047    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10048
10049    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10050        func = func_by_vn(sc, vn);
10051
10052        addr = (BAR_XSTRORM_INTMEM +
10053                XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10054        size = sizeof(struct rate_shaping_vars_per_vn);
10055        ecore_storm_memset_struct(sc, addr, size,
10056                                  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10057
10058        addr = (BAR_XSTRORM_INTMEM +
10059                XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10060        size = sizeof(struct fairness_vars_per_vn);
10061        ecore_storm_memset_struct(sc, addr, size,
10062                                  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10063    }
10064}
10065
10066static void
10067bxe_pf_init(struct bxe_softc *sc)
10068{
10069    struct bxe_func_init_params func_init = { 0 };
10070    struct event_ring_data eq_data = { { 0 } };
10071    uint16_t flags;
10072
10073    if (!CHIP_IS_E1x(sc)) {
10074        /* reset IGU PF statistics: MSIX + ATTN */
10075        /* PF */
10076        REG_WR(sc,
10077               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10078                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10079                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10080               0);
10081        /* ATTN */
10082        REG_WR(sc,
10083               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10084                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10085                (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10086                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10087               0);
10088    }
10089
10090    /* function setup flags */
10091    flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10092
10093    /*
10094     * This flag is relevant for E1x only.
10095     * E2 doesn't have a TPA configuration in a function level.
10096     */
10097    flags |= (sc->ifnet->if_capenable & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10098
10099    func_init.func_flgs = flags;
10100    func_init.pf_id     = SC_FUNC(sc);
10101    func_init.func_id   = SC_FUNC(sc);
10102    func_init.spq_map   = sc->spq_dma.paddr;
10103    func_init.spq_prod  = sc->spq_prod_idx;
10104
10105    bxe_func_init(sc, &func_init);
10106
10107    memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10108
10109    /*
10110     * Congestion management values depend on the link rate.
10111     * There is no active link so initial link rate is set to 10Gbps.
10112     * When the link comes up the congestion management values are
10113     * re-calculated according to the actual link rate.
10114     */
10115    sc->link_vars.line_speed = SPEED_10000;
10116    bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10117
10118    /* Only the PMF sets the HW */
10119    if (sc->port.pmf) {
10120        storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10121    }
10122
10123    /* init Event Queue - PCI bus guarantees correct endainity */
10124    eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10125    eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10126    eq_data.producer     = sc->eq_prod;
10127    eq_data.index_id     = HC_SP_INDEX_EQ_CONS;
10128    eq_data.sb_id        = DEF_SB_ID;
10129    storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10130}
10131
10132static void
10133bxe_hc_int_enable(struct bxe_softc *sc)
10134{
10135    int port = SC_PORT(sc);
10136    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10137    uint32_t val = REG_RD(sc, addr);
10138    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10139    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10140                           (sc->intr_count == 1)) ? TRUE : FALSE;
10141    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10142
10143    if (msix) {
10144        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10145                 HC_CONFIG_0_REG_INT_LINE_EN_0);
10146        val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10147                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10148        if (single_msix) {
10149            val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10150        }
10151    } else if (msi) {
10152        val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10153        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10154                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10155                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10156    } else {
10157        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10158                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10159                HC_CONFIG_0_REG_INT_LINE_EN_0 |
10160                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10161
10162        if (!CHIP_IS_E1(sc)) {
10163            BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10164                  val, port, addr);
10165
10166            REG_WR(sc, addr, val);
10167
10168            val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10169        }
10170    }
10171
10172    if (CHIP_IS_E1(sc)) {
10173        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10174    }
10175
10176    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10177          val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10178
10179    REG_WR(sc, addr, val);
10180
10181    /* ensure that HC_CONFIG is written before leading/trailing edge config */
10182    mb();
10183
10184    if (!CHIP_IS_E1(sc)) {
10185        /* init leading/trailing edge */
10186        if (IS_MF(sc)) {
10187            val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10188            if (sc->port.pmf) {
10189                /* enable nig and gpio3 attention */
10190                val |= 0x1100;
10191            }
10192        } else {
10193            val = 0xffff;
10194        }
10195
10196        REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10197        REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10198    }
10199
10200    /* make sure that interrupts are indeed enabled from here on */
10201    mb();
10202}
10203
10204static void
10205bxe_igu_int_enable(struct bxe_softc *sc)
10206{
10207    uint32_t val;
10208    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10209    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10210                           (sc->intr_count == 1)) ? TRUE : FALSE;
10211    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10212
10213    val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10214
10215    if (msix) {
10216        val &= ~(IGU_PF_CONF_INT_LINE_EN |
10217                 IGU_PF_CONF_SINGLE_ISR_EN);
10218        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10219                IGU_PF_CONF_ATTN_BIT_EN);
10220        if (single_msix) {
10221            val |= IGU_PF_CONF_SINGLE_ISR_EN;
10222        }
10223    } else if (msi) {
10224        val &= ~IGU_PF_CONF_INT_LINE_EN;
10225        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10226                IGU_PF_CONF_ATTN_BIT_EN |
10227                IGU_PF_CONF_SINGLE_ISR_EN);
10228    } else {
10229        val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10230        val |= (IGU_PF_CONF_INT_LINE_EN |
10231                IGU_PF_CONF_ATTN_BIT_EN |
10232                IGU_PF_CONF_SINGLE_ISR_EN);
10233    }
10234
10235    /* clean previous status - need to configure igu prior to ack*/
10236    if ((!msix) || single_msix) {
10237        REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10238        bxe_ack_int(sc);
10239    }
10240
10241    val |= IGU_PF_CONF_FUNC_EN;
10242
10243    BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10244          val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10245
10246    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10247
10248    mb();
10249
10250    /* init leading/trailing edge */
10251    if (IS_MF(sc)) {
10252        val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10253        if (sc->port.pmf) {
10254            /* enable nig and gpio3 attention */
10255            val |= 0x1100;
10256        }
10257    } else {
10258        val = 0xffff;
10259    }
10260
10261    REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10262    REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10263
10264    /* make sure that interrupts are indeed enabled from here on */
10265    mb();
10266}
10267
10268static void
10269bxe_int_enable(struct bxe_softc *sc)
10270{
10271    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10272        bxe_hc_int_enable(sc);
10273    } else {
10274        bxe_igu_int_enable(sc);
10275    }
10276}
10277
10278static void
10279bxe_hc_int_disable(struct bxe_softc *sc)
10280{
10281    int port = SC_PORT(sc);
10282    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10283    uint32_t val = REG_RD(sc, addr);
10284
10285    /*
10286     * In E1 we must use only PCI configuration space to disable MSI/MSIX
10287     * capablility. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC
10288     * block
10289     */
10290    if (CHIP_IS_E1(sc)) {
10291        /*
10292         * Since IGU_PF_CONF_MSI_MSIX_EN still always on use mask register
10293         * to prevent from HC sending interrupts after we exit the function
10294         */
10295        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10296
10297        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10298                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10299                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10300    } else {
10301        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10302                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10303                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10304                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10305    }
10306
10307    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10308
10309    /* flush all outstanding writes */
10310    mb();
10311
10312    REG_WR(sc, addr, val);
10313    if (REG_RD(sc, addr) != val) {
10314        BLOGE(sc, "proper val not read from HC IGU!\n");
10315    }
10316}
10317
10318static void
10319bxe_igu_int_disable(struct bxe_softc *sc)
10320{
10321    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10322
10323    val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10324             IGU_PF_CONF_INT_LINE_EN |
10325             IGU_PF_CONF_ATTN_BIT_EN);
10326
10327    BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10328
10329    /* flush all outstanding writes */
10330    mb();
10331
10332    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10333    if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10334        BLOGE(sc, "proper val not read from IGU!\n");
10335    }
10336}
10337
10338static void
10339bxe_int_disable(struct bxe_softc *sc)
10340{
10341    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10342        bxe_hc_int_disable(sc);
10343    } else {
10344        bxe_igu_int_disable(sc);
10345    }
10346}
10347
10348static void
10349bxe_nic_init(struct bxe_softc *sc,
10350             int              load_code)
10351{
10352    int i;
10353
10354    for (i = 0; i < sc->num_queues; i++) {
10355        bxe_init_eth_fp(sc, i);
10356    }
10357
10358    rmb(); /* ensure status block indices were read */
10359
10360    bxe_init_rx_rings(sc);
10361    bxe_init_tx_rings(sc);
10362
10363    if (IS_VF(sc)) {
10364        return;
10365    }
10366
10367    /* initialize MOD_ABS interrupts */
10368    elink_init_mod_abs_int(sc, &sc->link_vars,
10369                           sc->devinfo.chip_id,
10370                           sc->devinfo.shmem_base,
10371                           sc->devinfo.shmem2_base,
10372                           SC_PORT(sc));
10373
10374    bxe_init_def_sb(sc);
10375    bxe_update_dsb_idx(sc);
10376    bxe_init_sp_ring(sc);
10377    bxe_init_eq_ring(sc);
10378    bxe_init_internal(sc, load_code);
10379    bxe_pf_init(sc);
10380    bxe_stats_init(sc);
10381
10382    /* flush all before enabling interrupts */
10383    mb();
10384
10385    bxe_int_enable(sc);
10386
10387    /* check for SPIO5 */
10388    bxe_attn_int_deasserted0(sc,
10389                             REG_RD(sc,
10390                                    (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10391                                     SC_PORT(sc)*4)) &
10392                             AEU_INPUTS_ATTN_BITS_SPIO5);
10393}
10394
10395static inline void
10396bxe_init_objs(struct bxe_softc *sc)
10397{
10398    /* mcast rules must be added to tx if tx switching is enabled */
10399    ecore_obj_type o_type =
10400        (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10401                                         ECORE_OBJ_TYPE_RX;
10402
10403    /* RX_MODE controlling object */
10404    ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10405
10406    /* multicast configuration controlling object */
10407    ecore_init_mcast_obj(sc,
10408                         &sc->mcast_obj,
10409                         sc->fp[0].cl_id,
10410                         sc->fp[0].index,
10411                         SC_FUNC(sc),
10412                         SC_FUNC(sc),
10413                         BXE_SP(sc, mcast_rdata),
10414                         BXE_SP_MAPPING(sc, mcast_rdata),
10415                         ECORE_FILTER_MCAST_PENDING,
10416                         &sc->sp_state,
10417                         o_type);
10418
10419    /* Setup CAM credit pools */
10420    ecore_init_mac_credit_pool(sc,
10421                               &sc->macs_pool,
10422                               SC_FUNC(sc),
10423                               CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10424                                                 VNICS_PER_PATH(sc));
10425
10426    ecore_init_vlan_credit_pool(sc,
10427                                &sc->vlans_pool,
10428                                SC_ABS_FUNC(sc) >> 1,
10429                                CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10430                                                  VNICS_PER_PATH(sc));
10431
10432    /* RSS configuration object */
10433    ecore_init_rss_config_obj(sc,
10434                              &sc->rss_conf_obj,
10435                              sc->fp[0].cl_id,
10436                              sc->fp[0].index,
10437                              SC_FUNC(sc),
10438                              SC_FUNC(sc),
10439                              BXE_SP(sc, rss_rdata),
10440                              BXE_SP_MAPPING(sc, rss_rdata),
10441                              ECORE_FILTER_RSS_CONF_PENDING,
10442                              &sc->sp_state, ECORE_OBJ_TYPE_RX);
10443}
10444
10445/*
10446 * Initialize the function. This must be called before sending CLIENT_SETUP
10447 * for the first client.
10448 */
10449static inline int
10450bxe_func_start(struct bxe_softc *sc)
10451{
10452    struct ecore_func_state_params func_params = { NULL };
10453    struct ecore_func_start_params *start_params = &func_params.params.start;
10454
10455    /* Prepare parameters for function state transitions */
10456    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
10457
10458    func_params.f_obj = &sc->func_obj;
10459    func_params.cmd = ECORE_F_CMD_START;
10460
10461    /* Function parameters */
10462    start_params->mf_mode     = sc->devinfo.mf_info.mf_mode;
10463    start_params->sd_vlan_tag = OVLAN(sc);
10464
10465    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
10466        start_params->network_cos_mode = STATIC_COS;
10467    } else { /* CHIP_IS_E1X */
10468        start_params->network_cos_mode = FW_WRR;
10469    }
10470
10471    //start_params->gre_tunnel_mode = 0;
10472    //start_params->gre_tunnel_rss  = 0;
10473
10474    return (ecore_func_state_change(sc, &func_params));
10475}
10476
10477static int
10478bxe_set_power_state(struct bxe_softc *sc,
10479                    uint8_t          state)
10480{
10481    uint16_t pmcsr;
10482
10483    /* If there is no power capability, silently succeed */
10484    if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
10485        BLOGW(sc, "No power capability\n");
10486        return (0);
10487    }
10488
10489    pmcsr = pci_read_config(sc->dev,
10490                            (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10491                            2);
10492
10493    switch (state) {
10494    case PCI_PM_D0:
10495        pci_write_config(sc->dev,
10496                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10497                         ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
10498
10499        if (pmcsr & PCIM_PSTAT_DMASK) {
10500            /* delay required during transition out of D3hot */
10501            DELAY(20000);
10502        }
10503
10504        break;
10505
10506    case PCI_PM_D3hot:
10507        /* XXX if there are other clients above don't shut down the power */
10508
10509        /* don't shut down the power for emulation and FPGA */
10510        if (CHIP_REV_IS_SLOW(sc)) {
10511            return (0);
10512        }
10513
10514        pmcsr &= ~PCIM_PSTAT_DMASK;
10515        pmcsr |= PCIM_PSTAT_D3;
10516
10517        if (sc->wol) {
10518            pmcsr |= PCIM_PSTAT_PMEENABLE;
10519        }
10520
10521        pci_write_config(sc->dev,
10522                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10523                         pmcsr, 4);
10524
10525        /*
10526         * No more memory access after this point until device is brought back
10527         * to D0 state.
10528         */
10529        break;
10530
10531    default:
10532        BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
10533            state, pmcsr);
10534        return (-1);
10535    }
10536
10537    return (0);
10538}
10539
10540
10541/* return true if succeeded to acquire the lock */
10542static uint8_t
10543bxe_trylock_hw_lock(struct bxe_softc *sc,
10544                    uint32_t         resource)
10545{
10546    uint32_t lock_status;
10547    uint32_t resource_bit = (1 << resource);
10548    int func = SC_FUNC(sc);
10549    uint32_t hw_lock_control_reg;
10550
10551    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
10552
10553    /* Validating that the resource is within range */
10554    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
10555        BLOGD(sc, DBG_LOAD,
10556              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
10557              resource, HW_LOCK_MAX_RESOURCE_VALUE);
10558        return (FALSE);
10559    }
10560
10561    if (func <= 5) {
10562        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
10563    } else {
10564        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10565    }
10566
10567    /* try to acquire the lock */
10568    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
10569    lock_status = REG_RD(sc, hw_lock_control_reg);
10570    if (lock_status & resource_bit) {
10571        return (TRUE);
10572    }
10573
10574    BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
10575        "lock_status 0x%x resource_bit 0x%x\n", resource, func,
10576        lock_status, resource_bit);
10577
10578    return (FALSE);
10579}
10580
10581/*
10582 * Get the recovery leader resource id according to the engine this function
10583 * belongs to. Currently only only 2 engines is supported.
10584 */
10585static int
10586bxe_get_leader_lock_resource(struct bxe_softc *sc)
10587{
10588    if (SC_PATH(sc)) {
10589        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
10590    } else {
10591        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
10592    }
10593}
10594
10595/* try to acquire a leader lock for current engine */
10596static uint8_t
10597bxe_trylock_leader_lock(struct bxe_softc *sc)
10598{
10599    return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10600}
10601
10602static int
10603bxe_release_leader_lock(struct bxe_softc *sc)
10604{
10605    return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10606}
10607
10608/* close gates #2, #3 and #4 */
10609static void
10610bxe_set_234_gates(struct bxe_softc *sc,
10611                  uint8_t          close)
10612{
10613    uint32_t val;
10614
10615    /* gates #2 and #4a are closed/opened for "not E1" only */
10616    if (!CHIP_IS_E1(sc)) {
10617        /* #4 */
10618        REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
10619        /* #2 */
10620        REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
10621    }
10622
10623    /* #3 */
10624    if (CHIP_IS_E1x(sc)) {
10625        /* prevent interrupts from HC on both ports */
10626        val = REG_RD(sc, HC_REG_CONFIG_1);
10627        REG_WR(sc, HC_REG_CONFIG_1,
10628               (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
10629               (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
10630
10631        val = REG_RD(sc, HC_REG_CONFIG_0);
10632        REG_WR(sc, HC_REG_CONFIG_0,
10633               (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
10634               (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
10635    } else {
10636        /* Prevent incomming interrupts in IGU */
10637        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
10638
10639        REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
10640               (!close) ?
10641               (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
10642               (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
10643    }
10644
10645    BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
10646          close ? "closing" : "opening");
10647
10648    wmb();
10649}
10650
10651/* poll for pending writes bit, it should get cleared in no more than 1s */
10652static int
10653bxe_er_poll_igu_vq(struct bxe_softc *sc)
10654{
10655    uint32_t cnt = 1000;
10656    uint32_t pend_bits = 0;
10657
10658    do {
10659        pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
10660
10661        if (pend_bits == 0) {
10662            break;
10663        }
10664
10665        DELAY(1000);
10666    } while (--cnt > 0);
10667
10668    if (cnt == 0) {
10669        BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
10670        return (-1);
10671    }
10672
10673    return (0);
10674}
10675
10676#define SHARED_MF_CLP_MAGIC  0x80000000 /* 'magic' bit */
10677
10678static void
10679bxe_clp_reset_prep(struct bxe_softc *sc,
10680                   uint32_t         *magic_val)
10681{
10682    /* Do some magic... */
10683    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10684    *magic_val = val & SHARED_MF_CLP_MAGIC;
10685    MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
10686}
10687
10688/* restore the value of the 'magic' bit */
10689static void
10690bxe_clp_reset_done(struct bxe_softc *sc,
10691                   uint32_t         magic_val)
10692{
10693    /* Restore the 'magic' bit value... */
10694    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10695    MFCFG_WR(sc, shared_mf_config.clp_mb,
10696              (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
10697}
10698
10699/* prepare for MCP reset, takes care of CLP configurations */
10700static void
10701bxe_reset_mcp_prep(struct bxe_softc *sc,
10702                   uint32_t         *magic_val)
10703{
10704    uint32_t shmem;
10705    uint32_t validity_offset;
10706
10707    /* set `magic' bit in order to save MF config */
10708    if (!CHIP_IS_E1(sc)) {
10709        bxe_clp_reset_prep(sc, magic_val);
10710    }
10711
10712    /* get shmem offset */
10713    shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10714    validity_offset =
10715        offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
10716
10717    /* Clear validity map flags */
10718    if (shmem > 0) {
10719        REG_WR(sc, shmem + validity_offset, 0);
10720    }
10721}
10722
10723#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
10724#define MCP_ONE_TIMEOUT  100    /* 100 ms */
10725
10726static void
10727bxe_mcp_wait_one(struct bxe_softc *sc)
10728{
10729    /* special handling for emulation and FPGA (10 times longer) */
10730    if (CHIP_REV_IS_SLOW(sc)) {
10731        DELAY((MCP_ONE_TIMEOUT*10) * 1000);
10732    } else {
10733        DELAY((MCP_ONE_TIMEOUT) * 1000);
10734    }
10735}
10736
10737/* initialize shmem_base and waits for validity signature to appear */
10738static int
10739bxe_init_shmem(struct bxe_softc *sc)
10740{
10741    int cnt = 0;
10742    uint32_t val = 0;
10743
10744    do {
10745        sc->devinfo.shmem_base     =
10746        sc->link_params.shmem_base =
10747            REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10748
10749        if (sc->devinfo.shmem_base) {
10750            val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
10751            if (val & SHR_MEM_VALIDITY_MB)
10752                return (0);
10753        }
10754
10755        bxe_mcp_wait_one(sc);
10756
10757    } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
10758
10759    BLOGE(sc, "BAD MCP validity signature\n");
10760
10761    return (-1);
10762}
10763
10764static int
10765bxe_reset_mcp_comp(struct bxe_softc *sc,
10766                   uint32_t         magic_val)
10767{
10768    int rc = bxe_init_shmem(sc);
10769
10770    /* Restore the `magic' bit value */
10771    if (!CHIP_IS_E1(sc)) {
10772        bxe_clp_reset_done(sc, magic_val);
10773    }
10774
10775    return (rc);
10776}
10777
10778static void
10779bxe_pxp_prep(struct bxe_softc *sc)
10780{
10781    if (!CHIP_IS_E1(sc)) {
10782        REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
10783        REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
10784        wmb();
10785    }
10786}
10787
10788/*
10789 * Reset the whole chip except for:
10790 *      - PCIE core
10791 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10792 *      - IGU
10793 *      - MISC (including AEU)
10794 *      - GRC
10795 *      - RBCN, RBCP
10796 */
10797static void
10798bxe_process_kill_chip_reset(struct bxe_softc *sc,
10799                            uint8_t          global)
10800{
10801    uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
10802    uint32_t global_bits2, stay_reset2;
10803
10804    /*
10805     * Bits that have to be set in reset_mask2 if we want to reset 'global'
10806     * (per chip) blocks.
10807     */
10808    global_bits2 =
10809        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
10810        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
10811
10812    /*
10813     * Don't reset the following blocks.
10814     * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
10815     *            reset, as in 4 port device they might still be owned
10816     *            by the MCP (there is only one leader per path).
10817     */
10818    not_reset_mask1 =
10819        MISC_REGISTERS_RESET_REG_1_RST_HC |
10820        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
10821        MISC_REGISTERS_RESET_REG_1_RST_PXP;
10822
10823    not_reset_mask2 =
10824        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
10825        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
10826        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
10827        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
10828        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
10829        MISC_REGISTERS_RESET_REG_2_RST_GRC  |
10830        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
10831        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
10832        MISC_REGISTERS_RESET_REG_2_RST_ATC |
10833        MISC_REGISTERS_RESET_REG_2_PGLC |
10834        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
10835        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
10836        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
10837        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
10838        MISC_REGISTERS_RESET_REG_2_UMAC0 |
10839        MISC_REGISTERS_RESET_REG_2_UMAC1;
10840
10841    /*
10842     * Keep the following blocks in reset:
10843     *  - all xxMACs are handled by the elink code.
10844     */
10845    stay_reset2 =
10846        MISC_REGISTERS_RESET_REG_2_XMAC |
10847        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
10848
10849    /* Full reset masks according to the chip */
10850    reset_mask1 = 0xffffffff;
10851
10852    if (CHIP_IS_E1(sc))
10853        reset_mask2 = 0xffff;
10854    else if (CHIP_IS_E1H(sc))
10855        reset_mask2 = 0x1ffff;
10856    else if (CHIP_IS_E2(sc))
10857        reset_mask2 = 0xfffff;
10858    else /* CHIP_IS_E3 */
10859        reset_mask2 = 0x3ffffff;
10860
10861    /* Don't reset global blocks unless we need to */
10862    if (!global)
10863        reset_mask2 &= ~global_bits2;
10864
10865    /*
10866     * In case of attention in the QM, we need to reset PXP
10867     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
10868     * because otherwise QM reset would release 'close the gates' shortly
10869     * before resetting the PXP, then the PSWRQ would send a write
10870     * request to PGLUE. Then when PXP is reset, PGLUE would try to
10871     * read the payload data from PSWWR, but PSWWR would not
10872     * respond. The write queue in PGLUE would stuck, dmae commands
10873     * would not return. Therefore it's important to reset the second
10874     * reset register (containing the
10875     * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
10876     * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
10877     * bit).
10878     */
10879    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
10880           reset_mask2 & (~not_reset_mask2));
10881
10882    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
10883           reset_mask1 & (~not_reset_mask1));
10884
10885    mb();
10886    wmb();
10887
10888    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
10889           reset_mask2 & (~stay_reset2));
10890
10891    mb();
10892    wmb();
10893
10894    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
10895    wmb();
10896}
10897
10898static int
10899bxe_process_kill(struct bxe_softc *sc,
10900                 uint8_t          global)
10901{
10902    int cnt = 1000;
10903    uint32_t val = 0;
10904    uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
10905    uint32_t tags_63_32 = 0;
10906
10907    /* Empty the Tetris buffer, wait for 1s */
10908    do {
10909        sr_cnt  = REG_RD(sc, PXP2_REG_RD_SR_CNT);
10910        blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
10911        port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
10912        port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
10913        pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
10914        if (CHIP_IS_E3(sc)) {
10915            tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
10916        }
10917
10918        if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
10919            ((port_is_idle_0 & 0x1) == 0x1) &&
10920            ((port_is_idle_1 & 0x1) == 0x1) &&
10921            (pgl_exp_rom2 == 0xffffffff) &&
10922            (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
10923            break;
10924        DELAY(1000);
10925    } while (cnt-- > 0);
10926
10927    if (cnt <= 0) {
10928        BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
10929                  "are still outstanding read requests after 1s! "
10930                  "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
10931                  "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
10932              sr_cnt, blk_cnt, port_is_idle_0,
10933              port_is_idle_1, pgl_exp_rom2);
10934        return (-1);
10935    }
10936
10937    mb();
10938
10939    /* Close gates #2, #3 and #4 */
10940    bxe_set_234_gates(sc, TRUE);
10941
10942    /* Poll for IGU VQs for 57712 and newer chips */
10943    if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
10944        return (-1);
10945    }
10946
10947    /* XXX indicate that "process kill" is in progress to MCP */
10948
10949    /* clear "unprepared" bit */
10950    REG_WR(sc, MISC_REG_UNPREPARED, 0);
10951    mb();
10952
10953    /* Make sure all is written to the chip before the reset */
10954    wmb();
10955
10956    /*
10957     * Wait for 1ms to empty GLUE and PCI-E core queues,
10958     * PSWHST, GRC and PSWRD Tetris buffer.
10959     */
10960    DELAY(1000);
10961
10962    /* Prepare to chip reset: */
10963    /* MCP */
10964    if (global) {
10965        bxe_reset_mcp_prep(sc, &val);
10966    }
10967
10968    /* PXP */
10969    bxe_pxp_prep(sc);
10970    mb();
10971
10972    /* reset the chip */
10973    bxe_process_kill_chip_reset(sc, global);
10974    mb();
10975
10976    /* clear errors in PGB */
10977    if (!CHIP_IS_E1(sc))
10978        REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
10979
10980    /* Recover after reset: */
10981    /* MCP */
10982    if (global && bxe_reset_mcp_comp(sc, val)) {
10983        return (-1);
10984    }
10985
10986    /* XXX add resetting the NO_MCP mode DB here */
10987
10988    /* Open the gates #2, #3 and #4 */
10989    bxe_set_234_gates(sc, FALSE);
10990
10991    /* XXX
10992     * IGU/AEU preparation bring back the AEU/IGU to a reset state
10993     * re-enable attentions
10994     */
10995
10996    return (0);
10997}
10998
10999static int
11000bxe_leader_reset(struct bxe_softc *sc)
11001{
11002    int rc = 0;
11003    uint8_t global = bxe_reset_is_global(sc);
11004    uint32_t load_code;
11005
11006    /*
11007     * If not going to reset MCP, load "fake" driver to reset HW while
11008     * driver is owner of the HW.
11009     */
11010    if (!global && !BXE_NOMCP(sc)) {
11011        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11012                                   DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11013        if (!load_code) {
11014            BLOGE(sc, "MCP response failure, aborting\n");
11015            rc = -1;
11016            goto exit_leader_reset;
11017        }
11018
11019        if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11020            (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11021            BLOGE(sc, "MCP unexpected response, aborting\n");
11022            rc = -1;
11023            goto exit_leader_reset2;
11024        }
11025
11026        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11027        if (!load_code) {
11028            BLOGE(sc, "MCP response failure, aborting\n");
11029            rc = -1;
11030            goto exit_leader_reset2;
11031        }
11032    }
11033
11034    /* try to recover after the failure */
11035    if (bxe_process_kill(sc, global)) {
11036        BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11037        rc = -1;
11038        goto exit_leader_reset2;
11039    }
11040
11041    /*
11042     * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11043     * state.
11044     */
11045    bxe_set_reset_done(sc);
11046    if (global) {
11047        bxe_clear_reset_global(sc);
11048    }
11049
11050exit_leader_reset2:
11051
11052    /* unload "fake driver" if it was loaded */
11053    if (!global && !BXE_NOMCP(sc)) {
11054        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11055        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11056    }
11057
11058exit_leader_reset:
11059
11060    sc->is_leader = 0;
11061    bxe_release_leader_lock(sc);
11062
11063    mb();
11064    return (rc);
11065}
11066
11067/*
11068 * prepare INIT transition, parameters configured:
11069 *   - HC configuration
11070 *   - Queue's CDU context
11071 */
11072static void
11073bxe_pf_q_prep_init(struct bxe_softc               *sc,
11074                   struct bxe_fastpath            *fp,
11075                   struct ecore_queue_init_params *init_params)
11076{
11077    uint8_t cos;
11078    int cxt_index, cxt_offset;
11079
11080    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11081    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11082
11083    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11084    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11085
11086    /* HC rate */
11087    init_params->rx.hc_rate =
11088        sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11089    init_params->tx.hc_rate =
11090        sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
11091
11092    /* FW SB ID */
11093    init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11094
11095    /* CQ index among the SB indices */
11096    init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11097    init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11098
11099    /* set maximum number of COSs supported by this queue */
11100    init_params->max_cos = sc->max_cos;
11101
11102    BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11103          fp->index, init_params->max_cos);
11104
11105    /* set the context pointers queue object */
11106    for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11107        /* XXX change index/cid here if ever support multiple tx CoS */
11108        /* fp->txdata[cos]->cid */
11109        cxt_index = fp->index / ILT_PAGE_CIDS;
11110        cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11111        init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
11112    }
11113}
11114
11115/* set flags that are common for the Tx-only and not normal connections */
11116static unsigned long
11117bxe_get_common_flags(struct bxe_softc    *sc,
11118                     struct bxe_fastpath *fp,
11119                     uint8_t             zero_stats)
11120{
11121    unsigned long flags = 0;
11122
11123    /* PF driver will always initialize the Queue to an ACTIVE state */
11124    bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11125
11126    /*
11127     * tx only connections collect statistics (on the same index as the
11128     * parent connection). The statistics are zeroed when the parent
11129     * connection is initialized.
11130     */
11131
11132    bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11133    if (zero_stats) {
11134        bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11135    }
11136
11137    /*
11138     * tx only connections can support tx-switching, though their
11139     * CoS-ness doesn't survive the loopback
11140     */
11141    if (sc->flags & BXE_TX_SWITCHING) {
11142        bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11143    }
11144
11145    bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11146
11147    return (flags);
11148}
11149
11150static unsigned long
11151bxe_get_q_flags(struct bxe_softc    *sc,
11152                struct bxe_fastpath *fp,
11153                uint8_t             leading)
11154{
11155    unsigned long flags = 0;
11156
11157    if (IS_MF_SD(sc)) {
11158        bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11159    }
11160
11161    if (sc->ifnet->if_capenable & IFCAP_LRO) {
11162        bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11163        bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11164    }
11165
11166    if (leading) {
11167        bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11168        bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11169    }
11170
11171    bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11172
11173    /* merge with common flags */
11174    return (flags | bxe_get_common_flags(sc, fp, TRUE));
11175}
11176
11177static void
11178bxe_pf_q_prep_general(struct bxe_softc                  *sc,
11179                      struct bxe_fastpath               *fp,
11180                      struct ecore_general_setup_params *gen_init,
11181                      uint8_t                           cos)
11182{
11183    gen_init->stat_id = bxe_stats_id(fp);
11184    gen_init->spcl_id = fp->cl_id;
11185    gen_init->mtu = sc->mtu;
11186    gen_init->cos = cos;
11187}
11188
11189static void
11190bxe_pf_rx_q_prep(struct bxe_softc              *sc,
11191                 struct bxe_fastpath           *fp,
11192                 struct rxq_pause_params       *pause,
11193                 struct ecore_rxq_setup_params *rxq_init)
11194{
11195    uint8_t max_sge = 0;
11196    uint16_t sge_sz = 0;
11197    uint16_t tpa_agg_size = 0;
11198
11199    pause->sge_th_lo = SGE_TH_LO(sc);
11200    pause->sge_th_hi = SGE_TH_HI(sc);
11201
11202    /* validate SGE ring has enough to cross high threshold */
11203    if (sc->dropless_fc &&
11204            (pause->sge_th_hi + FW_PREFETCH_CNT) >
11205            (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11206        BLOGW(sc, "sge ring threshold limit\n");
11207    }
11208
11209    /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11210    tpa_agg_size = (2 * sc->mtu);
11211    if (tpa_agg_size < sc->max_aggregation_size) {
11212        tpa_agg_size = sc->max_aggregation_size;
11213    }
11214
11215    max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11216    max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11217                   (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11218    sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11219
11220    /* pause - not for e1 */
11221    if (!CHIP_IS_E1(sc)) {
11222        pause->bd_th_lo = BD_TH_LO(sc);
11223        pause->bd_th_hi = BD_TH_HI(sc);
11224
11225        pause->rcq_th_lo = RCQ_TH_LO(sc);
11226        pause->rcq_th_hi = RCQ_TH_HI(sc);
11227
11228        /* validate rings have enough entries to cross high thresholds */
11229        if (sc->dropless_fc &&
11230            pause->bd_th_hi + FW_PREFETCH_CNT >
11231            sc->rx_ring_size) {
11232            BLOGW(sc, "rx bd ring threshold limit\n");
11233        }
11234
11235        if (sc->dropless_fc &&
11236            pause->rcq_th_hi + FW_PREFETCH_CNT >
11237            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11238            BLOGW(sc, "rcq ring threshold limit\n");
11239        }
11240
11241        pause->pri_map = 1;
11242    }
11243
11244    /* rxq setup */
11245    rxq_init->dscr_map   = fp->rx_dma.paddr;
11246    rxq_init->sge_map    = fp->rx_sge_dma.paddr;
11247    rxq_init->rcq_map    = fp->rcq_dma.paddr;
11248    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11249
11250    /*
11251     * This should be a maximum number of data bytes that may be
11252     * placed on the BD (not including paddings).
11253     */
11254    rxq_init->buf_sz = (fp->rx_buf_size -
11255                        IP_HEADER_ALIGNMENT_PADDING);
11256
11257    rxq_init->cl_qzone_id     = fp->cl_qzone_id;
11258    rxq_init->tpa_agg_sz      = tpa_agg_size;
11259    rxq_init->sge_buf_sz      = sge_sz;
11260    rxq_init->max_sges_pkt    = max_sge;
11261    rxq_init->rss_engine_id   = SC_FUNC(sc);
11262    rxq_init->mcast_engine_id = SC_FUNC(sc);
11263
11264    /*
11265     * Maximum number or simultaneous TPA aggregation for this Queue.
11266     * For PF Clients it should be the maximum available number.
11267     * VF driver(s) may want to define it to a smaller value.
11268     */
11269    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11270
11271    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11272    rxq_init->fw_sb_id = fp->fw_sb_id;
11273
11274    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11275
11276    /*
11277     * configure silent vlan removal
11278     * if multi function mode is afex, then mask default vlan
11279     */
11280    if (IS_MF_AFEX(sc)) {
11281        rxq_init->silent_removal_value =
11282            sc->devinfo.mf_info.afex_def_vlan_tag;
11283        rxq_init->silent_removal_mask = EVL_VLID_MASK;
11284    }
11285}
11286
11287static void
11288bxe_pf_tx_q_prep(struct bxe_softc              *sc,
11289                 struct bxe_fastpath           *fp,
11290                 struct ecore_txq_setup_params *txq_init,
11291                 uint8_t                       cos)
11292{
11293    /*
11294     * XXX If multiple CoS is ever supported then each fastpath structure
11295     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11296     * fp->txdata[cos]->tx_dma.paddr;
11297     */
11298    txq_init->dscr_map     = fp->tx_dma.paddr;
11299    txq_init->sb_cq_index  = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11300    txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11301    txq_init->fw_sb_id     = fp->fw_sb_id;
11302
11303    /*
11304     * set the TSS leading client id for TX classfication to the
11305     * leading RSS client id
11306     */
11307    txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11308}
11309
11310/*
11311 * This function performs 2 steps in a queue state machine:
11312 *   1) RESET->INIT
11313 *   2) INIT->SETUP
11314 */
11315static int
11316bxe_setup_queue(struct bxe_softc    *sc,
11317                struct bxe_fastpath *fp,
11318                uint8_t             leading)
11319{
11320    struct ecore_queue_state_params q_params = { NULL };
11321    struct ecore_queue_setup_params *setup_params =
11322                        &q_params.params.setup;
11323    int rc;
11324
11325    BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11326
11327    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11328
11329    q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11330
11331    /* we want to wait for completion in this context */
11332    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11333
11334    /* prepare the INIT parameters */
11335    bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11336
11337    /* Set the command */
11338    q_params.cmd = ECORE_Q_CMD_INIT;
11339
11340    /* Change the state to INIT */
11341    rc = ecore_queue_state_change(sc, &q_params);
11342    if (rc) {
11343        BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11344        return (rc);
11345    }
11346
11347    BLOGD(sc, DBG_LOAD, "init complete\n");
11348
11349    /* now move the Queue to the SETUP state */
11350    memset(setup_params, 0, sizeof(*setup_params));
11351
11352    /* set Queue flags */
11353    setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11354
11355    /* set general SETUP parameters */
11356    bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11357                          FIRST_TX_COS_INDEX);
11358
11359    bxe_pf_rx_q_prep(sc, fp,
11360                     &setup_params->pause_params,
11361                     &setup_params->rxq_params);
11362
11363    bxe_pf_tx_q_prep(sc, fp,
11364                     &setup_params->txq_params,
11365                     FIRST_TX_COS_INDEX);
11366
11367    /* Set the command */
11368    q_params.cmd = ECORE_Q_CMD_SETUP;
11369
11370    /* change the state to SETUP */
11371    rc = ecore_queue_state_change(sc, &q_params);
11372    if (rc) {
11373        BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11374        return (rc);
11375    }
11376
11377    return (rc);
11378}
11379
11380static int
11381bxe_setup_leading(struct bxe_softc *sc)
11382{
11383    return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11384}
11385
11386static int
11387bxe_config_rss_pf(struct bxe_softc            *sc,
11388                  struct ecore_rss_config_obj *rss_obj,
11389                  uint8_t                     config_hash)
11390{
11391    struct ecore_config_rss_params params = { NULL };
11392    int i;
11393
11394    /*
11395     * Although RSS is meaningless when there is a single HW queue we
11396     * still need it enabled in order to have HW Rx hash generated.
11397     */
11398
11399    params.rss_obj = rss_obj;
11400
11401    bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11402
11403    bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11404
11405    /* RSS configuration */
11406    bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11407    bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11408    bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11409    bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11410    if (rss_obj->udp_rss_v4) {
11411        bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11412    }
11413    if (rss_obj->udp_rss_v6) {
11414        bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11415    }
11416
11417    /* Hash bits */
11418    params.rss_result_mask = MULTI_MASK;
11419
11420    memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11421
11422    if (config_hash) {
11423        /* RSS keys */
11424        for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11425            params.rss_key[i] = arc4random();
11426        }
11427
11428        bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
11429    }
11430
11431    return (ecore_config_rss(sc, &params));
11432}
11433
11434static int
11435bxe_config_rss_eth(struct bxe_softc *sc,
11436                   uint8_t          config_hash)
11437{
11438    return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11439}
11440
11441static int
11442bxe_init_rss_pf(struct bxe_softc *sc)
11443{
11444    uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
11445    int i;
11446
11447    /*
11448     * Prepare the initial contents of the indirection table if
11449     * RSS is enabled
11450     */
11451    for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11452        sc->rss_conf_obj.ind_table[i] =
11453            (sc->fp->cl_id + (i % num_eth_queues));
11454    }
11455
11456    if (sc->udp_rss) {
11457        sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11458    }
11459
11460    /*
11461     * For 57710 and 57711 SEARCHER configuration (rss_keys) is
11462     * per-port, so if explicit configuration is needed, do it only
11463     * for a PMF.
11464     *
11465     * For 57712 and newer it's a per-function configuration.
11466     */
11467    return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
11468}
11469
11470static int
11471bxe_set_mac_one(struct bxe_softc          *sc,
11472                uint8_t                   *mac,
11473                struct ecore_vlan_mac_obj *obj,
11474                uint8_t                   set,
11475                int                       mac_type,
11476                unsigned long             *ramrod_flags)
11477{
11478    struct ecore_vlan_mac_ramrod_params ramrod_param;
11479    int rc;
11480
11481    memset(&ramrod_param, 0, sizeof(ramrod_param));
11482
11483    /* fill in general parameters */
11484    ramrod_param.vlan_mac_obj = obj;
11485    ramrod_param.ramrod_flags = *ramrod_flags;
11486
11487    /* fill a user request section if needed */
11488    if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
11489        memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
11490
11491        bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
11492
11493        /* Set the command: ADD or DEL */
11494        ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
11495                                            ECORE_VLAN_MAC_DEL;
11496    }
11497
11498    rc = ecore_config_vlan_mac(sc, &ramrod_param);
11499
11500    if (rc == ECORE_EXISTS) {
11501        BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
11502        /* do not treat adding same MAC as error */
11503        rc = 0;
11504    } else if (rc < 0) {
11505        BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
11506    }
11507
11508    return (rc);
11509}
11510
11511static int
11512bxe_set_eth_mac(struct bxe_softc *sc,
11513                uint8_t          set)
11514{
11515    unsigned long ramrod_flags = 0;
11516
11517    BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
11518
11519    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11520
11521    /* Eth MAC is set on RSS leading client (fp[0]) */
11522    return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11523                            &sc->sp_objs->mac_obj,
11524                            set, ECORE_ETH_MAC, &ramrod_flags));
11525}
11526
11527static int
11528bxe_get_cur_phy_idx(struct bxe_softc *sc)
11529{
11530    uint32_t sel_phy_idx = 0;
11531
11532    if (sc->link_params.num_phys <= 1) {
11533        return (ELINK_INT_PHY);
11534    }
11535
11536    if (sc->link_vars.link_up) {
11537        sel_phy_idx = ELINK_EXT_PHY1;
11538        /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
11539        if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11540            (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11541             ELINK_SUPPORTED_FIBRE))
11542            sel_phy_idx = ELINK_EXT_PHY2;
11543    } else {
11544        switch (elink_phy_selection(&sc->link_params)) {
11545        case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
11546        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11547        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11548               sel_phy_idx = ELINK_EXT_PHY1;
11549               break;
11550        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11551        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11552               sel_phy_idx = ELINK_EXT_PHY2;
11553               break;
11554        }
11555    }
11556
11557    return (sel_phy_idx);
11558}
11559
11560static int
11561bxe_get_link_cfg_idx(struct bxe_softc *sc)
11562{
11563    uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
11564
11565    /*
11566     * The selected activated PHY is always after swapping (in case PHY
11567     * swapping is enabled). So when swapping is enabled, we need to reverse
11568     * the configuration
11569     */
11570
11571    if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11572        if (sel_phy_idx == ELINK_EXT_PHY1)
11573            sel_phy_idx = ELINK_EXT_PHY2;
11574        else if (sel_phy_idx == ELINK_EXT_PHY2)
11575            sel_phy_idx = ELINK_EXT_PHY1;
11576    }
11577
11578    return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
11579}
11580
11581static void
11582bxe_set_requested_fc(struct bxe_softc *sc)
11583{
11584    /*
11585     * Initialize link parameters structure variables
11586     * It is recommended to turn off RX FC for jumbo frames
11587     * for better performance
11588     */
11589    if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11590        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11591    } else {
11592        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11593    }
11594}
11595
11596static void
11597bxe_calc_fc_adv(struct bxe_softc *sc)
11598{
11599    uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
11600    switch (sc->link_vars.ieee_fc &
11601            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
11602    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
11603    default:
11604        sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11605                                           ADVERTISED_Pause);
11606        break;
11607
11608    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
11609        sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11610                                          ADVERTISED_Pause);
11611        break;
11612
11613    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
11614        sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11615        break;
11616    }
11617}
11618
11619static uint16_t
11620bxe_get_mf_speed(struct bxe_softc *sc)
11621{
11622    uint16_t line_speed = sc->link_vars.line_speed;
11623    if (IS_MF(sc)) {
11624        uint16_t maxCfg =
11625            bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11626
11627        /* calculate the current MAX line speed limit for the MF devices */
11628        if (IS_MF_SI(sc)) {
11629            line_speed = (line_speed * maxCfg) / 100;
11630        } else { /* SD mode */
11631            uint16_t vn_max_rate = maxCfg * 100;
11632
11633            if (vn_max_rate < line_speed) {
11634                line_speed = vn_max_rate;
11635            }
11636        }
11637    }
11638
11639    return (line_speed);
11640}
11641
11642static void
11643bxe_fill_report_data(struct bxe_softc            *sc,
11644                     struct bxe_link_report_data *data)
11645{
11646    uint16_t line_speed = bxe_get_mf_speed(sc);
11647
11648    memset(data, 0, sizeof(*data));
11649
11650    /* fill the report data with the effective line speed */
11651    data->line_speed = line_speed;
11652
11653    /* Link is down */
11654    if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11655        bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11656    }
11657
11658    /* Full DUPLEX */
11659    if (sc->link_vars.duplex == DUPLEX_FULL) {
11660        bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11661    }
11662
11663    /* Rx Flow Control is ON */
11664    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11665        bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11666    }
11667
11668    /* Tx Flow Control is ON */
11669    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11670        bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11671    }
11672}
11673
11674/* report link status to OS, should be called under phy_lock */
11675static void
11676bxe_link_report_locked(struct bxe_softc *sc)
11677{
11678    struct bxe_link_report_data cur_data;
11679
11680    /* reread mf_cfg */
11681    if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
11682        bxe_read_mf_cfg(sc);
11683    }
11684
11685    /* Read the current link report info */
11686    bxe_fill_report_data(sc, &cur_data);
11687
11688    /* Don't report link down or exactly the same link status twice */
11689    if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11690        (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11691                      &sc->last_reported_link.link_report_flags) &&
11692         bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11693                      &cur_data.link_report_flags))) {
11694        return;
11695    }
11696
11697    sc->link_cnt++;
11698
11699    /* report new link params and remember the state for the next time */
11700    memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11701
11702    if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11703                     &cur_data.link_report_flags)) {
11704        if_link_state_change(sc->ifnet, LINK_STATE_DOWN);
11705        BLOGI(sc, "NIC Link is Down\n");
11706    } else {
11707        const char *duplex;
11708        const char *flow;
11709
11710        if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
11711                                   &cur_data.link_report_flags)) {
11712            duplex = "full";
11713        } else {
11714            duplex = "half";
11715        }
11716
11717        /*
11718         * Handle the FC at the end so that only these flags would be
11719         * possibly set. This way we may easily check if there is no FC
11720         * enabled.
11721         */
11722        if (cur_data.link_report_flags) {
11723            if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11724                             &cur_data.link_report_flags) &&
11725                bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11726                             &cur_data.link_report_flags)) {
11727                flow = "ON - receive & transmit";
11728            } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11729                                    &cur_data.link_report_flags) &&
11730                       !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11731                                     &cur_data.link_report_flags)) {
11732                flow = "ON - receive";
11733            } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11734                                     &cur_data.link_report_flags) &&
11735                       bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11736                                    &cur_data.link_report_flags)) {
11737                flow = "ON - transmit";
11738            } else {
11739                flow = "none"; /* possible? */
11740            }
11741        } else {
11742            flow = "none";
11743        }
11744
11745        if_link_state_change(sc->ifnet, LINK_STATE_UP);
11746        BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
11747              cur_data.line_speed, duplex, flow);
11748    }
11749}
11750
11751static void
11752bxe_link_report(struct bxe_softc *sc)
11753{
11754    bxe_acquire_phy_lock(sc);
11755    bxe_link_report_locked(sc);
11756    bxe_release_phy_lock(sc);
11757}
11758
11759static void
11760bxe_link_status_update(struct bxe_softc *sc)
11761{
11762    if (sc->state != BXE_STATE_OPEN) {
11763        return;
11764    }
11765
11766    if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
11767        elink_link_status_update(&sc->link_params, &sc->link_vars);
11768    } else {
11769        sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11770                                  ELINK_SUPPORTED_10baseT_Full |
11771                                  ELINK_SUPPORTED_100baseT_Half |
11772                                  ELINK_SUPPORTED_100baseT_Full |
11773                                  ELINK_SUPPORTED_1000baseT_Full |
11774                                  ELINK_SUPPORTED_2500baseX_Full |
11775                                  ELINK_SUPPORTED_10000baseT_Full |
11776                                  ELINK_SUPPORTED_TP |
11777                                  ELINK_SUPPORTED_FIBRE |
11778                                  ELINK_SUPPORTED_Autoneg |
11779                                  ELINK_SUPPORTED_Pause |
11780                                  ELINK_SUPPORTED_Asym_Pause);
11781        sc->port.advertising[0] = sc->port.supported[0];
11782
11783        sc->link_params.sc                = sc;
11784        sc->link_params.port              = SC_PORT(sc);
11785        sc->link_params.req_duplex[0]     = DUPLEX_FULL;
11786        sc->link_params.req_flow_ctrl[0]  = ELINK_FLOW_CTRL_NONE;
11787        sc->link_params.req_line_speed[0] = SPEED_10000;
11788        sc->link_params.speed_cap_mask[0] = 0x7f0000;
11789        sc->link_params.switch_cfg        = ELINK_SWITCH_CFG_10G;
11790
11791        if (CHIP_REV_IS_FPGA(sc)) {
11792            sc->link_vars.mac_type    = ELINK_MAC_TYPE_EMAC;
11793            sc->link_vars.line_speed  = ELINK_SPEED_1000;
11794            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11795                                         LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
11796        } else {
11797            sc->link_vars.mac_type    = ELINK_MAC_TYPE_BMAC;
11798            sc->link_vars.line_speed  = ELINK_SPEED_10000;
11799            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11800                                         LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
11801        }
11802
11803        sc->link_vars.link_up = 1;
11804
11805        sc->link_vars.duplex    = DUPLEX_FULL;
11806        sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11807
11808        if (IS_PF(sc)) {
11809            REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11810            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11811            bxe_link_report(sc);
11812        }
11813    }
11814
11815    if (IS_PF(sc)) {
11816        if (sc->link_vars.link_up) {
11817            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11818        } else {
11819            bxe_stats_handle(sc, STATS_EVENT_STOP);
11820        }
11821        bxe_link_report(sc);
11822    } else {
11823        bxe_link_report(sc);
11824        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11825    }
11826}
11827
11828static int
11829bxe_initial_phy_init(struct bxe_softc *sc,
11830                     int              load_mode)
11831{
11832    int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
11833    uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
11834    struct elink_params *lp = &sc->link_params;
11835
11836    bxe_set_requested_fc(sc);
11837
11838    if (CHIP_REV_IS_SLOW(sc)) {
11839        uint32_t bond = CHIP_BOND_ID(sc);
11840        uint32_t feat = 0;
11841
11842        if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
11843            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11844        } else if (bond & 0x4) {
11845            if (CHIP_IS_E3(sc)) {
11846                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
11847            } else {
11848                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11849            }
11850        } else if (bond & 0x8) {
11851            if (CHIP_IS_E3(sc)) {
11852                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
11853            } else {
11854                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11855            }
11856        }
11857
11858        /* disable EMAC for E3 and above */
11859        if (bond & 0x2) {
11860            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11861        }
11862
11863        sc->link_params.feature_config_flags |= feat;
11864    }
11865
11866    bxe_acquire_phy_lock(sc);
11867
11868    if (load_mode == LOAD_DIAG) {
11869        lp->loopback_mode = ELINK_LOOPBACK_XGXS;
11870        /* Prefer doing PHY loopback at 10G speed, if possible */
11871        if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
11872            if (lp->speed_cap_mask[cfg_idx] &
11873                PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
11874                lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
11875            } else {
11876                lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
11877            }
11878        }
11879    }
11880
11881    if (load_mode == LOAD_LOOPBACK_EXT) {
11882        lp->loopback_mode = ELINK_LOOPBACK_EXT;
11883    }
11884
11885    rc = elink_phy_init(&sc->link_params, &sc->link_vars);
11886
11887    bxe_release_phy_lock(sc);
11888
11889    bxe_calc_fc_adv(sc);
11890
11891    if (sc->link_vars.link_up) {
11892        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11893        bxe_link_report(sc);
11894    }
11895
11896    if (!CHIP_REV_IS_SLOW(sc)) {
11897        bxe_periodic_start(sc);
11898    }
11899
11900    sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
11901    return (rc);
11902}
11903
11904/* must be called under IF_ADDR_LOCK */
11905static int
11906bxe_init_mcast_macs_list(struct bxe_softc                 *sc,
11907                         struct ecore_mcast_ramrod_params *p)
11908{
11909    struct ifnet *ifp = sc->ifnet;
11910    int mc_count = 0;
11911    struct ifmultiaddr *ifma;
11912    struct ecore_mcast_list_elem *mc_mac;
11913
11914    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
11915        if (ifma->ifma_addr->sa_family != AF_LINK) {
11916            continue;
11917        }
11918
11919        mc_count++;
11920    }
11921
11922    ECORE_LIST_INIT(&p->mcast_list);
11923    p->mcast_list_len = 0;
11924
11925    if (!mc_count) {
11926        return (0);
11927    }
11928
11929    mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF,
11930                    (M_NOWAIT | M_ZERO));
11931    if (!mc_mac) {
11932        BLOGE(sc, "Failed to allocate temp mcast list\n");
11933        return (-1);
11934    }
11935    bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
11936
11937    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
11938        if (ifma->ifma_addr->sa_family != AF_LINK) {
11939            continue;
11940        }
11941
11942        mc_mac->mac = (uint8_t *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
11943        ECORE_LIST_PUSH_TAIL(&mc_mac->link, &p->mcast_list);
11944
11945        BLOGD(sc, DBG_LOAD,
11946              "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n",
11947              mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2],
11948              mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]);
11949
11950        mc_mac++;
11951    }
11952
11953    p->mcast_list_len = mc_count;
11954
11955    return (0);
11956}
11957
11958static void
11959bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p)
11960{
11961    struct ecore_mcast_list_elem *mc_mac =
11962        ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
11963                               struct ecore_mcast_list_elem,
11964                               link);
11965
11966    if (mc_mac) {
11967        /* only a single free as all mc_macs are in the same heap array */
11968        free(mc_mac, M_DEVBUF);
11969    }
11970}
11971
11972static int
11973bxe_set_mc_list(struct bxe_softc *sc)
11974{
11975    struct ecore_mcast_ramrod_params rparam = { NULL };
11976    int rc = 0;
11977
11978    rparam.mcast_obj = &sc->mcast_obj;
11979
11980    BXE_MCAST_LOCK(sc);
11981
11982    /* first, clear all configured multicast MACs */
11983    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
11984    if (rc < 0) {
11985        BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
11986        BXE_MCAST_UNLOCK(sc);
11987        return (rc);
11988    }
11989
11990    /* configure a new MACs list */
11991    rc = bxe_init_mcast_macs_list(sc, &rparam);
11992    if (rc) {
11993        BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc);
11994        BXE_MCAST_UNLOCK(sc);
11995        return (rc);
11996    }
11997
11998    /* Now add the new MACs */
11999    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12000    if (rc < 0) {
12001        BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12002    }
12003
12004    bxe_free_mcast_macs_list(&rparam);
12005
12006    BXE_MCAST_UNLOCK(sc);
12007
12008    return (rc);
12009}
12010
12011static int
12012bxe_set_uc_list(struct bxe_softc *sc)
12013{
12014    struct ifnet *ifp = sc->ifnet;
12015    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12016    struct ifaddr *ifa;
12017    unsigned long ramrod_flags = 0;
12018    int rc;
12019
12020#if __FreeBSD_version < 800000
12021    IF_ADDR_LOCK(ifp);
12022#else
12023    if_addr_rlock(ifp);
12024#endif
12025
12026    /* first schedule a cleanup up of old configuration */
12027    rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12028    if (rc < 0) {
12029        BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12030#if __FreeBSD_version < 800000
12031        IF_ADDR_UNLOCK(ifp);
12032#else
12033        if_addr_runlock(ifp);
12034#endif
12035        return (rc);
12036    }
12037
12038    ifa = ifp->if_addr;
12039    while (ifa) {
12040        if (ifa->ifa_addr->sa_family != AF_LINK) {
12041            ifa = TAILQ_NEXT(ifa, ifa_link);
12042            continue;
12043        }
12044
12045        rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
12046                             mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags);
12047        if (rc == -EEXIST) {
12048            BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12049            /* do not treat adding same MAC as an error */
12050            rc = 0;
12051        } else if (rc < 0) {
12052            BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc);
12053#if __FreeBSD_version < 800000
12054            IF_ADDR_UNLOCK(ifp);
12055#else
12056            if_addr_runlock(ifp);
12057#endif
12058            return (rc);
12059        }
12060
12061        ifa = TAILQ_NEXT(ifa, ifa_link);
12062    }
12063
12064#if __FreeBSD_version < 800000
12065    IF_ADDR_UNLOCK(ifp);
12066#else
12067    if_addr_runlock(ifp);
12068#endif
12069
12070    /* Execute the pending commands */
12071    bit_set(&ramrod_flags, RAMROD_CONT);
12072    return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12073                            ECORE_UC_LIST_MAC, &ramrod_flags));
12074}
12075
12076static void
12077bxe_set_rx_mode(struct bxe_softc *sc)
12078{
12079    struct ifnet *ifp = sc->ifnet;
12080    uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12081
12082    if (sc->state != BXE_STATE_OPEN) {
12083        BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12084        return;
12085    }
12086
12087    BLOGD(sc, DBG_SP, "ifp->if_flags=0x%x\n", ifp->if_flags);
12088
12089    if (ifp->if_flags & IFF_PROMISC) {
12090        rx_mode = BXE_RX_MODE_PROMISC;
12091    } else if ((ifp->if_flags & IFF_ALLMULTI) ||
12092               ((ifp->if_amcount > BXE_MAX_MULTICAST) &&
12093                CHIP_IS_E1(sc))) {
12094        rx_mode = BXE_RX_MODE_ALLMULTI;
12095    } else {
12096        if (IS_PF(sc)) {
12097            /* some multicasts */
12098            if (bxe_set_mc_list(sc) < 0) {
12099                rx_mode = BXE_RX_MODE_ALLMULTI;
12100            }
12101            if (bxe_set_uc_list(sc) < 0) {
12102                rx_mode = BXE_RX_MODE_PROMISC;
12103            }
12104        }
12105    }
12106
12107    sc->rx_mode = rx_mode;
12108
12109    /* schedule the rx_mode command */
12110    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12111        BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12112        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12113        return;
12114    }
12115
12116    if (IS_PF(sc)) {
12117        bxe_set_storm_rx_mode(sc);
12118    }
12119}
12120
12121
12122/* update flags in shmem */
12123static void
12124bxe_update_drv_flags(struct bxe_softc *sc,
12125                     uint32_t         flags,
12126                     uint32_t         set)
12127{
12128    uint32_t drv_flags;
12129
12130    if (SHMEM2_HAS(sc, drv_flags)) {
12131        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12132        drv_flags = SHMEM2_RD(sc, drv_flags);
12133
12134        if (set) {
12135            SET_FLAGS(drv_flags, flags);
12136        } else {
12137            RESET_FLAGS(drv_flags, flags);
12138        }
12139
12140        SHMEM2_WR(sc, drv_flags, drv_flags);
12141        BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12142
12143        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12144    }
12145}
12146
12147/* periodic timer callout routine, only runs when the interface is up */
12148
12149static void
12150bxe_periodic_callout_func(void *xsc)
12151{
12152    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12153    struct bxe_fastpath *fp;
12154    uint16_t tx_bd_avail;
12155    int i;
12156
12157    if (!BXE_CORE_TRYLOCK(sc)) {
12158        /* just bail and try again next time */
12159
12160        if ((sc->state == BXE_STATE_OPEN) &&
12161            (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12162            /* schedule the next periodic callout */
12163            callout_reset(&sc->periodic_callout, hz,
12164                          bxe_periodic_callout_func, sc);
12165        }
12166
12167        return;
12168    }
12169
12170    if ((sc->state != BXE_STATE_OPEN) ||
12171        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12172        BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12173        BXE_CORE_UNLOCK(sc);
12174        return;
12175    }
12176
12177#if __FreeBSD_version >= 800000
12178
12179    FOR_EACH_QUEUE(sc, i) {
12180        fp = &sc->fp[i];
12181
12182        if (BXE_FP_TX_TRYLOCK(fp)) {
12183            struct ifnet *ifp = sc->ifnet;
12184            /*
12185             * If interface was stopped due to unavailable
12186             * bds, try to process some tx completions
12187             */
12188            (void) bxe_txeof(sc, fp);
12189
12190            tx_bd_avail = bxe_tx_avail(sc, fp);
12191            if (tx_bd_avail >= BXE_TX_CLEANUP_THRESHOLD) {
12192                bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
12193            }
12194            BXE_FP_TX_UNLOCK(fp);
12195        }
12196    }
12197
12198#else
12199
12200    fp = &sc->fp[0];
12201    if (BXE_FP_TX_TRYLOCK(fp)) {
12202        struct ifnet *ifp = sc->ifnet;
12203        /*
12204         * If interface was stopped due to unavailable
12205         * bds, try to process some tx completions
12206         */
12207        (void) bxe_txeof(sc, fp);
12208
12209        tx_bd_avail = bxe_tx_avail(sc, fp);
12210        if (tx_bd_avail >= BXE_TX_CLEANUP_THRESHOLD) {
12211            bxe_tx_start_locked(sc, ifp, fp);
12212        }
12213
12214        BXE_FP_TX_UNLOCK(fp);
12215    }
12216
12217#endif /* #if __FreeBSD_version >= 800000 */
12218
12219    /* Check for TX timeouts on any fastpath. */
12220    FOR_EACH_QUEUE(sc, i) {
12221        if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12222            /* Ruh-Roh, chip was reset! */
12223            break;
12224        }
12225    }
12226
12227    if (!CHIP_REV_IS_SLOW(sc)) {
12228        /*
12229         * This barrier is needed to ensure the ordering between the writing
12230         * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
12231         * the reading here.
12232         */
12233        mb();
12234        if (sc->port.pmf) {
12235	    bxe_acquire_phy_lock(sc);
12236            elink_period_func(&sc->link_params, &sc->link_vars);
12237	    bxe_release_phy_lock(sc);
12238        }
12239    }
12240
12241    if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12242        int mb_idx = SC_FW_MB_IDX(sc);
12243        uint32_t drv_pulse;
12244        uint32_t mcp_pulse;
12245
12246        ++sc->fw_drv_pulse_wr_seq;
12247        sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12248
12249        drv_pulse = sc->fw_drv_pulse_wr_seq;
12250        bxe_drv_pulse(sc);
12251
12252        mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12253                     MCP_PULSE_SEQ_MASK);
12254
12255        /*
12256         * The delta between driver pulse and mcp response should
12257         * be 1 (before mcp response) or 0 (after mcp response).
12258         */
12259        if ((drv_pulse != mcp_pulse) &&
12260            (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12261            /* someone lost a heartbeat... */
12262            BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12263                  drv_pulse, mcp_pulse);
12264        }
12265    }
12266
12267    /* state is BXE_STATE_OPEN */
12268    bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12269
12270    BXE_CORE_UNLOCK(sc);
12271
12272    if ((sc->state == BXE_STATE_OPEN) &&
12273        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12274        /* schedule the next periodic callout */
12275        callout_reset(&sc->periodic_callout, hz,
12276                      bxe_periodic_callout_func, sc);
12277    }
12278}
12279
12280static void
12281bxe_periodic_start(struct bxe_softc *sc)
12282{
12283    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12284    callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12285}
12286
12287static void
12288bxe_periodic_stop(struct bxe_softc *sc)
12289{
12290    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12291    callout_drain(&sc->periodic_callout);
12292}
12293
12294/* start the controller */
12295static __noinline int
12296bxe_nic_load(struct bxe_softc *sc,
12297             int              load_mode)
12298{
12299    uint32_t val;
12300    int load_code = 0;
12301    int i, rc = 0;
12302
12303    BXE_CORE_LOCK_ASSERT(sc);
12304
12305    BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12306
12307    sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12308
12309    if (IS_PF(sc)) {
12310        /* must be called before memory allocation and HW init */
12311        bxe_ilt_set_info(sc);
12312    }
12313
12314    sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12315
12316    bxe_set_fp_rx_buf_size(sc);
12317
12318    if (bxe_alloc_fp_buffers(sc) != 0) {
12319        BLOGE(sc, "Failed to allocate fastpath memory\n");
12320        sc->state = BXE_STATE_CLOSED;
12321        rc = ENOMEM;
12322        goto bxe_nic_load_error0;
12323    }
12324
12325    if (bxe_alloc_mem(sc) != 0) {
12326        sc->state = BXE_STATE_CLOSED;
12327        rc = ENOMEM;
12328        goto bxe_nic_load_error0;
12329    }
12330
12331    if (bxe_alloc_fw_stats_mem(sc) != 0) {
12332        sc->state = BXE_STATE_CLOSED;
12333        rc = ENOMEM;
12334        goto bxe_nic_load_error0;
12335    }
12336
12337    if (IS_PF(sc)) {
12338        /* set pf load just before approaching the MCP */
12339        bxe_set_pf_load(sc);
12340
12341        /* if MCP exists send load request and analyze response */
12342        if (!BXE_NOMCP(sc)) {
12343            /* attempt to load pf */
12344            if (bxe_nic_load_request(sc, &load_code) != 0) {
12345                sc->state = BXE_STATE_CLOSED;
12346                rc = ENXIO;
12347                goto bxe_nic_load_error1;
12348            }
12349
12350            /* what did the MCP say? */
12351            if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12352                bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12353                sc->state = BXE_STATE_CLOSED;
12354                rc = ENXIO;
12355                goto bxe_nic_load_error2;
12356            }
12357        } else {
12358            BLOGI(sc, "Device has no MCP!\n");
12359            load_code = bxe_nic_load_no_mcp(sc);
12360        }
12361
12362        /* mark PMF if applicable */
12363        bxe_nic_load_pmf(sc, load_code);
12364
12365        /* Init Function state controlling object */
12366        bxe_init_func_obj(sc);
12367
12368        /* Initialize HW */
12369        if (bxe_init_hw(sc, load_code) != 0) {
12370            BLOGE(sc, "HW init failed\n");
12371            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12372            sc->state = BXE_STATE_CLOSED;
12373            rc = ENXIO;
12374            goto bxe_nic_load_error2;
12375        }
12376    }
12377
12378    /* set ALWAYS_ALIVE bit in shmem */
12379    sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12380    bxe_drv_pulse(sc);
12381    sc->flags |= BXE_NO_PULSE;
12382
12383    /* attach interrupts */
12384    if (bxe_interrupt_attach(sc) != 0) {
12385        sc->state = BXE_STATE_CLOSED;
12386        rc = ENXIO;
12387        goto bxe_nic_load_error2;
12388    }
12389
12390    bxe_nic_init(sc, load_code);
12391
12392    /* Init per-function objects */
12393    if (IS_PF(sc)) {
12394        bxe_init_objs(sc);
12395        // XXX bxe_iov_nic_init(sc);
12396
12397        /* set AFEX default VLAN tag to an invalid value */
12398        sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12399        // XXX bxe_nic_load_afex_dcc(sc, load_code);
12400
12401        sc->state = BXE_STATE_OPENING_WAITING_PORT;
12402        rc = bxe_func_start(sc);
12403        if (rc) {
12404            BLOGE(sc, "Function start failed! rc = %d\n", rc);
12405            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12406            sc->state = BXE_STATE_ERROR;
12407            goto bxe_nic_load_error3;
12408        }
12409
12410        /* send LOAD_DONE command to MCP */
12411        if (!BXE_NOMCP(sc)) {
12412            load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12413            if (!load_code) {
12414                BLOGE(sc, "MCP response failure, aborting\n");
12415                sc->state = BXE_STATE_ERROR;
12416                rc = ENXIO;
12417                goto bxe_nic_load_error3;
12418            }
12419        }
12420
12421        rc = bxe_setup_leading(sc);
12422        if (rc) {
12423            BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
12424            sc->state = BXE_STATE_ERROR;
12425            goto bxe_nic_load_error3;
12426        }
12427
12428        FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
12429            rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12430            if (rc) {
12431                BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
12432                sc->state = BXE_STATE_ERROR;
12433                goto bxe_nic_load_error3;
12434            }
12435        }
12436
12437        rc = bxe_init_rss_pf(sc);
12438        if (rc) {
12439            BLOGE(sc, "PF RSS init failed\n");
12440            sc->state = BXE_STATE_ERROR;
12441            goto bxe_nic_load_error3;
12442        }
12443    }
12444    /* XXX VF */
12445
12446    /* now when Clients are configured we are ready to work */
12447    sc->state = BXE_STATE_OPEN;
12448
12449    /* Configure a ucast MAC */
12450    if (IS_PF(sc)) {
12451        rc = bxe_set_eth_mac(sc, TRUE);
12452    }
12453    if (rc) {
12454        BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
12455        sc->state = BXE_STATE_ERROR;
12456        goto bxe_nic_load_error3;
12457    }
12458
12459    if (sc->port.pmf) {
12460        rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
12461        if (rc) {
12462            sc->state = BXE_STATE_ERROR;
12463            goto bxe_nic_load_error3;
12464        }
12465    }
12466
12467    sc->link_params.feature_config_flags &=
12468        ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
12469
12470    /* start fast path */
12471
12472    /* Initialize Rx filter */
12473    bxe_set_rx_mode(sc);
12474
12475    /* start the Tx */
12476    switch (/* XXX load_mode */LOAD_OPEN) {
12477    case LOAD_NORMAL:
12478    case LOAD_OPEN:
12479        break;
12480
12481    case LOAD_DIAG:
12482    case LOAD_LOOPBACK_EXT:
12483        sc->state = BXE_STATE_DIAG;
12484        break;
12485
12486    default:
12487        break;
12488    }
12489
12490    if (sc->port.pmf) {
12491        bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
12492    } else {
12493        bxe_link_status_update(sc);
12494    }
12495
12496    /* start the periodic timer callout */
12497    bxe_periodic_start(sc);
12498
12499    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
12500        /* mark driver is loaded in shmem2 */
12501        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
12502        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
12503                  (val |
12504                   DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
12505                   DRV_FLAGS_CAPABILITIES_LOADED_L2));
12506    }
12507
12508    /* wait for all pending SP commands to complete */
12509    if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
12510        BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
12511        bxe_periodic_stop(sc);
12512        bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
12513        return (ENXIO);
12514    }
12515
12516    /* Tell the stack the driver is running! */
12517    sc->ifnet->if_drv_flags = IFF_DRV_RUNNING;
12518
12519    BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
12520
12521    return (0);
12522
12523bxe_nic_load_error3:
12524
12525    if (IS_PF(sc)) {
12526        bxe_int_disable_sync(sc, 1);
12527
12528        /* clean out queued objects */
12529        bxe_squeeze_objects(sc);
12530    }
12531
12532    bxe_interrupt_detach(sc);
12533
12534bxe_nic_load_error2:
12535
12536    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12537        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
12538        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
12539    }
12540
12541    sc->port.pmf = 0;
12542
12543bxe_nic_load_error1:
12544
12545    /* clear pf_load status, as it was already set */
12546    if (IS_PF(sc)) {
12547        bxe_clear_pf_load(sc);
12548    }
12549
12550bxe_nic_load_error0:
12551
12552    bxe_free_fw_stats_mem(sc);
12553    bxe_free_fp_buffers(sc);
12554    bxe_free_mem(sc);
12555
12556    return (rc);
12557}
12558
12559static int
12560bxe_init_locked(struct bxe_softc *sc)
12561{
12562    int other_engine = SC_PATH(sc) ? 0 : 1;
12563    uint8_t other_load_status, load_status;
12564    uint8_t global = FALSE;
12565    int rc;
12566
12567    BXE_CORE_LOCK_ASSERT(sc);
12568
12569    /* check if the driver is already running */
12570    if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
12571        BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
12572        return (0);
12573    }
12574
12575    bxe_set_power_state(sc, PCI_PM_D0);
12576
12577    /*
12578     * If parity occurred during the unload, then attentions and/or
12579     * RECOVERY_IN_PROGRES may still be set. If so we want the first function
12580     * loaded on the current engine to complete the recovery. Parity recovery
12581     * is only relevant for PF driver.
12582     */
12583    if (IS_PF(sc)) {
12584        other_load_status = bxe_get_load_status(sc, other_engine);
12585        load_status = bxe_get_load_status(sc, SC_PATH(sc));
12586
12587        if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
12588            bxe_chk_parity_attn(sc, &global, TRUE)) {
12589            do {
12590                /*
12591                 * If there are attentions and they are in global blocks, set
12592                 * the GLOBAL_RESET bit regardless whether it will be this
12593                 * function that will complete the recovery or not.
12594                 */
12595                if (global) {
12596                    bxe_set_reset_global(sc);
12597                }
12598
12599                /*
12600                 * Only the first function on the current engine should try
12601                 * to recover in open. In case of attentions in global blocks
12602                 * only the first in the chip should try to recover.
12603                 */
12604                if ((!load_status && (!global || !other_load_status)) &&
12605                    bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
12606                    BLOGI(sc, "Recovered during init\n");
12607                    break;
12608                }
12609
12610                /* recovery has failed... */
12611                bxe_set_power_state(sc, PCI_PM_D3hot);
12612                sc->recovery_state = BXE_RECOVERY_FAILED;
12613
12614                BLOGE(sc, "Recovery flow hasn't properly "
12615                          "completed yet, try again later. "
12616                          "If you still see this message after a "
12617                          "few retries then power cycle is required.\n");
12618
12619                rc = ENXIO;
12620                goto bxe_init_locked_done;
12621            } while (0);
12622        }
12623    }
12624
12625    sc->recovery_state = BXE_RECOVERY_DONE;
12626
12627    rc = bxe_nic_load(sc, LOAD_OPEN);
12628
12629bxe_init_locked_done:
12630
12631    if (rc) {
12632        /* Tell the stack the driver is NOT running! */
12633        BLOGE(sc, "Initialization failed, "
12634                  "stack notified driver is NOT running!\n");
12635        sc->ifnet->if_drv_flags &= ~IFF_DRV_RUNNING;
12636    }
12637
12638    return (rc);
12639}
12640
12641static int
12642bxe_stop_locked(struct bxe_softc *sc)
12643{
12644    BXE_CORE_LOCK_ASSERT(sc);
12645    return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
12646}
12647
12648/*
12649 * Handles controller initialization when called from an unlocked routine.
12650 * ifconfig calls this function.
12651 *
12652 * Returns:
12653 *   void
12654 */
12655static void
12656bxe_init(void *xsc)
12657{
12658    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12659
12660    BXE_CORE_LOCK(sc);
12661    bxe_init_locked(sc);
12662    BXE_CORE_UNLOCK(sc);
12663}
12664
12665static int
12666bxe_init_ifnet(struct bxe_softc *sc)
12667{
12668    struct ifnet *ifp;
12669
12670    /* ifconfig entrypoint for media type/status reporting */
12671    ifmedia_init(&sc->ifmedia, IFM_IMASK,
12672                 bxe_ifmedia_update,
12673                 bxe_ifmedia_status);
12674
12675    /* set the default interface values */
12676    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
12677    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
12678    ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
12679
12680    sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
12681
12682    /* allocate the ifnet structure */
12683    if ((ifp = if_alloc(IFT_ETHER)) == NULL) {
12684        BLOGE(sc, "Interface allocation failed!\n");
12685        return (ENXIO);
12686    }
12687
12688    ifp->if_softc = sc;
12689    if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
12690    ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
12691    ifp->if_ioctl = bxe_ioctl;
12692    ifp->if_start = bxe_tx_start;
12693#if __FreeBSD_version >= 800000
12694    ifp->if_transmit = bxe_tx_mq_start;
12695    ifp->if_qflush = bxe_mq_flush;
12696#endif
12697#ifdef FreeBSD8_0
12698    ifp->if_timer = 0;
12699#endif
12700    ifp->if_init = bxe_init;
12701    ifp->if_mtu = sc->mtu;
12702    ifp->if_hwassist = (CSUM_IP       |
12703                        CSUM_TCP      |
12704                        CSUM_UDP      |
12705                        CSUM_TSO      |
12706                        CSUM_TCP_IPV6 |
12707                        CSUM_UDP_IPV6);
12708    ifp->if_capabilities =
12709#if __FreeBSD_version < 700000
12710        (IFCAP_VLAN_MTU       |
12711         IFCAP_VLAN_HWTAGGING |
12712         IFCAP_HWCSUM         |
12713         IFCAP_JUMBO_MTU      |
12714         IFCAP_LRO);
12715#else
12716        (IFCAP_VLAN_MTU       |
12717         IFCAP_VLAN_HWTAGGING |
12718         IFCAP_VLAN_HWTSO     |
12719         IFCAP_VLAN_HWFILTER  |
12720         IFCAP_VLAN_HWCSUM    |
12721         IFCAP_HWCSUM         |
12722         IFCAP_JUMBO_MTU      |
12723         IFCAP_LRO            |
12724         IFCAP_TSO4           |
12725         IFCAP_TSO6           |
12726         IFCAP_WOL_MAGIC);
12727#endif
12728    ifp->if_capenable = ifp->if_capabilities;
12729    ifp->if_capenable &= ~IFCAP_WOL_MAGIC; /* XXX not yet... */
12730#if __FreeBSD_version < 1000025
12731    ifp->if_baudrate = 1000000000;
12732#else
12733    if_initbaudrate(ifp, IF_Gbps(10));
12734#endif
12735    ifp->if_snd.ifq_drv_maxlen = sc->tx_ring_size;
12736
12737    IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
12738    IFQ_SET_READY(&ifp->if_snd);
12739
12740    sc->ifnet = ifp;
12741
12742    /* attach to the Ethernet interface list */
12743    ether_ifattach(ifp, sc->link_params.mac_addr);
12744
12745    return (0);
12746}
12747
12748static void
12749bxe_deallocate_bars(struct bxe_softc *sc)
12750{
12751    int i;
12752
12753    for (i = 0; i < MAX_BARS; i++) {
12754        if (sc->bar[i].resource != NULL) {
12755            bus_release_resource(sc->dev,
12756                                 SYS_RES_MEMORY,
12757                                 sc->bar[i].rid,
12758                                 sc->bar[i].resource);
12759            BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
12760                  i, PCIR_BAR(i));
12761        }
12762    }
12763}
12764
12765static int
12766bxe_allocate_bars(struct bxe_softc *sc)
12767{
12768    u_int flags;
12769    int i;
12770
12771    memset(sc->bar, 0, sizeof(sc->bar));
12772
12773    for (i = 0; i < MAX_BARS; i++) {
12774
12775        /* memory resources reside at BARs 0, 2, 4 */
12776        /* Run `pciconf -lb` to see mappings */
12777        if ((i != 0) && (i != 2) && (i != 4)) {
12778            continue;
12779        }
12780
12781        sc->bar[i].rid = PCIR_BAR(i);
12782
12783        flags = RF_ACTIVE;
12784        if (i == 0) {
12785            flags |= RF_SHAREABLE;
12786        }
12787
12788        if ((sc->bar[i].resource =
12789             bus_alloc_resource_any(sc->dev,
12790                                    SYS_RES_MEMORY,
12791                                    &sc->bar[i].rid,
12792                                    flags)) == NULL) {
12793            return (0);
12794        }
12795
12796        sc->bar[i].tag    = rman_get_bustag(sc->bar[i].resource);
12797        sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
12798        sc->bar[i].kva    = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
12799
12800        BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%ld) -> %p\n",
12801              i, PCIR_BAR(i),
12802              (void *)rman_get_start(sc->bar[i].resource),
12803              (void *)rman_get_end(sc->bar[i].resource),
12804              rman_get_size(sc->bar[i].resource),
12805              (void *)sc->bar[i].kva);
12806    }
12807
12808    return (0);
12809}
12810
12811static void
12812bxe_get_function_num(struct bxe_softc *sc)
12813{
12814    uint32_t val = 0;
12815
12816    /*
12817     * Read the ME register to get the function number. The ME register
12818     * holds the relative-function number and absolute-function number. The
12819     * absolute-function number appears only in E2 and above. Before that
12820     * these bits always contained zero, therefore we cannot blindly use them.
12821     */
12822
12823    val = REG_RD(sc, BAR_ME_REGISTER);
12824
12825    sc->pfunc_rel =
12826        (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
12827    sc->path_id =
12828        (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
12829
12830    if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
12831        sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
12832    } else {
12833        sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
12834    }
12835
12836    BLOGD(sc, DBG_LOAD,
12837          "Relative function %d, Absolute function %d, Path %d\n",
12838          sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
12839}
12840
12841static uint32_t
12842bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
12843{
12844    uint32_t shmem2_size;
12845    uint32_t offset;
12846    uint32_t mf_cfg_offset_value;
12847
12848    /* Non 57712 */
12849    offset = (SHMEM_RD(sc, func_mb) +
12850              (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
12851
12852    /* 57712 plus */
12853    if (sc->devinfo.shmem2_base != 0) {
12854        shmem2_size = SHMEM2_RD(sc, size);
12855        if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
12856            mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
12857            if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
12858                offset = mf_cfg_offset_value;
12859            }
12860        }
12861    }
12862
12863    return (offset);
12864}
12865
12866static uint32_t
12867bxe_pcie_capability_read(struct bxe_softc *sc,
12868                         int    reg,
12869                         int    width)
12870{
12871    int pcie_reg;
12872
12873    /* ensure PCIe capability is enabled */
12874    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
12875        if (pcie_reg != 0) {
12876            BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
12877            return (pci_read_config(sc->dev, (pcie_reg + reg), width));
12878        }
12879    }
12880
12881    BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
12882
12883    return (0);
12884}
12885
12886static uint8_t
12887bxe_is_pcie_pending(struct bxe_softc *sc)
12888{
12889    return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) &
12890            PCIM_EXP_STA_TRANSACTION_PND);
12891}
12892
12893/*
12894 * Walk the PCI capabiites list for the device to find what features are
12895 * supported. These capabilites may be enabled/disabled by firmware so it's
12896 * best to walk the list rather than make assumptions.
12897 */
12898static void
12899bxe_probe_pci_caps(struct bxe_softc *sc)
12900{
12901    uint16_t link_status;
12902    int reg;
12903
12904    /* check if PCI Power Management is enabled */
12905    if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
12906        if (reg != 0) {
12907            BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
12908
12909            sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
12910            sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
12911        }
12912    }
12913
12914    link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2);
12915
12916    /* handle PCIe 2.0 workarounds for 57710 */
12917    if (CHIP_IS_E1(sc)) {
12918        /* workaround for 57710 errata E4_57710_27462 */
12919        sc->devinfo.pcie_link_speed =
12920            (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
12921
12922        /* workaround for 57710 errata E4_57710_27488 */
12923        sc->devinfo.pcie_link_width =
12924            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
12925        if (sc->devinfo.pcie_link_speed > 1) {
12926            sc->devinfo.pcie_link_width =
12927                ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1;
12928        }
12929    } else {
12930        sc->devinfo.pcie_link_speed =
12931            (link_status & PCIM_LINK_STA_SPEED);
12932        sc->devinfo.pcie_link_width =
12933            ((link_status & PCIM_LINK_STA_WIDTH) >> 4);
12934    }
12935
12936    BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
12937          sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
12938
12939    sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
12940    sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
12941
12942    /* check if MSI capability is enabled */
12943    if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
12944        if (reg != 0) {
12945            BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
12946
12947            sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
12948            sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
12949        }
12950    }
12951
12952    /* check if MSI-X capability is enabled */
12953    if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
12954        if (reg != 0) {
12955            BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
12956
12957            sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
12958            sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
12959        }
12960    }
12961}
12962
12963static int
12964bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
12965{
12966    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
12967    uint32_t val;
12968
12969    /* get the outer vlan if we're in switch-dependent mode */
12970
12971    val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
12972    mf_info->ext_id = (uint16_t)val;
12973
12974    mf_info->multi_vnics_mode = 1;
12975
12976    if (!VALID_OVLAN(mf_info->ext_id)) {
12977        BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
12978        return (1);
12979    }
12980
12981    /* get the capabilities */
12982    if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
12983        FUNC_MF_CFG_PROTOCOL_ISCSI) {
12984        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
12985    } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
12986               FUNC_MF_CFG_PROTOCOL_FCOE) {
12987        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
12988    } else {
12989        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
12990    }
12991
12992    mf_info->vnics_per_port =
12993        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
12994
12995    return (0);
12996}
12997
12998static uint32_t
12999bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13000{
13001    uint32_t retval = 0;
13002    uint32_t val;
13003
13004    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13005
13006    if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13007        if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13008            retval |= MF_PROTO_SUPPORT_ETHERNET;
13009        }
13010        if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13011            retval |= MF_PROTO_SUPPORT_ISCSI;
13012        }
13013        if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13014            retval |= MF_PROTO_SUPPORT_FCOE;
13015        }
13016    }
13017
13018    return (retval);
13019}
13020
13021static int
13022bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13023{
13024    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13025    uint32_t val;
13026
13027    /*
13028     * There is no outer vlan if we're in switch-independent mode.
13029     * If the mac is valid then assume multi-function.
13030     */
13031
13032    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13033
13034    mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13035
13036    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13037
13038    mf_info->vnics_per_port =
13039        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13040
13041    return (0);
13042}
13043
13044static int
13045bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13046{
13047    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13048    uint32_t e1hov_tag;
13049    uint32_t func_config;
13050    uint32_t niv_config;
13051
13052    mf_info->multi_vnics_mode = 1;
13053
13054    e1hov_tag   = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13055    func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13056    niv_config  = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13057
13058    mf_info->ext_id =
13059        (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13060                   FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13061
13062    mf_info->default_vlan =
13063        (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13064                   FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13065
13066    mf_info->niv_allowed_priorities =
13067        (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13068                  FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13069
13070    mf_info->niv_default_cos =
13071        (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13072                  FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13073
13074    mf_info->afex_vlan_mode =
13075        ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13076         FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13077
13078    mf_info->niv_mba_enabled =
13079        ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13080         FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13081
13082    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13083
13084    mf_info->vnics_per_port =
13085        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13086
13087    return (0);
13088}
13089
13090static int
13091bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13092{
13093    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13094    uint32_t mf_cfg1;
13095    uint32_t mf_cfg2;
13096    uint32_t ovlan1;
13097    uint32_t ovlan2;
13098    uint8_t i, j;
13099
13100    BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13101          SC_PORT(sc));
13102    BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13103          mf_info->mf_config[SC_VN(sc)]);
13104    BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13105          mf_info->multi_vnics_mode);
13106    BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13107          mf_info->vnics_per_port);
13108    BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13109          mf_info->ext_id);
13110    BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13111          mf_info->min_bw[0], mf_info->min_bw[1],
13112          mf_info->min_bw[2], mf_info->min_bw[3]);
13113    BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13114          mf_info->max_bw[0], mf_info->max_bw[1],
13115          mf_info->max_bw[2], mf_info->max_bw[3]);
13116    BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13117          sc->mac_addr_str);
13118
13119    /* various MF mode sanity checks... */
13120
13121    if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13122        BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13123              SC_PORT(sc));
13124        return (1);
13125    }
13126
13127    if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13128        BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13129              mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13130        return (1);
13131    }
13132
13133    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13134        /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13135        if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13136            BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13137                  SC_VN(sc), OVLAN(sc));
13138            return (1);
13139        }
13140
13141        if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13142            BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13143                  mf_info->multi_vnics_mode, OVLAN(sc));
13144            return (1);
13145        }
13146
13147        /*
13148         * Verify all functions are either MF or SF mode. If MF, make sure
13149         * sure that all non-hidden functions have a valid ovlan. If SF,
13150         * make sure that all non-hidden functions have an invalid ovlan.
13151         */
13152        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13153            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13154            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13155            if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13156                (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13157                 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13158                BLOGE(sc, "mf_mode=SD function %d MF config "
13159                          "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13160                      i, mf_info->multi_vnics_mode, ovlan1);
13161                return (1);
13162            }
13163        }
13164
13165        /* Verify all funcs on the same port each have a different ovlan. */
13166        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13167            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13168            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13169            /* iterate from the next function on the port to the max func */
13170            for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13171                mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13172                ovlan2  = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13173                if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13174                    VALID_OVLAN(ovlan1) &&
13175                    !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13176                    VALID_OVLAN(ovlan2) &&
13177                    (ovlan1 == ovlan2)) {
13178                    BLOGE(sc, "mf_mode=SD functions %d and %d "
13179                              "have the same ovlan (%d)\n",
13180                          i, j, ovlan1);
13181                    return (1);
13182                }
13183            }
13184        }
13185    } /* MULTI_FUNCTION_SD */
13186
13187    return (0);
13188}
13189
13190static int
13191bxe_get_mf_cfg_info(struct bxe_softc *sc)
13192{
13193    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13194    uint32_t val, mac_upper;
13195    uint8_t i, vnic;
13196
13197    /* initialize mf_info defaults */
13198    mf_info->vnics_per_port   = 1;
13199    mf_info->multi_vnics_mode = FALSE;
13200    mf_info->path_has_ovlan   = FALSE;
13201    mf_info->mf_mode          = SINGLE_FUNCTION;
13202
13203    if (!CHIP_IS_MF_CAP(sc)) {
13204        return (0);
13205    }
13206
13207    if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13208        BLOGE(sc, "Invalid mf_cfg_base!\n");
13209        return (1);
13210    }
13211
13212    /* get the MF mode (switch dependent / independent / single-function) */
13213
13214    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13215
13216    switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13217    {
13218    case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13219
13220        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13221
13222        /* check for legal upper mac bytes */
13223        if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13224            mf_info->mf_mode = MULTI_FUNCTION_SI;
13225        } else {
13226            BLOGE(sc, "Invalid config for Switch Independent mode\n");
13227        }
13228
13229        break;
13230
13231    case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13232    case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13233
13234        /* get outer vlan configuration */
13235        val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13236
13237        if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13238            FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13239            mf_info->mf_mode = MULTI_FUNCTION_SD;
13240        } else {
13241            BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13242        }
13243
13244        break;
13245
13246    case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13247
13248        /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13249        return (0);
13250
13251    case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13252
13253        /*
13254         * Mark MF mode as NIV if MCP version includes NPAR-SD support
13255         * and the MAC address is valid.
13256         */
13257        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13258
13259        if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13260            (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13261            mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13262        } else {
13263            BLOGE(sc, "Invalid config for AFEX mode\n");
13264        }
13265
13266        break;
13267
13268    default:
13269
13270        BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13271              (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13272
13273        return (1);
13274    }
13275
13276    /* set path mf_mode (which could be different than function mf_mode) */
13277    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13278        mf_info->path_has_ovlan = TRUE;
13279    } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13280        /*
13281         * Decide on path multi vnics mode. If we're not in MF mode and in
13282         * 4-port mode, this is good enough to check vnic-0 of the other port
13283         * on the same path
13284         */
13285        if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13286            uint8_t other_port = !(PORT_ID(sc) & 1);
13287            uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13288
13289            val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13290
13291            mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13292        }
13293    }
13294
13295    if (mf_info->mf_mode == SINGLE_FUNCTION) {
13296        /* invalid MF config */
13297        if (SC_VN(sc) >= 1) {
13298            BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13299            return (1);
13300        }
13301
13302        return (0);
13303    }
13304
13305    /* get the MF configuration */
13306    mf_info->mf_config[SC_VN(sc)] =
13307        MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13308
13309    switch(mf_info->mf_mode)
13310    {
13311    case MULTI_FUNCTION_SD:
13312
13313        bxe_get_shmem_mf_cfg_info_sd(sc);
13314        break;
13315
13316    case MULTI_FUNCTION_SI:
13317
13318        bxe_get_shmem_mf_cfg_info_si(sc);
13319        break;
13320
13321    case MULTI_FUNCTION_AFEX:
13322
13323        bxe_get_shmem_mf_cfg_info_niv(sc);
13324        break;
13325
13326    default:
13327
13328        BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13329              mf_info->mf_mode);
13330        return (1);
13331    }
13332
13333    /* get the congestion management parameters */
13334
13335    vnic = 0;
13336    FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13337        /* get min/max bw */
13338        val = MFCFG_RD(sc, func_mf_config[i].config);
13339        mf_info->min_bw[vnic] =
13340            ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13341        mf_info->max_bw[vnic] =
13342            ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13343        vnic++;
13344    }
13345
13346    return (bxe_check_valid_mf_cfg(sc));
13347}
13348
13349static int
13350bxe_get_shmem_info(struct bxe_softc *sc)
13351{
13352    int port;
13353    uint32_t mac_hi, mac_lo, val;
13354
13355    port = SC_PORT(sc);
13356    mac_hi = mac_lo = 0;
13357
13358    sc->link_params.sc   = sc;
13359    sc->link_params.port = port;
13360
13361    /* get the hardware config info */
13362    sc->devinfo.hw_config =
13363        SHMEM_RD(sc, dev_info.shared_hw_config.config);
13364    sc->devinfo.hw_config2 =
13365        SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13366
13367    sc->link_params.hw_led_mode =
13368        ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13369         SHARED_HW_CFG_LED_MODE_SHIFT);
13370
13371    /* get the port feature config */
13372    sc->port.config =
13373        SHMEM_RD(sc, dev_info.port_feature_config[port].config);
13374
13375    /* get the link params */
13376    sc->link_params.speed_cap_mask[0] =
13377        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
13378    sc->link_params.speed_cap_mask[1] =
13379        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
13380
13381    /* get the lane config */
13382    sc->link_params.lane_config =
13383        SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
13384
13385    /* get the link config */
13386    val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
13387    sc->port.link_config[ELINK_INT_PHY] = val;
13388    sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13389    sc->port.link_config[ELINK_EXT_PHY1] =
13390        SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
13391
13392    /* get the override preemphasis flag and enable it or turn it off */
13393    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13394    if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
13395        sc->link_params.feature_config_flags |=
13396            ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13397    } else {
13398        sc->link_params.feature_config_flags &=
13399            ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13400    }
13401
13402    /* get the initial value of the link params */
13403    sc->link_params.multi_phy_config =
13404        SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
13405
13406    /* get external phy info */
13407    sc->port.ext_phy_config =
13408        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
13409
13410    /* get the multifunction configuration */
13411    bxe_get_mf_cfg_info(sc);
13412
13413    /* get the mac address */
13414    if (IS_MF(sc)) {
13415        mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13416        mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
13417    } else {
13418        mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
13419        mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
13420    }
13421
13422    if ((mac_lo == 0) && (mac_hi == 0)) {
13423        *sc->mac_addr_str = 0;
13424        BLOGE(sc, "No Ethernet address programmed!\n");
13425    } else {
13426        sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13427        sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13428        sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13429        sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13430        sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13431        sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13432        snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13433                 "%02x:%02x:%02x:%02x:%02x:%02x",
13434                 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13435                 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13436                 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
13437        BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
13438    }
13439
13440    return (0);
13441}
13442
13443static void
13444bxe_get_tunable_params(struct bxe_softc *sc)
13445{
13446    /* sanity checks */
13447
13448    if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
13449        (bxe_interrupt_mode != INTR_MODE_MSI)  &&
13450        (bxe_interrupt_mode != INTR_MODE_MSIX)) {
13451        BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
13452        bxe_interrupt_mode = INTR_MODE_MSIX;
13453    }
13454
13455    if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
13456        BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
13457        bxe_queue_count = 0;
13458    }
13459
13460    if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
13461        if (bxe_max_rx_bufs == 0) {
13462            bxe_max_rx_bufs = RX_BD_USABLE;
13463        } else {
13464            BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
13465            bxe_max_rx_bufs = 2048;
13466        }
13467    }
13468
13469    if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
13470        BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
13471        bxe_hc_rx_ticks = 25;
13472    }
13473
13474    if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
13475        BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
13476        bxe_hc_tx_ticks = 50;
13477    }
13478
13479    if (bxe_max_aggregation_size == 0) {
13480        bxe_max_aggregation_size = TPA_AGG_SIZE;
13481    }
13482
13483    if (bxe_max_aggregation_size > 0xffff) {
13484        BLOGW(sc, "invalid max_aggregation_size (%d)\n",
13485              bxe_max_aggregation_size);
13486        bxe_max_aggregation_size = TPA_AGG_SIZE;
13487    }
13488
13489    if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13490        BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
13491        bxe_mrrs = -1;
13492    }
13493
13494    if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
13495        BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
13496        bxe_autogreeen = 0;
13497    }
13498
13499    if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
13500        BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
13501        bxe_udp_rss = 0;
13502    }
13503
13504    /* pull in user settings */
13505
13506    sc->interrupt_mode       = bxe_interrupt_mode;
13507    sc->max_rx_bufs          = bxe_max_rx_bufs;
13508    sc->hc_rx_ticks          = bxe_hc_rx_ticks;
13509    sc->hc_tx_ticks          = bxe_hc_tx_ticks;
13510    sc->max_aggregation_size = bxe_max_aggregation_size;
13511    sc->mrrs                 = bxe_mrrs;
13512    sc->autogreeen           = bxe_autogreeen;
13513    sc->udp_rss              = bxe_udp_rss;
13514
13515    if (bxe_interrupt_mode == INTR_MODE_INTX) {
13516        sc->num_queues = 1;
13517    } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
13518        sc->num_queues =
13519            min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
13520                MAX_RSS_CHAINS);
13521        if (sc->num_queues > mp_ncpus) {
13522            sc->num_queues = mp_ncpus;
13523        }
13524    }
13525
13526    BLOGD(sc, DBG_LOAD,
13527          "User Config: "
13528          "debug=0x%lx "
13529          "interrupt_mode=%d "
13530          "queue_count=%d "
13531          "hc_rx_ticks=%d "
13532          "hc_tx_ticks=%d "
13533          "rx_budget=%d "
13534          "max_aggregation_size=%d "
13535          "mrrs=%d "
13536          "autogreeen=%d "
13537          "udp_rss=%d\n",
13538          bxe_debug,
13539          sc->interrupt_mode,
13540          sc->num_queues,
13541          sc->hc_rx_ticks,
13542          sc->hc_tx_ticks,
13543          bxe_rx_budget,
13544          sc->max_aggregation_size,
13545          sc->mrrs,
13546          sc->autogreeen,
13547          sc->udp_rss);
13548}
13549
13550static int
13551bxe_media_detect(struct bxe_softc *sc)
13552{
13553    int port_type;
13554    uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
13555
13556    switch (sc->link_params.phy[phy_idx].media_type) {
13557    case ELINK_ETH_PHY_SFPP_10G_FIBER:
13558    case ELINK_ETH_PHY_XFP_FIBER:
13559        BLOGI(sc, "Found 10Gb Fiber media.\n");
13560        sc->media = IFM_10G_SR;
13561        port_type = PORT_FIBRE;
13562        break;
13563    case ELINK_ETH_PHY_SFP_1G_FIBER:
13564        BLOGI(sc, "Found 1Gb Fiber media.\n");
13565        sc->media = IFM_1000_SX;
13566        port_type = PORT_FIBRE;
13567        break;
13568    case ELINK_ETH_PHY_KR:
13569    case ELINK_ETH_PHY_CX4:
13570        BLOGI(sc, "Found 10GBase-CX4 media.\n");
13571        sc->media = IFM_10G_CX4;
13572        port_type = PORT_FIBRE;
13573        break;
13574    case ELINK_ETH_PHY_DA_TWINAX:
13575        BLOGI(sc, "Found 10Gb Twinax media.\n");
13576        sc->media = IFM_10G_TWINAX;
13577        port_type = PORT_DA;
13578        break;
13579    case ELINK_ETH_PHY_BASE_T:
13580        if (sc->link_params.speed_cap_mask[0] &
13581            PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
13582            BLOGI(sc, "Found 10GBase-T media.\n");
13583            sc->media = IFM_10G_T;
13584            port_type = PORT_TP;
13585        } else {
13586            BLOGI(sc, "Found 1000Base-T media.\n");
13587            sc->media = IFM_1000_T;
13588            port_type = PORT_TP;
13589        }
13590        break;
13591    case ELINK_ETH_PHY_NOT_PRESENT:
13592        BLOGI(sc, "Media not present.\n");
13593        sc->media = 0;
13594        port_type = PORT_OTHER;
13595        break;
13596    case ELINK_ETH_PHY_UNSPECIFIED:
13597    default:
13598        BLOGI(sc, "Unknown media!\n");
13599        sc->media = 0;
13600        port_type = PORT_OTHER;
13601        break;
13602    }
13603    return port_type;
13604}
13605
13606#define GET_FIELD(value, fname)                     \
13607    (((value) & (fname##_MASK)) >> (fname##_SHIFT))
13608#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
13609#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
13610
13611static int
13612bxe_get_igu_cam_info(struct bxe_softc *sc)
13613{
13614    int pfid = SC_FUNC(sc);
13615    int igu_sb_id;
13616    uint32_t val;
13617    uint8_t fid, igu_sb_cnt = 0;
13618
13619    sc->igu_base_sb = 0xff;
13620
13621    if (CHIP_INT_MODE_IS_BC(sc)) {
13622        int vn = SC_VN(sc);
13623        igu_sb_cnt = sc->igu_sb_cnt;
13624        sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
13625                           FP_SB_MAX_E1x);
13626        sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
13627                          (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
13628        return (0);
13629    }
13630
13631    /* IGU in normal mode - read CAM */
13632    for (igu_sb_id = 0;
13633         igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
13634         igu_sb_id++) {
13635        val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
13636        if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
13637            continue;
13638        }
13639        fid = IGU_FID(val);
13640        if ((fid & IGU_FID_ENCODE_IS_PF)) {
13641            if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
13642                continue;
13643            }
13644            if (IGU_VEC(val) == 0) {
13645                /* default status block */
13646                sc->igu_dsb_id = igu_sb_id;
13647            } else {
13648                if (sc->igu_base_sb == 0xff) {
13649                    sc->igu_base_sb = igu_sb_id;
13650                }
13651                igu_sb_cnt++;
13652            }
13653        }
13654    }
13655
13656    /*
13657     * Due to new PF resource allocation by MFW T7.4 and above, it's optional
13658     * that number of CAM entries will not be equal to the value advertised in
13659     * PCI. Driver should use the minimal value of both as the actual status
13660     * block count
13661     */
13662    sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
13663
13664    if (igu_sb_cnt == 0) {
13665        BLOGE(sc, "CAM configuration error\n");
13666        return (-1);
13667    }
13668
13669    return (0);
13670}
13671
13672/*
13673 * Gather various information from the device config space, the device itself,
13674 * shmem, and the user input.
13675 */
13676static int
13677bxe_get_device_info(struct bxe_softc *sc)
13678{
13679    uint32_t val;
13680    int rc;
13681
13682    /* Get the data for the device */
13683    sc->devinfo.vendor_id    = pci_get_vendor(sc->dev);
13684    sc->devinfo.device_id    = pci_get_device(sc->dev);
13685    sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
13686    sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
13687
13688    /* get the chip revision (chip metal comes from pci config space) */
13689    sc->devinfo.chip_id     =
13690    sc->link_params.chip_id =
13691        (((REG_RD(sc, MISC_REG_CHIP_NUM)                   & 0xffff) << 16) |
13692         ((REG_RD(sc, MISC_REG_CHIP_REV)                   & 0xf)    << 12) |
13693         (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf)    << 4)  |
13694         ((REG_RD(sc, MISC_REG_BOND_ID)                    & 0xf)    << 0));
13695
13696    /* force 57811 according to MISC register */
13697    if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
13698        if (CHIP_IS_57810(sc)) {
13699            sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
13700                                   (sc->devinfo.chip_id & 0x0000ffff));
13701        } else if (CHIP_IS_57810_MF(sc)) {
13702            sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
13703                                   (sc->devinfo.chip_id & 0x0000ffff));
13704        }
13705        sc->devinfo.chip_id |= 0x1;
13706    }
13707
13708    BLOGD(sc, DBG_LOAD,
13709          "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
13710          sc->devinfo.chip_id,
13711          ((sc->devinfo.chip_id >> 16) & 0xffff),
13712          ((sc->devinfo.chip_id >> 12) & 0xf),
13713          ((sc->devinfo.chip_id >>  4) & 0xff),
13714          ((sc->devinfo.chip_id >>  0) & 0xf));
13715
13716    val = (REG_RD(sc, 0x2874) & 0x55);
13717    if ((sc->devinfo.chip_id & 0x1) ||
13718        (CHIP_IS_E1(sc) && val) ||
13719        (CHIP_IS_E1H(sc) && (val == 0x55))) {
13720        sc->flags |= BXE_ONE_PORT_FLAG;
13721        BLOGD(sc, DBG_LOAD, "single port device\n");
13722    }
13723
13724    /* set the doorbell size */
13725    sc->doorbell_size = (1 << BXE_DB_SHIFT);
13726
13727    /* determine whether the device is in 2 port or 4 port mode */
13728    sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
13729    if (CHIP_IS_E2E3(sc)) {
13730        /*
13731         * Read port4mode_en_ovwr[0]:
13732         *   If 1, four port mode is in port4mode_en_ovwr[1].
13733         *   If 0, four port mode is in port4mode_en[0].
13734         */
13735        val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
13736        if (val & 1) {
13737            val = ((val >> 1) & 1);
13738        } else {
13739            val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
13740        }
13741
13742        sc->devinfo.chip_port_mode =
13743            (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
13744
13745        BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
13746    }
13747
13748    /* get the function and path info for the device */
13749    bxe_get_function_num(sc);
13750
13751    /* get the shared memory base address */
13752    sc->devinfo.shmem_base     =
13753    sc->link_params.shmem_base =
13754        REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
13755    sc->devinfo.shmem2_base =
13756        REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
13757                                  MISC_REG_GENERIC_CR_0));
13758
13759    BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
13760          sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
13761
13762    if (!sc->devinfo.shmem_base) {
13763        /* this should ONLY prevent upcoming shmem reads */
13764        BLOGI(sc, "MCP not active\n");
13765        sc->flags |= BXE_NO_MCP_FLAG;
13766        return (0);
13767    }
13768
13769    /* make sure the shared memory contents are valid */
13770    val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
13771    if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
13772        (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
13773        BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
13774        return (0);
13775    }
13776    BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
13777
13778    /* get the bootcode version */
13779    sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
13780    snprintf(sc->devinfo.bc_ver_str,
13781             sizeof(sc->devinfo.bc_ver_str),
13782             "%d.%d.%d",
13783             ((sc->devinfo.bc_ver >> 24) & 0xff),
13784             ((sc->devinfo.bc_ver >> 16) & 0xff),
13785             ((sc->devinfo.bc_ver >>  8) & 0xff));
13786    BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
13787
13788    /* get the bootcode shmem address */
13789    sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
13790    BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x08%x \n", sc->devinfo.mf_cfg_base);
13791
13792    /* clean indirect addresses as they're not used */
13793    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
13794    if (IS_PF(sc)) {
13795        REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
13796        REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
13797        REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
13798        REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
13799        if (CHIP_IS_E1x(sc)) {
13800            REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
13801            REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
13802            REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
13803            REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
13804        }
13805
13806        /*
13807         * Enable internal target-read (in case we are probed after PF
13808         * FLR). Must be done prior to any BAR read access. Only for
13809         * 57712 and up
13810         */
13811        if (!CHIP_IS_E1x(sc)) {
13812            REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13813        }
13814    }
13815
13816    /* get the nvram size */
13817    val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
13818    sc->devinfo.flash_size =
13819        (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
13820    BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
13821
13822    /* get PCI capabilites */
13823    bxe_probe_pci_caps(sc);
13824
13825    bxe_set_power_state(sc, PCI_PM_D0);
13826
13827    /* get various configuration parameters from shmem */
13828    bxe_get_shmem_info(sc);
13829
13830    if (sc->devinfo.pcie_msix_cap_reg != 0) {
13831        val = pci_read_config(sc->dev,
13832                              (sc->devinfo.pcie_msix_cap_reg +
13833                               PCIR_MSIX_CTRL),
13834                              2);
13835        sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
13836    } else {
13837        sc->igu_sb_cnt = 1;
13838    }
13839
13840    sc->igu_base_addr = BAR_IGU_INTMEM;
13841
13842    /* initialize IGU parameters */
13843    if (CHIP_IS_E1x(sc)) {
13844        sc->devinfo.int_block = INT_BLOCK_HC;
13845        sc->igu_dsb_id = DEF_SB_IGU_ID;
13846        sc->igu_base_sb = 0;
13847    } else {
13848        sc->devinfo.int_block = INT_BLOCK_IGU;
13849
13850        /* do not allow device reset during IGU info preocessing */
13851        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13852
13853        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
13854
13855        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13856            int tout = 5000;
13857
13858            BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
13859
13860            val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
13861            REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
13862            REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
13863
13864            while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13865                tout--;
13866                DELAY(1000);
13867            }
13868
13869            if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
13870                BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
13871                bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13872                return (-1);
13873            }
13874        }
13875
13876        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
13877            BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
13878            sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
13879        } else {
13880            BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
13881        }
13882
13883        rc = bxe_get_igu_cam_info(sc);
13884
13885        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
13886
13887        if (rc) {
13888            return (rc);
13889        }
13890    }
13891
13892    /*
13893     * Get base FW non-default (fast path) status block ID. This value is
13894     * used to initialize the fw_sb_id saved on the fp/queue structure to
13895     * determine the id used by the FW.
13896     */
13897    if (CHIP_IS_E1x(sc)) {
13898        sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
13899    } else {
13900        /*
13901         * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
13902         * the same queue are indicated on the same IGU SB). So we prefer
13903         * FW and IGU SBs to be the same value.
13904         */
13905        sc->base_fw_ndsb = sc->igu_base_sb;
13906    }
13907
13908    BLOGD(sc, DBG_LOAD,
13909          "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
13910          sc->igu_dsb_id, sc->igu_base_sb,
13911          sc->igu_sb_cnt, sc->base_fw_ndsb);
13912
13913    elink_phy_probe(&sc->link_params);
13914
13915    return (0);
13916}
13917
13918static void
13919bxe_link_settings_supported(struct bxe_softc *sc,
13920                            uint32_t         switch_cfg)
13921{
13922    uint32_t cfg_size = 0;
13923    uint32_t idx;
13924    uint8_t port = SC_PORT(sc);
13925
13926    /* aggregation of supported attributes of all external phys */
13927    sc->port.supported[0] = 0;
13928    sc->port.supported[1] = 0;
13929
13930    switch (sc->link_params.num_phys) {
13931    case 1:
13932        sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
13933        cfg_size = 1;
13934        break;
13935    case 2:
13936        sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
13937        cfg_size = 1;
13938        break;
13939    case 3:
13940        if (sc->link_params.multi_phy_config &
13941            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
13942            sc->port.supported[1] =
13943                sc->link_params.phy[ELINK_EXT_PHY1].supported;
13944            sc->port.supported[0] =
13945                sc->link_params.phy[ELINK_EXT_PHY2].supported;
13946        } else {
13947            sc->port.supported[0] =
13948                sc->link_params.phy[ELINK_EXT_PHY1].supported;
13949            sc->port.supported[1] =
13950                sc->link_params.phy[ELINK_EXT_PHY2].supported;
13951        }
13952        cfg_size = 2;
13953        break;
13954    }
13955
13956    if (!(sc->port.supported[0] || sc->port.supported[1])) {
13957        BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
13958              SHMEM_RD(sc,
13959                       dev_info.port_hw_config[port].external_phy_config),
13960              SHMEM_RD(sc,
13961                       dev_info.port_hw_config[port].external_phy_config2));
13962        return;
13963    }
13964
13965    if (CHIP_IS_E3(sc))
13966        sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
13967    else {
13968        switch (switch_cfg) {
13969        case ELINK_SWITCH_CFG_1G:
13970            sc->port.phy_addr =
13971                REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
13972            break;
13973        case ELINK_SWITCH_CFG_10G:
13974            sc->port.phy_addr =
13975                REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
13976            break;
13977        default:
13978            BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
13979                  sc->port.link_config[0]);
13980            return;
13981        }
13982    }
13983
13984    BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
13985
13986    /* mask what we support according to speed_cap_mask per configuration */
13987    for (idx = 0; idx < cfg_size; idx++) {
13988        if (!(sc->link_params.speed_cap_mask[idx] &
13989              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
13990            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
13991        }
13992
13993        if (!(sc->link_params.speed_cap_mask[idx] &
13994              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
13995            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
13996        }
13997
13998        if (!(sc->link_params.speed_cap_mask[idx] &
13999              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14000            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14001        }
14002
14003        if (!(sc->link_params.speed_cap_mask[idx] &
14004              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14005            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14006        }
14007
14008        if (!(sc->link_params.speed_cap_mask[idx] &
14009              PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14010            sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14011        }
14012
14013        if (!(sc->link_params.speed_cap_mask[idx] &
14014              PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14015            sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14016        }
14017
14018        if (!(sc->link_params.speed_cap_mask[idx] &
14019              PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14020            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14021        }
14022
14023        if (!(sc->link_params.speed_cap_mask[idx] &
14024              PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14025            sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14026        }
14027    }
14028
14029    BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14030          sc->port.supported[0], sc->port.supported[1]);
14031}
14032
14033static void
14034bxe_link_settings_requested(struct bxe_softc *sc)
14035{
14036    uint32_t link_config;
14037    uint32_t idx;
14038    uint32_t cfg_size = 0;
14039
14040    sc->port.advertising[0] = 0;
14041    sc->port.advertising[1] = 0;
14042
14043    switch (sc->link_params.num_phys) {
14044    case 1:
14045    case 2:
14046        cfg_size = 1;
14047        break;
14048    case 3:
14049        cfg_size = 2;
14050        break;
14051    }
14052
14053    for (idx = 0; idx < cfg_size; idx++) {
14054        sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14055        link_config = sc->port.link_config[idx];
14056
14057        switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14058        case PORT_FEATURE_LINK_SPEED_AUTO:
14059            if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14060                sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14061                sc->port.advertising[idx] |= sc->port.supported[idx];
14062                if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14063                    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14064                    sc->port.advertising[idx] |=
14065                        (ELINK_SUPPORTED_100baseT_Half |
14066                         ELINK_SUPPORTED_100baseT_Full);
14067            } else {
14068                /* force 10G, no AN */
14069                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14070                sc->port.advertising[idx] |=
14071                    (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14072                continue;
14073            }
14074            break;
14075
14076        case PORT_FEATURE_LINK_SPEED_10M_FULL:
14077            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14078                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14079                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14080                                              ADVERTISED_TP);
14081            } else {
14082                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14083                          "speed_cap_mask=0x%08x\n",
14084                      link_config, sc->link_params.speed_cap_mask[idx]);
14085                return;
14086            }
14087            break;
14088
14089        case PORT_FEATURE_LINK_SPEED_10M_HALF:
14090            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14091                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14092                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14093                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14094                                              ADVERTISED_TP);
14095            } else {
14096                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14097                          "speed_cap_mask=0x%08x\n",
14098                      link_config, sc->link_params.speed_cap_mask[idx]);
14099                return;
14100            }
14101            break;
14102
14103        case PORT_FEATURE_LINK_SPEED_100M_FULL:
14104            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14105                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14106                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14107                                              ADVERTISED_TP);
14108            } else {
14109                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14110                          "speed_cap_mask=0x%08x\n",
14111                      link_config, sc->link_params.speed_cap_mask[idx]);
14112                return;
14113            }
14114            break;
14115
14116        case PORT_FEATURE_LINK_SPEED_100M_HALF:
14117            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14118                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14119                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14120                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14121                                              ADVERTISED_TP);
14122            } else {
14123                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14124                          "speed_cap_mask=0x%08x\n",
14125                      link_config, sc->link_params.speed_cap_mask[idx]);
14126                return;
14127            }
14128            break;
14129
14130        case PORT_FEATURE_LINK_SPEED_1G:
14131            if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14132                sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14133                sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14134                                              ADVERTISED_TP);
14135            } else {
14136                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14137                          "speed_cap_mask=0x%08x\n",
14138                      link_config, sc->link_params.speed_cap_mask[idx]);
14139                return;
14140            }
14141            break;
14142
14143        case PORT_FEATURE_LINK_SPEED_2_5G:
14144            if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14145                sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14146                sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14147                                              ADVERTISED_TP);
14148            } else {
14149                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14150                          "speed_cap_mask=0x%08x\n",
14151                      link_config, sc->link_params.speed_cap_mask[idx]);
14152                return;
14153            }
14154            break;
14155
14156        case PORT_FEATURE_LINK_SPEED_10G_CX4:
14157            if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14158                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14159                sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14160                                              ADVERTISED_FIBRE);
14161            } else {
14162                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14163                          "speed_cap_mask=0x%08x\n",
14164                      link_config, sc->link_params.speed_cap_mask[idx]);
14165                return;
14166            }
14167            break;
14168
14169        case PORT_FEATURE_LINK_SPEED_20G:
14170            sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14171            break;
14172
14173        default:
14174            BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14175                      "speed_cap_mask=0x%08x\n",
14176                  link_config, sc->link_params.speed_cap_mask[idx]);
14177            sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14178            sc->port.advertising[idx] = sc->port.supported[idx];
14179            break;
14180        }
14181
14182        sc->link_params.req_flow_ctrl[idx] =
14183            (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14184
14185        if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14186            if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14187                sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14188            } else {
14189                bxe_set_requested_fc(sc);
14190            }
14191        }
14192
14193        BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14194                            "req_flow_ctrl=0x%x advertising=0x%x\n",
14195              sc->link_params.req_line_speed[idx],
14196              sc->link_params.req_duplex[idx],
14197              sc->link_params.req_flow_ctrl[idx],
14198              sc->port.advertising[idx]);
14199    }
14200}
14201
14202static void
14203bxe_get_phy_info(struct bxe_softc *sc)
14204{
14205    uint8_t port = SC_PORT(sc);
14206    uint32_t config = sc->port.config;
14207    uint32_t eee_mode;
14208
14209    /* shmem data already read in bxe_get_shmem_info() */
14210
14211    BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14212                        "link_config0=0x%08x\n",
14213               sc->link_params.lane_config,
14214               sc->link_params.speed_cap_mask[0],
14215               sc->port.link_config[0]);
14216
14217    bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14218    bxe_link_settings_requested(sc);
14219
14220    if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14221        sc->link_params.feature_config_flags |=
14222            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14223    } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14224        sc->link_params.feature_config_flags &=
14225            ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14226    } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14227        sc->link_params.feature_config_flags |=
14228            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14229    }
14230
14231    /* configure link feature according to nvram value */
14232    eee_mode =
14233        (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14234          PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14235         PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14236    if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14237        sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14238                                    ELINK_EEE_MODE_ENABLE_LPI |
14239                                    ELINK_EEE_MODE_OUTPUT_TIME);
14240    } else {
14241        sc->link_params.eee_mode = 0;
14242    }
14243
14244    /* get the media type */
14245    bxe_media_detect(sc);
14246}
14247
14248static void
14249bxe_get_params(struct bxe_softc *sc)
14250{
14251    /* get user tunable params */
14252    bxe_get_tunable_params(sc);
14253
14254    /* select the RX and TX ring sizes */
14255    sc->tx_ring_size = TX_BD_USABLE;
14256    sc->rx_ring_size = RX_BD_USABLE;
14257
14258    /* XXX disable WoL */
14259    sc->wol = 0;
14260}
14261
14262static void
14263bxe_set_modes_bitmap(struct bxe_softc *sc)
14264{
14265    uint32_t flags = 0;
14266
14267    if (CHIP_REV_IS_FPGA(sc)) {
14268        SET_FLAGS(flags, MODE_FPGA);
14269    } else if (CHIP_REV_IS_EMUL(sc)) {
14270        SET_FLAGS(flags, MODE_EMUL);
14271    } else {
14272        SET_FLAGS(flags, MODE_ASIC);
14273    }
14274
14275    if (CHIP_IS_MODE_4_PORT(sc)) {
14276        SET_FLAGS(flags, MODE_PORT4);
14277    } else {
14278        SET_FLAGS(flags, MODE_PORT2);
14279    }
14280
14281    if (CHIP_IS_E2(sc)) {
14282        SET_FLAGS(flags, MODE_E2);
14283    } else if (CHIP_IS_E3(sc)) {
14284        SET_FLAGS(flags, MODE_E3);
14285        if (CHIP_REV(sc) == CHIP_REV_Ax) {
14286            SET_FLAGS(flags, MODE_E3_A0);
14287        } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14288            SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14289        }
14290    }
14291
14292    if (IS_MF(sc)) {
14293        SET_FLAGS(flags, MODE_MF);
14294        switch (sc->devinfo.mf_info.mf_mode) {
14295        case MULTI_FUNCTION_SD:
14296            SET_FLAGS(flags, MODE_MF_SD);
14297            break;
14298        case MULTI_FUNCTION_SI:
14299            SET_FLAGS(flags, MODE_MF_SI);
14300            break;
14301        case MULTI_FUNCTION_AFEX:
14302            SET_FLAGS(flags, MODE_MF_AFEX);
14303            break;
14304        }
14305    } else {
14306        SET_FLAGS(flags, MODE_SF);
14307    }
14308
14309#if defined(__LITTLE_ENDIAN)
14310    SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14311#else /* __BIG_ENDIAN */
14312    SET_FLAGS(flags, MODE_BIG_ENDIAN);
14313#endif
14314
14315    INIT_MODE_FLAGS(sc) = flags;
14316}
14317
14318static int
14319bxe_alloc_hsi_mem(struct bxe_softc *sc)
14320{
14321    struct bxe_fastpath *fp;
14322    bus_addr_t busaddr;
14323    int max_agg_queues;
14324    int max_segments;
14325    bus_size_t max_size;
14326    bus_size_t max_seg_size;
14327    char buf[32];
14328    int rc;
14329    int i, j;
14330
14331    /* XXX zero out all vars here and call bxe_alloc_hsi_mem on error */
14332
14333    /* allocate the parent bus DMA tag */
14334    rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14335                            1,                        /* alignment */
14336                            0,                        /* boundary limit */
14337                            BUS_SPACE_MAXADDR,        /* restricted low */
14338                            BUS_SPACE_MAXADDR,        /* restricted hi */
14339                            NULL,                     /* addr filter() */
14340                            NULL,                     /* addr filter() arg */
14341                            BUS_SPACE_MAXSIZE_32BIT,  /* max map size */
14342                            BUS_SPACE_UNRESTRICTED,   /* num discontinuous */
14343                            BUS_SPACE_MAXSIZE_32BIT,  /* max seg size */
14344                            0,                        /* flags */
14345                            NULL,                     /* lock() */
14346                            NULL,                     /* lock() arg */
14347                            &sc->parent_dma_tag);     /* returned dma tag */
14348    if (rc != 0) {
14349        BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14350        return (1);
14351    }
14352
14353    /************************/
14354    /* DEFAULT STATUS BLOCK */
14355    /************************/
14356
14357    if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14358                      &sc->def_sb_dma, "default status block") != 0) {
14359        /* XXX */
14360        bus_dma_tag_destroy(sc->parent_dma_tag);
14361        return (1);
14362    }
14363
14364    sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14365
14366    /***************/
14367    /* EVENT QUEUE */
14368    /***************/
14369
14370    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14371                      &sc->eq_dma, "event queue") != 0) {
14372        /* XXX */
14373        bxe_dma_free(sc, &sc->def_sb_dma);
14374        sc->def_sb = NULL;
14375        bus_dma_tag_destroy(sc->parent_dma_tag);
14376        return (1);
14377    }
14378
14379    sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr;
14380
14381    /*************/
14382    /* SLOW PATH */
14383    /*************/
14384
14385    if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
14386                      &sc->sp_dma, "slow path") != 0) {
14387        /* XXX */
14388        bxe_dma_free(sc, &sc->eq_dma);
14389        sc->eq = NULL;
14390        bxe_dma_free(sc, &sc->def_sb_dma);
14391        sc->def_sb = NULL;
14392        bus_dma_tag_destroy(sc->parent_dma_tag);
14393        return (1);
14394    }
14395
14396    sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14397
14398    /*******************/
14399    /* SLOW PATH QUEUE */
14400    /*******************/
14401
14402    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14403                      &sc->spq_dma, "slow path queue") != 0) {
14404        /* XXX */
14405        bxe_dma_free(sc, &sc->sp_dma);
14406        sc->sp = NULL;
14407        bxe_dma_free(sc, &sc->eq_dma);
14408        sc->eq = NULL;
14409        bxe_dma_free(sc, &sc->def_sb_dma);
14410        sc->def_sb = NULL;
14411        bus_dma_tag_destroy(sc->parent_dma_tag);
14412        return (1);
14413    }
14414
14415    sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14416
14417    /***************************/
14418    /* FW DECOMPRESSION BUFFER */
14419    /***************************/
14420
14421    if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14422                      "fw decompression buffer") != 0) {
14423        /* XXX */
14424        bxe_dma_free(sc, &sc->spq_dma);
14425        sc->spq = NULL;
14426        bxe_dma_free(sc, &sc->sp_dma);
14427        sc->sp = NULL;
14428        bxe_dma_free(sc, &sc->eq_dma);
14429        sc->eq = NULL;
14430        bxe_dma_free(sc, &sc->def_sb_dma);
14431        sc->def_sb = NULL;
14432        bus_dma_tag_destroy(sc->parent_dma_tag);
14433        return (1);
14434    }
14435
14436    sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14437
14438    if ((sc->gz_strm =
14439         malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14440        /* XXX */
14441        bxe_dma_free(sc, &sc->gz_buf_dma);
14442        sc->gz_buf = NULL;
14443        bxe_dma_free(sc, &sc->spq_dma);
14444        sc->spq = NULL;
14445        bxe_dma_free(sc, &sc->sp_dma);
14446        sc->sp = NULL;
14447        bxe_dma_free(sc, &sc->eq_dma);
14448        sc->eq = NULL;
14449        bxe_dma_free(sc, &sc->def_sb_dma);
14450        sc->def_sb = NULL;
14451        bus_dma_tag_destroy(sc->parent_dma_tag);
14452        return (1);
14453    }
14454
14455    /*************/
14456    /* FASTPATHS */
14457    /*************/
14458
14459    /* allocate DMA memory for each fastpath structure */
14460    for (i = 0; i < sc->num_queues; i++) {
14461        fp = &sc->fp[i];
14462        fp->sc    = sc;
14463        fp->index = i;
14464
14465        /*******************/
14466        /* FP STATUS BLOCK */
14467        /*******************/
14468
14469        snprintf(buf, sizeof(buf), "fp %d status block", i);
14470        if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
14471                          &fp->sb_dma, buf) != 0) {
14472            /* XXX unwind and free previous fastpath allocations */
14473            BLOGE(sc, "Failed to alloc %s\n", buf);
14474            return (1);
14475        } else {
14476            if (CHIP_IS_E2E3(sc)) {
14477                fp->status_block.e2_sb =
14478                    (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14479            } else {
14480                fp->status_block.e1x_sb =
14481                    (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14482            }
14483        }
14484
14485        /******************/
14486        /* FP TX BD CHAIN */
14487        /******************/
14488
14489        snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
14490        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
14491                          &fp->tx_dma, buf) != 0) {
14492            /* XXX unwind and free previous fastpath allocations */
14493            BLOGE(sc, "Failed to alloc %s\n", buf);
14494            return (1);
14495        } else {
14496            fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14497        }
14498
14499        /* link together the tx bd chain pages */
14500        for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
14501            /* index into the tx bd chain array to last entry per page */
14502            struct eth_tx_next_bd *tx_next_bd =
14503                &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14504            /* point to the next page and wrap from last page */
14505            busaddr = (fp->tx_dma.paddr +
14506                       (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
14507            tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14508            tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
14509        }
14510
14511        /******************/
14512        /* FP RX BD CHAIN */
14513        /******************/
14514
14515        snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
14516        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
14517                          &fp->rx_dma, buf) != 0) {
14518            /* XXX unwind and free previous fastpath allocations */
14519            BLOGE(sc, "Failed to alloc %s\n", buf);
14520            return (1);
14521        } else {
14522            fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14523        }
14524
14525        /* link together the rx bd chain pages */
14526        for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
14527            /* index into the rx bd chain array to last entry per page */
14528            struct eth_rx_bd *rx_bd =
14529                &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14530            /* point to the next page and wrap from last page */
14531            busaddr = (fp->rx_dma.paddr +
14532                       (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
14533            rx_bd->addr_hi = htole32(U64_HI(busaddr));
14534            rx_bd->addr_lo = htole32(U64_LO(busaddr));
14535        }
14536
14537        /*******************/
14538        /* FP RX RCQ CHAIN */
14539        /*******************/
14540
14541        snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
14542        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
14543                          &fp->rcq_dma, buf) != 0) {
14544            /* XXX unwind and free previous fastpath allocations */
14545            BLOGE(sc, "Failed to alloc %s\n", buf);
14546            return (1);
14547        } else {
14548            fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14549        }
14550
14551        /* link together the rcq chain pages */
14552        for (j = 1; j <= RCQ_NUM_PAGES; j++) {
14553            /* index into the rcq chain array to last entry per page */
14554            struct eth_rx_cqe_next_page *rx_cqe_next =
14555                (struct eth_rx_cqe_next_page *)
14556                &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14557            /* point to the next page and wrap from last page */
14558            busaddr = (fp->rcq_dma.paddr +
14559                       (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
14560            rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14561            rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14562        }
14563
14564        /*******************/
14565        /* FP RX SGE CHAIN */
14566        /*******************/
14567
14568        snprintf(buf, sizeof(buf), "fp %d sge chain", i);
14569        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
14570                          &fp->rx_sge_dma, buf) != 0) {
14571            /* XXX unwind and free previous fastpath allocations */
14572            BLOGE(sc, "Failed to alloc %s\n", buf);
14573            return (1);
14574        } else {
14575            fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14576        }
14577
14578        /* link together the sge chain pages */
14579        for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
14580            /* index into the rcq chain array to last entry per page */
14581            struct eth_rx_sge *rx_sge =
14582                &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14583            /* point to the next page and wrap from last page */
14584            busaddr = (fp->rx_sge_dma.paddr +
14585                       (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14586            rx_sge->addr_hi = htole32(U64_HI(busaddr));
14587            rx_sge->addr_lo = htole32(U64_LO(busaddr));
14588        }
14589
14590        /***********************/
14591        /* FP TX MBUF DMA MAPS */
14592        /***********************/
14593
14594        /* set required sizes before mapping to conserve resources */
14595        if (sc->ifnet->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) {
14596            max_size     = BXE_TSO_MAX_SIZE;
14597            max_segments = BXE_TSO_MAX_SEGMENTS;
14598            max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14599        } else {
14600            max_size     = (MCLBYTES * BXE_MAX_SEGMENTS);
14601            max_segments = BXE_MAX_SEGMENTS;
14602            max_seg_size = MCLBYTES;
14603        }
14604
14605        /* create a dma tag for the tx mbufs */
14606        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14607                                1,                  /* alignment */
14608                                0,                  /* boundary limit */
14609                                BUS_SPACE_MAXADDR,  /* restricted low */
14610                                BUS_SPACE_MAXADDR,  /* restricted hi */
14611                                NULL,               /* addr filter() */
14612                                NULL,               /* addr filter() arg */
14613                                max_size,           /* max map size */
14614                                max_segments,       /* num discontinuous */
14615                                max_seg_size,       /* max seg size */
14616                                0,                  /* flags */
14617                                NULL,               /* lock() */
14618                                NULL,               /* lock() arg */
14619                                &fp->tx_mbuf_tag);  /* returned dma tag */
14620        if (rc != 0) {
14621            /* XXX unwind and free previous fastpath allocations */
14622            BLOGE(sc, "Failed to create dma tag for "
14623                      "'fp %d tx mbufs' (%d)\n", i, rc);
14624            return (1);
14625        }
14626
14627        /* create dma maps for each of the tx mbuf clusters */
14628        for (j = 0; j < TX_BD_TOTAL; j++) {
14629            if (bus_dmamap_create(fp->tx_mbuf_tag,
14630                                  BUS_DMA_NOWAIT,
14631                                  &fp->tx_mbuf_chain[j].m_map)) {
14632                /* XXX unwind and free previous fastpath allocations */
14633                BLOGE(sc, "Failed to create dma map for "
14634                          "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
14635                return (1);
14636            }
14637        }
14638
14639        /***********************/
14640        /* FP RX MBUF DMA MAPS */
14641        /***********************/
14642
14643        /* create a dma tag for the rx mbufs */
14644        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14645                                1,                  /* alignment */
14646                                0,                  /* boundary limit */
14647                                BUS_SPACE_MAXADDR,  /* restricted low */
14648                                BUS_SPACE_MAXADDR,  /* restricted hi */
14649                                NULL,               /* addr filter() */
14650                                NULL,               /* addr filter() arg */
14651                                MJUM9BYTES,         /* max map size */
14652                                1,                  /* num discontinuous */
14653                                MJUM9BYTES,         /* max seg size */
14654                                0,                  /* flags */
14655                                NULL,               /* lock() */
14656                                NULL,               /* lock() arg */
14657                                &fp->rx_mbuf_tag);  /* returned dma tag */
14658        if (rc != 0) {
14659            /* XXX unwind and free previous fastpath allocations */
14660            BLOGE(sc, "Failed to create dma tag for "
14661                      "'fp %d rx mbufs' (%d)\n", i, rc);
14662            return (1);
14663        }
14664
14665        /* create dma maps for each of the rx mbuf clusters */
14666        for (j = 0; j < RX_BD_TOTAL; j++) {
14667            if (bus_dmamap_create(fp->rx_mbuf_tag,
14668                                  BUS_DMA_NOWAIT,
14669                                  &fp->rx_mbuf_chain[j].m_map)) {
14670                /* XXX unwind and free previous fastpath allocations */
14671                BLOGE(sc, "Failed to create dma map for "
14672                          "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
14673                return (1);
14674            }
14675        }
14676
14677        /* create dma map for the spare rx mbuf cluster */
14678        if (bus_dmamap_create(fp->rx_mbuf_tag,
14679                              BUS_DMA_NOWAIT,
14680                              &fp->rx_mbuf_spare_map)) {
14681            /* XXX unwind and free previous fastpath allocations */
14682            BLOGE(sc, "Failed to create dma map for "
14683                      "'fp %d spare rx mbuf' (%d)\n", i, rc);
14684            return (1);
14685        }
14686
14687        /***************************/
14688        /* FP RX SGE MBUF DMA MAPS */
14689        /***************************/
14690
14691        /* create a dma tag for the rx sge mbufs */
14692        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14693                                1,                  /* alignment */
14694                                0,                  /* boundary limit */
14695                                BUS_SPACE_MAXADDR,  /* restricted low */
14696                                BUS_SPACE_MAXADDR,  /* restricted hi */
14697                                NULL,               /* addr filter() */
14698                                NULL,               /* addr filter() arg */
14699                                BCM_PAGE_SIZE,      /* max map size */
14700                                1,                  /* num discontinuous */
14701                                BCM_PAGE_SIZE,      /* max seg size */
14702                                0,                  /* flags */
14703                                NULL,               /* lock() */
14704                                NULL,               /* lock() arg */
14705                                &fp->rx_sge_mbuf_tag); /* returned dma tag */
14706        if (rc != 0) {
14707            /* XXX unwind and free previous fastpath allocations */
14708            BLOGE(sc, "Failed to create dma tag for "
14709                      "'fp %d rx sge mbufs' (%d)\n", i, rc);
14710            return (1);
14711        }
14712
14713        /* create dma maps for the rx sge mbuf clusters */
14714        for (j = 0; j < RX_SGE_TOTAL; j++) {
14715            if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14716                                  BUS_DMA_NOWAIT,
14717                                  &fp->rx_sge_mbuf_chain[j].m_map)) {
14718                /* XXX unwind and free previous fastpath allocations */
14719                BLOGE(sc, "Failed to create dma map for "
14720                          "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
14721                return (1);
14722            }
14723        }
14724
14725        /* create dma map for the spare rx sge mbuf cluster */
14726        if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
14727                              BUS_DMA_NOWAIT,
14728                              &fp->rx_sge_mbuf_spare_map)) {
14729            /* XXX unwind and free previous fastpath allocations */
14730            BLOGE(sc, "Failed to create dma map for "
14731                      "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
14732            return (1);
14733        }
14734
14735        /***************************/
14736        /* FP RX TPA MBUF DMA MAPS */
14737        /***************************/
14738
14739        /* create dma maps for the rx tpa mbuf clusters */
14740        max_agg_queues = MAX_AGG_QS(sc);
14741
14742        for (j = 0; j < max_agg_queues; j++) {
14743            if (bus_dmamap_create(fp->rx_mbuf_tag,
14744                                  BUS_DMA_NOWAIT,
14745                                  &fp->rx_tpa_info[j].bd.m_map)) {
14746                /* XXX unwind and free previous fastpath allocations */
14747                BLOGE(sc, "Failed to create dma map for "
14748                          "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
14749                return (1);
14750            }
14751        }
14752
14753        /* create dma map for the spare rx tpa mbuf cluster */
14754        if (bus_dmamap_create(fp->rx_mbuf_tag,
14755                              BUS_DMA_NOWAIT,
14756                              &fp->rx_tpa_info_mbuf_spare_map)) {
14757            /* XXX unwind and free previous fastpath allocations */
14758            BLOGE(sc, "Failed to create dma map for "
14759                      "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
14760            return (1);
14761        }
14762
14763        bxe_init_sge_ring_bit_mask(fp);
14764    }
14765
14766    return (0);
14767}
14768
14769static void
14770bxe_free_hsi_mem(struct bxe_softc *sc)
14771{
14772    struct bxe_fastpath *fp;
14773    int max_agg_queues;
14774    int i, j;
14775
14776    if (sc->parent_dma_tag == NULL) {
14777        return; /* assume nothing was allocated */
14778    }
14779
14780    for (i = 0; i < sc->num_queues; i++) {
14781        fp = &sc->fp[i];
14782
14783        /*******************/
14784        /* FP STATUS BLOCK */
14785        /*******************/
14786
14787        bxe_dma_free(sc, &fp->sb_dma);
14788        memset(&fp->status_block, 0, sizeof(fp->status_block));
14789
14790        /******************/
14791        /* FP TX BD CHAIN */
14792        /******************/
14793
14794        bxe_dma_free(sc, &fp->tx_dma);
14795        fp->tx_chain = NULL;
14796
14797        /******************/
14798        /* FP RX BD CHAIN */
14799        /******************/
14800
14801        bxe_dma_free(sc, &fp->rx_dma);
14802        fp->rx_chain = NULL;
14803
14804        /*******************/
14805        /* FP RX RCQ CHAIN */
14806        /*******************/
14807
14808        bxe_dma_free(sc, &fp->rcq_dma);
14809        fp->rcq_chain = NULL;
14810
14811        /*******************/
14812        /* FP RX SGE CHAIN */
14813        /*******************/
14814
14815        bxe_dma_free(sc, &fp->rx_sge_dma);
14816        fp->rx_sge_chain = NULL;
14817
14818        /***********************/
14819        /* FP TX MBUF DMA MAPS */
14820        /***********************/
14821
14822        if (fp->tx_mbuf_tag != NULL) {
14823            for (j = 0; j < TX_BD_TOTAL; j++) {
14824                if (fp->tx_mbuf_chain[j].m_map != NULL) {
14825                    bus_dmamap_unload(fp->tx_mbuf_tag,
14826                                      fp->tx_mbuf_chain[j].m_map);
14827                    bus_dmamap_destroy(fp->tx_mbuf_tag,
14828                                       fp->tx_mbuf_chain[j].m_map);
14829                }
14830            }
14831
14832            bus_dma_tag_destroy(fp->tx_mbuf_tag);
14833            fp->tx_mbuf_tag = NULL;
14834        }
14835
14836        /***********************/
14837        /* FP RX MBUF DMA MAPS */
14838        /***********************/
14839
14840        if (fp->rx_mbuf_tag != NULL) {
14841            for (j = 0; j < RX_BD_TOTAL; j++) {
14842                if (fp->rx_mbuf_chain[j].m_map != NULL) {
14843                    bus_dmamap_unload(fp->rx_mbuf_tag,
14844                                      fp->rx_mbuf_chain[j].m_map);
14845                    bus_dmamap_destroy(fp->rx_mbuf_tag,
14846                                       fp->rx_mbuf_chain[j].m_map);
14847                }
14848            }
14849
14850            if (fp->rx_mbuf_spare_map != NULL) {
14851                bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14852                bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
14853            }
14854
14855            /***************************/
14856            /* FP RX TPA MBUF DMA MAPS */
14857            /***************************/
14858
14859            max_agg_queues = MAX_AGG_QS(sc);
14860
14861            for (j = 0; j < max_agg_queues; j++) {
14862                if (fp->rx_tpa_info[j].bd.m_map != NULL) {
14863                    bus_dmamap_unload(fp->rx_mbuf_tag,
14864                                      fp->rx_tpa_info[j].bd.m_map);
14865                    bus_dmamap_destroy(fp->rx_mbuf_tag,
14866                                       fp->rx_tpa_info[j].bd.m_map);
14867                }
14868            }
14869
14870            if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
14871                bus_dmamap_unload(fp->rx_mbuf_tag,
14872                                  fp->rx_tpa_info_mbuf_spare_map);
14873                bus_dmamap_destroy(fp->rx_mbuf_tag,
14874                                   fp->rx_tpa_info_mbuf_spare_map);
14875            }
14876
14877            bus_dma_tag_destroy(fp->rx_mbuf_tag);
14878            fp->rx_mbuf_tag = NULL;
14879        }
14880
14881        /***************************/
14882        /* FP RX SGE MBUF DMA MAPS */
14883        /***************************/
14884
14885        if (fp->rx_sge_mbuf_tag != NULL) {
14886            for (j = 0; j < RX_SGE_TOTAL; j++) {
14887                if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
14888                    bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14889                                      fp->rx_sge_mbuf_chain[j].m_map);
14890                    bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14891                                       fp->rx_sge_mbuf_chain[j].m_map);
14892                }
14893            }
14894
14895            if (fp->rx_sge_mbuf_spare_map != NULL) {
14896                bus_dmamap_unload(fp->rx_sge_mbuf_tag,
14897                                  fp->rx_sge_mbuf_spare_map);
14898                bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
14899                                   fp->rx_sge_mbuf_spare_map);
14900            }
14901
14902            bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
14903            fp->rx_sge_mbuf_tag = NULL;
14904        }
14905    }
14906
14907    /***************************/
14908    /* FW DECOMPRESSION BUFFER */
14909    /***************************/
14910
14911    bxe_dma_free(sc, &sc->gz_buf_dma);
14912    sc->gz_buf = NULL;
14913    free(sc->gz_strm, M_DEVBUF);
14914    sc->gz_strm = NULL;
14915
14916    /*******************/
14917    /* SLOW PATH QUEUE */
14918    /*******************/
14919
14920    bxe_dma_free(sc, &sc->spq_dma);
14921    sc->spq = NULL;
14922
14923    /*************/
14924    /* SLOW PATH */
14925    /*************/
14926
14927    bxe_dma_free(sc, &sc->sp_dma);
14928    sc->sp = NULL;
14929
14930    /***************/
14931    /* EVENT QUEUE */
14932    /***************/
14933
14934    bxe_dma_free(sc, &sc->eq_dma);
14935    sc->eq = NULL;
14936
14937    /************************/
14938    /* DEFAULT STATUS BLOCK */
14939    /************************/
14940
14941    bxe_dma_free(sc, &sc->def_sb_dma);
14942    sc->def_sb = NULL;
14943
14944    bus_dma_tag_destroy(sc->parent_dma_tag);
14945    sc->parent_dma_tag = NULL;
14946}
14947
14948/*
14949 * Previous driver DMAE transaction may have occurred when pre-boot stage
14950 * ended and boot began. This would invalidate the addresses of the
14951 * transaction, resulting in was-error bit set in the PCI causing all
14952 * hw-to-host PCIe transactions to timeout. If this happened we want to clear
14953 * the interrupt which detected this from the pglueb and the was-done bit
14954 */
14955static void
14956bxe_prev_interrupted_dmae(struct bxe_softc *sc)
14957{
14958    uint32_t val;
14959
14960    if (!CHIP_IS_E1x(sc)) {
14961        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
14962        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
14963            BLOGD(sc, DBG_LOAD,
14964                  "Clearing 'was-error' bit that was set in pglueb");
14965            REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
14966        }
14967    }
14968}
14969
14970static int
14971bxe_prev_mcp_done(struct bxe_softc *sc)
14972{
14973    uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
14974                                 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
14975    if (!rc) {
14976        BLOGE(sc, "MCP response failure, aborting\n");
14977        return (-1);
14978    }
14979
14980    return (0);
14981}
14982
14983static struct bxe_prev_list_node *
14984bxe_prev_path_get_entry(struct bxe_softc *sc)
14985{
14986    struct bxe_prev_list_node *tmp;
14987
14988    LIST_FOREACH(tmp, &bxe_prev_list, node) {
14989        if ((sc->pcie_bus == tmp->bus) &&
14990            (sc->pcie_device == tmp->slot) &&
14991            (SC_PATH(sc) == tmp->path)) {
14992            return (tmp);
14993        }
14994    }
14995
14996    return (NULL);
14997}
14998
14999static uint8_t
15000bxe_prev_is_path_marked(struct bxe_softc *sc)
15001{
15002    struct bxe_prev_list_node *tmp;
15003    int rc = FALSE;
15004
15005    mtx_lock(&bxe_prev_mtx);
15006
15007    tmp = bxe_prev_path_get_entry(sc);
15008    if (tmp) {
15009        if (tmp->aer) {
15010            BLOGD(sc, DBG_LOAD,
15011                  "Path %d/%d/%d was marked by AER\n",
15012                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15013        } else {
15014            rc = TRUE;
15015            BLOGD(sc, DBG_LOAD,
15016                  "Path %d/%d/%d was already cleaned from previous drivers\n",
15017                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15018        }
15019    }
15020
15021    mtx_unlock(&bxe_prev_mtx);
15022
15023    return (rc);
15024}
15025
15026static int
15027bxe_prev_mark_path(struct bxe_softc *sc,
15028                   uint8_t          after_undi)
15029{
15030    struct bxe_prev_list_node *tmp;
15031
15032    mtx_lock(&bxe_prev_mtx);
15033
15034    /* Check whether the entry for this path already exists */
15035    tmp = bxe_prev_path_get_entry(sc);
15036    if (tmp) {
15037        if (!tmp->aer) {
15038            BLOGD(sc, DBG_LOAD,
15039                  "Re-marking AER in path %d/%d/%d\n",
15040                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15041        } else {
15042            BLOGD(sc, DBG_LOAD,
15043                  "Removing AER indication from path %d/%d/%d\n",
15044                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15045            tmp->aer = 0;
15046        }
15047
15048        mtx_unlock(&bxe_prev_mtx);
15049        return (0);
15050    }
15051
15052    mtx_unlock(&bxe_prev_mtx);
15053
15054    /* Create an entry for this path and add it */
15055    tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15056                 (M_NOWAIT | M_ZERO));
15057    if (!tmp) {
15058        BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15059        return (-1);
15060    }
15061
15062    tmp->bus  = sc->pcie_bus;
15063    tmp->slot = sc->pcie_device;
15064    tmp->path = SC_PATH(sc);
15065    tmp->aer  = 0;
15066    tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15067
15068    mtx_lock(&bxe_prev_mtx);
15069
15070    BLOGD(sc, DBG_LOAD,
15071          "Marked path %d/%d/%d - finished previous unload\n",
15072          sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15073    LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15074
15075    mtx_unlock(&bxe_prev_mtx);
15076
15077    return (0);
15078}
15079
15080static int
15081bxe_do_flr(struct bxe_softc *sc)
15082{
15083    int i;
15084
15085    /* only E2 and onwards support FLR */
15086    if (CHIP_IS_E1x(sc)) {
15087        BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15088        return (-1);
15089    }
15090
15091    /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
15092    if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15093        BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15094              sc->devinfo.bc_ver);
15095        return (-1);
15096    }
15097
15098    /* Wait for Transaction Pending bit clean */
15099    for (i = 0; i < 4; i++) {
15100        if (i) {
15101            DELAY(((1 << (i - 1)) * 100) * 1000);
15102        }
15103
15104        if (!bxe_is_pcie_pending(sc)) {
15105            goto clear;
15106        }
15107    }
15108
15109    BLOGE(sc, "PCIE transaction is not cleared, "
15110              "proceeding with reset anyway\n");
15111
15112clear:
15113
15114    BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15115    bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15116
15117    return (0);
15118}
15119
15120struct bxe_mac_vals {
15121    uint32_t xmac_addr;
15122    uint32_t xmac_val;
15123    uint32_t emac_addr;
15124    uint32_t emac_val;
15125    uint32_t umac_addr;
15126    uint32_t umac_val;
15127    uint32_t bmac_addr;
15128    uint32_t bmac_val[2];
15129};
15130
15131static void
15132bxe_prev_unload_close_mac(struct bxe_softc *sc,
15133                          struct bxe_mac_vals *vals)
15134{
15135    uint32_t val, base_addr, offset, mask, reset_reg;
15136    uint8_t mac_stopped = FALSE;
15137    uint8_t port = SC_PORT(sc);
15138    uint32_t wb_data[2];
15139
15140    /* reset addresses as they also mark which values were changed */
15141    vals->bmac_addr = 0;
15142    vals->umac_addr = 0;
15143    vals->xmac_addr = 0;
15144    vals->emac_addr = 0;
15145
15146    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15147
15148    if (!CHIP_IS_E3(sc)) {
15149        val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15150        mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15151        if ((mask & reset_reg) && val) {
15152            BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15153            base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15154                                    : NIG_REG_INGRESS_BMAC0_MEM;
15155            offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15156                                    : BIGMAC_REGISTER_BMAC_CONTROL;
15157
15158            /*
15159             * use rd/wr since we cannot use dmae. This is safe
15160             * since MCP won't access the bus due to the request
15161             * to unload, and no function on the path can be
15162             * loaded at this time.
15163             */
15164            wb_data[0] = REG_RD(sc, base_addr + offset);
15165            wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15166            vals->bmac_addr = base_addr + offset;
15167            vals->bmac_val[0] = wb_data[0];
15168            vals->bmac_val[1] = wb_data[1];
15169            wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15170            REG_WR(sc, vals->bmac_addr, wb_data[0]);
15171            REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15172        }
15173
15174        BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15175        vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15176        vals->emac_val = REG_RD(sc, vals->emac_addr);
15177        REG_WR(sc, vals->emac_addr, 0);
15178        mac_stopped = TRUE;
15179    } else {
15180        if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15181            BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15182            base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15183            val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15184            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15185            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15186            vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15187            vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15188            REG_WR(sc, vals->xmac_addr, 0);
15189            mac_stopped = TRUE;
15190        }
15191
15192        mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15193        if (mask & reset_reg) {
15194            BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15195            base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15196            vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15197            vals->umac_val = REG_RD(sc, vals->umac_addr);
15198            REG_WR(sc, vals->umac_addr, 0);
15199            mac_stopped = TRUE;
15200        }
15201    }
15202
15203    if (mac_stopped) {
15204        DELAY(20000);
15205    }
15206}
15207
15208#define BXE_PREV_UNDI_PROD_ADDR(p)  (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15209#define BXE_PREV_UNDI_RCQ(val)      ((val) & 0xffff)
15210#define BXE_PREV_UNDI_BD(val)       ((val) >> 16 & 0xffff)
15211#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
15212
15213static void
15214bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15215                         uint8_t          port,
15216                         uint8_t          inc)
15217{
15218    uint16_t rcq, bd;
15219    uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15220
15221    rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15222    bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15223
15224    tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15225    REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15226
15227    BLOGD(sc, DBG_LOAD,
15228          "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15229          port, bd, rcq);
15230}
15231
15232static int
15233bxe_prev_unload_common(struct bxe_softc *sc)
15234{
15235    uint32_t reset_reg, tmp_reg = 0, rc;
15236    uint8_t prev_undi = FALSE;
15237    struct bxe_mac_vals mac_vals;
15238    uint32_t timer_count = 1000;
15239    uint32_t prev_brb;
15240
15241    /*
15242     * It is possible a previous function received 'common' answer,
15243     * but hasn't loaded yet, therefore creating a scenario of
15244     * multiple functions receiving 'common' on the same path.
15245     */
15246    BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15247
15248    memset(&mac_vals, 0, sizeof(mac_vals));
15249
15250    if (bxe_prev_is_path_marked(sc)) {
15251        return (bxe_prev_mcp_done(sc));
15252    }
15253
15254    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15255
15256    /* Reset should be performed after BRB is emptied */
15257    if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15258        /* Close the MAC Rx to prevent BRB from filling up */
15259        bxe_prev_unload_close_mac(sc, &mac_vals);
15260
15261        /* close LLH filters towards the BRB */
15262        elink_set_rx_filter(&sc->link_params, 0);
15263
15264        /*
15265         * Check if the UNDI driver was previously loaded.
15266         * UNDI driver initializes CID offset for normal bell to 0x7
15267         */
15268        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15269            tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15270            if (tmp_reg == 0x7) {
15271                BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15272                prev_undi = TRUE;
15273                /* clear the UNDI indication */
15274                REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15275                /* clear possible idle check errors */
15276                REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15277            }
15278        }
15279
15280        /* wait until BRB is empty */
15281        tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15282        while (timer_count) {
15283            prev_brb = tmp_reg;
15284
15285            tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15286            if (!tmp_reg) {
15287                break;
15288            }
15289
15290            BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15291
15292            /* reset timer as long as BRB actually gets emptied */
15293            if (prev_brb > tmp_reg) {
15294                timer_count = 1000;
15295            } else {
15296                timer_count--;
15297            }
15298
15299            /* If UNDI resides in memory, manually increment it */
15300            if (prev_undi) {
15301                bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15302            }
15303
15304            DELAY(10);
15305        }
15306
15307        if (!timer_count) {
15308            BLOGE(sc, "Failed to empty BRB\n");
15309        }
15310    }
15311
15312    /* No packets are in the pipeline, path is ready for reset */
15313    bxe_reset_common(sc);
15314
15315    if (mac_vals.xmac_addr) {
15316        REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15317    }
15318    if (mac_vals.umac_addr) {
15319        REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15320    }
15321    if (mac_vals.emac_addr) {
15322        REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15323    }
15324    if (mac_vals.bmac_addr) {
15325        REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15326        REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15327    }
15328
15329    rc = bxe_prev_mark_path(sc, prev_undi);
15330    if (rc) {
15331        bxe_prev_mcp_done(sc);
15332        return (rc);
15333    }
15334
15335    return (bxe_prev_mcp_done(sc));
15336}
15337
15338static int
15339bxe_prev_unload_uncommon(struct bxe_softc *sc)
15340{
15341    int rc;
15342
15343    BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15344
15345    /* Test if previous unload process was already finished for this path */
15346    if (bxe_prev_is_path_marked(sc)) {
15347        return (bxe_prev_mcp_done(sc));
15348    }
15349
15350    BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15351
15352    /*
15353     * If function has FLR capabilities, and existing FW version matches
15354     * the one required, then FLR will be sufficient to clean any residue
15355     * left by previous driver
15356     */
15357    rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
15358    if (!rc) {
15359        /* fw version is good */
15360        BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
15361        rc = bxe_do_flr(sc);
15362    }
15363
15364    if (!rc) {
15365        /* FLR was performed */
15366        BLOGD(sc, DBG_LOAD, "FLR successful\n");
15367        return (0);
15368    }
15369
15370    BLOGD(sc, DBG_LOAD, "Could not FLR\n");
15371
15372    /* Close the MCP request, return failure*/
15373    rc = bxe_prev_mcp_done(sc);
15374    if (!rc) {
15375        rc = BXE_PREV_WAIT_NEEDED;
15376    }
15377
15378    return (rc);
15379}
15380
15381static int
15382bxe_prev_unload(struct bxe_softc *sc)
15383{
15384    int time_counter = 10;
15385    uint32_t fw, hw_lock_reg, hw_lock_val;
15386    uint32_t rc = 0;
15387
15388    /*
15389     * Clear HW from errors which may have resulted from an interrupted
15390     * DMAE transaction.
15391     */
15392    bxe_prev_interrupted_dmae(sc);
15393
15394    /* Release previously held locks */
15395    hw_lock_reg =
15396        (SC_FUNC(sc) <= 5) ?
15397            (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
15398            (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
15399
15400    hw_lock_val = (REG_RD(sc, hw_lock_reg));
15401    if (hw_lock_val) {
15402        if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
15403            BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
15404            REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
15405                   (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
15406        }
15407        BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
15408        REG_WR(sc, hw_lock_reg, 0xffffffff);
15409    } else {
15410        BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
15411    }
15412
15413    if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
15414        BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
15415        REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
15416    }
15417
15418    do {
15419        /* Lock MCP using an unload request */
15420        fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
15421        if (!fw) {
15422            BLOGE(sc, "MCP response failure, aborting\n");
15423            rc = -1;
15424            break;
15425        }
15426
15427        if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
15428            rc = bxe_prev_unload_common(sc);
15429            break;
15430        }
15431
15432        /* non-common reply from MCP night require looping */
15433        rc = bxe_prev_unload_uncommon(sc);
15434        if (rc != BXE_PREV_WAIT_NEEDED) {
15435            break;
15436        }
15437
15438        DELAY(20000);
15439    } while (--time_counter);
15440
15441    if (!time_counter || rc) {
15442        BLOGE(sc, "Failed to unload previous driver!"
15443            " time_counter %d rc %d\n", time_counter, rc);
15444        rc = -1;
15445    }
15446
15447    return (rc);
15448}
15449
15450void
15451bxe_dcbx_set_state(struct bxe_softc *sc,
15452                   uint8_t          dcb_on,
15453                   uint32_t         dcbx_enabled)
15454{
15455    if (!CHIP_IS_E1x(sc)) {
15456        sc->dcb_state = dcb_on;
15457        sc->dcbx_enabled = dcbx_enabled;
15458    } else {
15459        sc->dcb_state = FALSE;
15460        sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15461    }
15462    BLOGD(sc, DBG_LOAD,
15463          "DCB state [%s:%s]\n",
15464          dcb_on ? "ON" : "OFF",
15465          (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15466          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15467          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
15468          "on-chip with negotiation" : "invalid");
15469}
15470
15471/* must be called after sriov-enable */
15472static int
15473bxe_set_qm_cid_count(struct bxe_softc *sc)
15474{
15475    int cid_count = BXE_L2_MAX_CID(sc);
15476
15477    if (IS_SRIOV(sc)) {
15478        cid_count += BXE_VF_CIDS;
15479    }
15480
15481    if (CNIC_SUPPORT(sc)) {
15482        cid_count += CNIC_CID_MAX;
15483    }
15484
15485    return (roundup(cid_count, QM_CID_ROUND));
15486}
15487
15488static void
15489bxe_init_multi_cos(struct bxe_softc *sc)
15490{
15491    int pri, cos;
15492
15493    uint32_t pri_map = 0; /* XXX change to user config */
15494
15495    for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
15496        cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
15497        if (cos < sc->max_cos) {
15498            sc->prio_to_cos[pri] = cos;
15499        } else {
15500            BLOGW(sc, "Invalid COS %d for priority %d "
15501                      "(max COS is %d), setting to 0\n",
15502                  cos, pri, (sc->max_cos - 1));
15503            sc->prio_to_cos[pri] = 0;
15504        }
15505    }
15506}
15507
15508static int
15509bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
15510{
15511    struct bxe_softc *sc;
15512    int error, result;
15513
15514    result = 0;
15515    error = sysctl_handle_int(oidp, &result, 0, req);
15516
15517    if (error || !req->newptr) {
15518        return (error);
15519    }
15520
15521    if (result == 1) {
15522        uint32_t  temp;
15523        sc = (struct bxe_softc *)arg1;
15524
15525        BLOGI(sc, "... dumping driver state ...\n");
15526        temp = SHMEM2_RD(sc, temperature_in_half_celsius);
15527        BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
15528    }
15529
15530    return (error);
15531}
15532
15533static int
15534bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
15535{
15536    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15537    uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15538    uint32_t *offset;
15539    uint64_t value = 0;
15540    int index = (int)arg2;
15541
15542    if (index >= BXE_NUM_ETH_STATS) {
15543        BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
15544        return (-1);
15545    }
15546
15547    offset = (eth_stats + bxe_eth_stats_arr[index].offset);
15548
15549    switch (bxe_eth_stats_arr[index].size) {
15550    case 4:
15551        value = (uint64_t)*offset;
15552        break;
15553    case 8:
15554        value = HILO_U64(*offset, *(offset + 1));
15555        break;
15556    default:
15557        BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
15558              index, bxe_eth_stats_arr[index].size);
15559        return (-1);
15560    }
15561
15562    return (sysctl_handle_64(oidp, &value, 0, req));
15563}
15564
15565static int
15566bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
15567{
15568    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15569    uint32_t *eth_stats;
15570    uint32_t *offset;
15571    uint64_t value = 0;
15572    uint32_t q_stat = (uint32_t)arg2;
15573    uint32_t fp_index = ((q_stat >> 16) & 0xffff);
15574    uint32_t index = (q_stat & 0xffff);
15575
15576    eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15577
15578    if (index >= BXE_NUM_ETH_Q_STATS) {
15579        BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
15580        return (-1);
15581    }
15582
15583    offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
15584
15585    switch (bxe_eth_q_stats_arr[index].size) {
15586    case 4:
15587        value = (uint64_t)*offset;
15588        break;
15589    case 8:
15590        value = HILO_U64(*offset, *(offset + 1));
15591        break;
15592    default:
15593        BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
15594              index, bxe_eth_q_stats_arr[index].size);
15595        return (-1);
15596    }
15597
15598    return (sysctl_handle_64(oidp, &value, 0, req));
15599}
15600
15601static void
15602bxe_add_sysctls(struct bxe_softc *sc)
15603{
15604    struct sysctl_ctx_list *ctx;
15605    struct sysctl_oid_list *children;
15606    struct sysctl_oid *queue_top, *queue;
15607    struct sysctl_oid_list *queue_top_children, *queue_children;
15608    char queue_num_buf[32];
15609    uint32_t q_stat;
15610    int i, j;
15611
15612    ctx = device_get_sysctl_ctx(sc->dev);
15613    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
15614
15615    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
15616                      CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
15617                      "version");
15618
15619    snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
15620             BCM_5710_FW_MAJOR_VERSION,
15621             BCM_5710_FW_MINOR_VERSION,
15622             BCM_5710_FW_REVISION_VERSION,
15623             BCM_5710_FW_ENGINEERING_VERSION);
15624
15625    snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
15626        ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION)     ? "Single"  :
15627         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD)   ? "MF-SD"   :
15628         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI)   ? "MF-SI"   :
15629         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
15630                                                                "Unknown"));
15631    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
15632                    CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
15633                    "multifunction vnics per port");
15634
15635    snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
15636        ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
15637         (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
15638         (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
15639                                              "???GT/s"),
15640        sc->devinfo.pcie_link_width);
15641
15642    sc->debug = bxe_debug;
15643
15644#if __FreeBSD_version >= 900000
15645    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15646                      CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
15647                      "bootcode version");
15648    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15649                      CTLFLAG_RD, sc->fw_ver_str, 0,
15650                      "firmware version");
15651    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15652                      CTLFLAG_RD, sc->mf_mode_str, 0,
15653                      "multifunction mode");
15654    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15655                      CTLFLAG_RD, sc->mac_addr_str, 0,
15656                      "mac address");
15657    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15658                      CTLFLAG_RD, sc->pci_link_str, 0,
15659                      "pci link status");
15660    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
15661                    CTLFLAG_RW, &sc->debug,
15662                    "debug logging mode");
15663#else
15664    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
15665                      CTLFLAG_RD, &sc->devinfo.bc_ver_str, 0,
15666                      "bootcode version");
15667    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
15668                      CTLFLAG_RD, &sc->fw_ver_str, 0,
15669                      "firmware version");
15670    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
15671                      CTLFLAG_RD, &sc->mf_mode_str, 0,
15672                      "multifunction mode");
15673    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
15674                      CTLFLAG_RD, &sc->mac_addr_str, 0,
15675                      "mac address");
15676    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
15677                      CTLFLAG_RD, &sc->pci_link_str, 0,
15678                      "pci link status");
15679    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug",
15680                    CTLFLAG_RW, &sc->debug, 0,
15681                    "debug logging mode");
15682#endif /* #if __FreeBSD_version >= 900000 */
15683
15684    sc->trigger_grcdump = 0;
15685    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump",
15686                   CTLFLAG_RW, &sc->trigger_grcdump, 0,
15687                   "trigger grcdump should be invoked"
15688                   "  before collecting grcdump");
15689
15690    sc->grcdump_started = 0;
15691    sc->grcdump_done = 0;
15692    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
15693                   CTLFLAG_RD, &sc->grcdump_done, 0,
15694                   "set by driver when grcdump is done");
15695
15696    sc->rx_budget = bxe_rx_budget;
15697    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
15698                    CTLFLAG_RW, &sc->rx_budget, 0,
15699                    "rx processing budget");
15700
15701    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
15702                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
15703                    bxe_sysctl_state, "IU", "dump driver state");
15704
15705    for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
15706        SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
15707                        bxe_eth_stats_arr[i].string,
15708                        CTLTYPE_U64 | CTLFLAG_RD, sc, i,
15709                        bxe_sysctl_eth_stat, "LU",
15710                        bxe_eth_stats_arr[i].string);
15711    }
15712
15713    /* add a new parent node for all queues "dev.bxe.#.queue" */
15714    queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
15715                                CTLFLAG_RD, NULL, "queue");
15716    queue_top_children = SYSCTL_CHILDREN(queue_top);
15717
15718    for (i = 0; i < sc->num_queues; i++) {
15719        /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
15720        snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
15721        queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
15722                                queue_num_buf, CTLFLAG_RD, NULL,
15723                                "single queue");
15724        queue_children = SYSCTL_CHILDREN(queue);
15725
15726        for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
15727            q_stat = ((i << 16) | j);
15728            SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
15729                            bxe_eth_q_stats_arr[j].string,
15730                            CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat,
15731                            bxe_sysctl_eth_q_stat, "LU",
15732                            bxe_eth_q_stats_arr[j].string);
15733        }
15734    }
15735}
15736
15737static int
15738bxe_alloc_buf_rings(struct bxe_softc *sc)
15739{
15740#if __FreeBSD_version >= 800000
15741
15742    int i;
15743    struct bxe_fastpath *fp;
15744
15745    for (i = 0; i < sc->num_queues; i++) {
15746
15747        fp = &sc->fp[i];
15748
15749        fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
15750                                   M_NOWAIT, &fp->tx_mtx);
15751        if (fp->tx_br == NULL)
15752            return (-1);
15753    }
15754#endif
15755    return (0);
15756}
15757
15758static void
15759bxe_free_buf_rings(struct bxe_softc *sc)
15760{
15761#if __FreeBSD_version >= 800000
15762
15763    int i;
15764    struct bxe_fastpath *fp;
15765
15766    for (i = 0; i < sc->num_queues; i++) {
15767
15768        fp = &sc->fp[i];
15769
15770        if (fp->tx_br) {
15771            buf_ring_free(fp->tx_br, M_DEVBUF);
15772            fp->tx_br = NULL;
15773        }
15774    }
15775
15776#endif
15777}
15778
15779static void
15780bxe_init_fp_mutexs(struct bxe_softc *sc)
15781{
15782    int i;
15783    struct bxe_fastpath *fp;
15784
15785    for (i = 0; i < sc->num_queues; i++) {
15786
15787        fp = &sc->fp[i];
15788
15789        snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
15790            "bxe%d_fp%d_tx_lock", sc->unit, i);
15791        mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
15792
15793        snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
15794            "bxe%d_fp%d_rx_lock", sc->unit, i);
15795        mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
15796    }
15797}
15798
15799static void
15800bxe_destroy_fp_mutexs(struct bxe_softc *sc)
15801{
15802    int i;
15803    struct bxe_fastpath *fp;
15804
15805    for (i = 0; i < sc->num_queues; i++) {
15806
15807        fp = &sc->fp[i];
15808
15809        if (mtx_initialized(&fp->tx_mtx)) {
15810            mtx_destroy(&fp->tx_mtx);
15811        }
15812
15813        if (mtx_initialized(&fp->rx_mtx)) {
15814            mtx_destroy(&fp->rx_mtx);
15815        }
15816    }
15817}
15818
15819
15820/*
15821 * Device attach function.
15822 *
15823 * Allocates device resources, performs secondary chip identification, and
15824 * initializes driver instance variables. This function is called from driver
15825 * load after a successful probe.
15826 *
15827 * Returns:
15828 *   0 = Success, >0 = Failure
15829 */
15830static int
15831bxe_attach(device_t dev)
15832{
15833    struct bxe_softc *sc;
15834
15835    sc = device_get_softc(dev);
15836
15837    BLOGD(sc, DBG_LOAD, "Starting attach...\n");
15838
15839    sc->state = BXE_STATE_CLOSED;
15840
15841    sc->dev  = dev;
15842    sc->unit = device_get_unit(dev);
15843
15844    BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
15845
15846    sc->pcie_bus    = pci_get_bus(dev);
15847    sc->pcie_device = pci_get_slot(dev);
15848    sc->pcie_func   = pci_get_function(dev);
15849
15850    /* enable bus master capability */
15851    pci_enable_busmaster(dev);
15852
15853    /* get the BARs */
15854    if (bxe_allocate_bars(sc) != 0) {
15855        return (ENXIO);
15856    }
15857
15858    /* initialize the mutexes */
15859    bxe_init_mutexes(sc);
15860
15861    /* prepare the periodic callout */
15862    callout_init(&sc->periodic_callout, 0);
15863
15864    /* prepare the chip taskqueue */
15865    sc->chip_tq_flags = CHIP_TQ_NONE;
15866    snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
15867             "bxe%d_chip_tq", sc->unit);
15868    TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
15869    sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
15870                                   taskqueue_thread_enqueue,
15871                                   &sc->chip_tq);
15872    taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
15873                            "%s", sc->chip_tq_name);
15874
15875    /* get device info and set params */
15876    if (bxe_get_device_info(sc) != 0) {
15877        BLOGE(sc, "getting device info\n");
15878        bxe_deallocate_bars(sc);
15879        pci_disable_busmaster(dev);
15880        return (ENXIO);
15881    }
15882
15883    /* get final misc params */
15884    bxe_get_params(sc);
15885
15886    /* set the default MTU (changed via ifconfig) */
15887    sc->mtu = ETHERMTU;
15888
15889    bxe_set_modes_bitmap(sc);
15890
15891    /* XXX
15892     * If in AFEX mode and the function is configured for FCoE
15893     * then bail... no L2 allowed.
15894     */
15895
15896    /* get phy settings from shmem and 'and' against admin settings */
15897    bxe_get_phy_info(sc);
15898
15899    /* initialize the FreeBSD ifnet interface */
15900    if (bxe_init_ifnet(sc) != 0) {
15901        bxe_release_mutexes(sc);
15902        bxe_deallocate_bars(sc);
15903        pci_disable_busmaster(dev);
15904        return (ENXIO);
15905    }
15906
15907    if (bxe_add_cdev(sc) != 0) {
15908        if (sc->ifnet != NULL) {
15909            ether_ifdetach(sc->ifnet);
15910        }
15911        ifmedia_removeall(&sc->ifmedia);
15912        bxe_release_mutexes(sc);
15913        bxe_deallocate_bars(sc);
15914        pci_disable_busmaster(dev);
15915        return (ENXIO);
15916    }
15917
15918    /* allocate device interrupts */
15919    if (bxe_interrupt_alloc(sc) != 0) {
15920        bxe_del_cdev(sc);
15921        if (sc->ifnet != NULL) {
15922            ether_ifdetach(sc->ifnet);
15923        }
15924        ifmedia_removeall(&sc->ifmedia);
15925        bxe_release_mutexes(sc);
15926        bxe_deallocate_bars(sc);
15927        pci_disable_busmaster(dev);
15928        return (ENXIO);
15929    }
15930
15931    bxe_init_fp_mutexs(sc);
15932
15933    if (bxe_alloc_buf_rings(sc) != 0) {
15934	bxe_free_buf_rings(sc);
15935        bxe_interrupt_free(sc);
15936        bxe_del_cdev(sc);
15937        if (sc->ifnet != NULL) {
15938            ether_ifdetach(sc->ifnet);
15939        }
15940        ifmedia_removeall(&sc->ifmedia);
15941        bxe_release_mutexes(sc);
15942        bxe_deallocate_bars(sc);
15943        pci_disable_busmaster(dev);
15944        return (ENXIO);
15945    }
15946
15947    /* allocate ilt */
15948    if (bxe_alloc_ilt_mem(sc) != 0) {
15949	bxe_free_buf_rings(sc);
15950        bxe_interrupt_free(sc);
15951        bxe_del_cdev(sc);
15952        if (sc->ifnet != NULL) {
15953            ether_ifdetach(sc->ifnet);
15954        }
15955        ifmedia_removeall(&sc->ifmedia);
15956        bxe_release_mutexes(sc);
15957        bxe_deallocate_bars(sc);
15958        pci_disable_busmaster(dev);
15959        return (ENXIO);
15960    }
15961
15962    /* allocate the host hardware/software hsi structures */
15963    if (bxe_alloc_hsi_mem(sc) != 0) {
15964        bxe_free_ilt_mem(sc);
15965	bxe_free_buf_rings(sc);
15966        bxe_interrupt_free(sc);
15967        bxe_del_cdev(sc);
15968        if (sc->ifnet != NULL) {
15969            ether_ifdetach(sc->ifnet);
15970        }
15971        ifmedia_removeall(&sc->ifmedia);
15972        bxe_release_mutexes(sc);
15973        bxe_deallocate_bars(sc);
15974        pci_disable_busmaster(dev);
15975        return (ENXIO);
15976    }
15977
15978    /* need to reset chip if UNDI was active */
15979    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
15980        /* init fw_seq */
15981        sc->fw_seq =
15982            (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
15983             DRV_MSG_SEQ_NUMBER_MASK);
15984        BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
15985        bxe_prev_unload(sc);
15986    }
15987
15988#if 1
15989    /* XXX */
15990    bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
15991#else
15992    if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
15993        SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
15994        SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
15995        SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
15996        bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
15997        bxe_dcbx_init_params(sc);
15998    } else {
15999        bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16000    }
16001#endif
16002
16003    /* calculate qm_cid_count */
16004    sc->qm_cid_count = bxe_set_qm_cid_count(sc);
16005    BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
16006
16007    sc->max_cos = 1;
16008    bxe_init_multi_cos(sc);
16009
16010    bxe_add_sysctls(sc);
16011
16012    return (0);
16013}
16014
16015/*
16016 * Device detach function.
16017 *
16018 * Stops the controller, resets the controller, and releases resources.
16019 *
16020 * Returns:
16021 *   0 = Success, >0 = Failure
16022 */
16023static int
16024bxe_detach(device_t dev)
16025{
16026    struct bxe_softc *sc;
16027    struct ifnet *ifp;
16028
16029    sc = device_get_softc(dev);
16030
16031    BLOGD(sc, DBG_LOAD, "Starting detach...\n");
16032
16033    ifp = sc->ifnet;
16034    if (ifp != NULL && ifp->if_vlantrunk != NULL) {
16035        BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16036        return(EBUSY);
16037    }
16038
16039    bxe_del_cdev(sc);
16040
16041    /* stop the periodic callout */
16042    bxe_periodic_stop(sc);
16043
16044    /* stop the chip taskqueue */
16045    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16046    if (sc->chip_tq) {
16047        taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16048        taskqueue_free(sc->chip_tq);
16049        sc->chip_tq = NULL;
16050    }
16051
16052    /* stop and reset the controller if it was open */
16053    if (sc->state != BXE_STATE_CLOSED) {
16054        BXE_CORE_LOCK(sc);
16055        bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16056        sc->state = BXE_STATE_DISABLED;
16057        BXE_CORE_UNLOCK(sc);
16058    }
16059
16060    /* release the network interface */
16061    if (ifp != NULL) {
16062        ether_ifdetach(ifp);
16063    }
16064    ifmedia_removeall(&sc->ifmedia);
16065
16066    /* XXX do the following based on driver state... */
16067
16068    /* free the host hardware/software hsi structures */
16069    bxe_free_hsi_mem(sc);
16070
16071    /* free ilt */
16072    bxe_free_ilt_mem(sc);
16073
16074    bxe_free_buf_rings(sc);
16075
16076    /* release the interrupts */
16077    bxe_interrupt_free(sc);
16078
16079    /* Release the mutexes*/
16080    bxe_destroy_fp_mutexs(sc);
16081    bxe_release_mutexes(sc);
16082
16083
16084    /* Release the PCIe BAR mapped memory */
16085    bxe_deallocate_bars(sc);
16086
16087    /* Release the FreeBSD interface. */
16088    if (sc->ifnet != NULL) {
16089        if_free(sc->ifnet);
16090    }
16091
16092    pci_disable_busmaster(dev);
16093
16094    return (0);
16095}
16096
16097/*
16098 * Device shutdown function.
16099 *
16100 * Stops and resets the controller.
16101 *
16102 * Returns:
16103 *   Nothing
16104 */
16105static int
16106bxe_shutdown(device_t dev)
16107{
16108    struct bxe_softc *sc;
16109
16110    sc = device_get_softc(dev);
16111
16112    BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16113
16114    /* stop the periodic callout */
16115    bxe_periodic_stop(sc);
16116
16117    BXE_CORE_LOCK(sc);
16118    bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
16119    BXE_CORE_UNLOCK(sc);
16120
16121    return (0);
16122}
16123
16124void
16125bxe_igu_ack_sb(struct bxe_softc *sc,
16126               uint8_t          igu_sb_id,
16127               uint8_t          segment,
16128               uint16_t         index,
16129               uint8_t          op,
16130               uint8_t          update)
16131{
16132    uint32_t igu_addr = sc->igu_base_addr;
16133    igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16134    bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16135}
16136
16137static void
16138bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16139                     uint8_t          func,
16140                     uint8_t          idu_sb_id,
16141                     uint8_t          is_pf)
16142{
16143    uint32_t data, ctl, cnt = 100;
16144    uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16145    uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16146    uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16147    uint32_t sb_bit =  1 << (idu_sb_id%32);
16148    uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16149    uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
16150
16151    /* Not supported in BC mode */
16152    if (CHIP_INT_MODE_IS_BC(sc)) {
16153        return;
16154    }
16155
16156    data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16157             IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16158            IGU_REGULAR_CLEANUP_SET |
16159            IGU_REGULAR_BCLEANUP);
16160
16161    ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16162           (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16163           (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
16164
16165    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16166            data, igu_addr_data);
16167    REG_WR(sc, igu_addr_data, data);
16168
16169    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16170                      BUS_SPACE_BARRIER_WRITE);
16171    mb();
16172
16173    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16174            ctl, igu_addr_ctl);
16175    REG_WR(sc, igu_addr_ctl, ctl);
16176
16177    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16178                      BUS_SPACE_BARRIER_WRITE);
16179    mb();
16180
16181    /* wait for clean up to finish */
16182    while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16183        DELAY(20000);
16184    }
16185
16186    if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16187        BLOGD(sc, DBG_LOAD,
16188              "Unable to finish IGU cleanup: "
16189              "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16190              idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16191    }
16192}
16193
16194static void
16195bxe_igu_clear_sb(struct bxe_softc *sc,
16196                 uint8_t          idu_sb_id)
16197{
16198    bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16199}
16200
16201
16202
16203
16204
16205
16206
16207/*******************/
16208/* ECORE CALLBACKS */
16209/*******************/
16210
16211static void
16212bxe_reset_common(struct bxe_softc *sc)
16213{
16214    uint32_t val = 0x1400;
16215
16216    /* reset_common */
16217    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16218
16219    if (CHIP_IS_E3(sc)) {
16220        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16221        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16222    }
16223
16224    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16225}
16226
16227static void
16228bxe_common_init_phy(struct bxe_softc *sc)
16229{
16230    uint32_t shmem_base[2];
16231    uint32_t shmem2_base[2];
16232
16233    /* Avoid common init in case MFW supports LFA */
16234    if (SHMEM2_RD(sc, size) >
16235        (uint32_t)offsetof(struct shmem2_region,
16236                           lfa_host_addr[SC_PORT(sc)])) {
16237        return;
16238    }
16239
16240    shmem_base[0]  = sc->devinfo.shmem_base;
16241    shmem2_base[0] = sc->devinfo.shmem2_base;
16242
16243    if (!CHIP_IS_E1x(sc)) {
16244        shmem_base[1]  = SHMEM2_RD(sc, other_shmem_base_addr);
16245        shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16246    }
16247
16248    bxe_acquire_phy_lock(sc);
16249    elink_common_init_phy(sc, shmem_base, shmem2_base,
16250                          sc->devinfo.chip_id, 0);
16251    bxe_release_phy_lock(sc);
16252}
16253
16254static void
16255bxe_pf_disable(struct bxe_softc *sc)
16256{
16257    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16258
16259    val &= ~IGU_PF_CONF_FUNC_EN;
16260
16261    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16262    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16263    REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16264}
16265
16266static void
16267bxe_init_pxp(struct bxe_softc *sc)
16268{
16269    uint16_t devctl;
16270    int r_order, w_order;
16271
16272    devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2);
16273
16274    BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
16275
16276    w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5);
16277
16278    if (sc->mrrs == -1) {
16279        r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12);
16280    } else {
16281        BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16282        r_order = sc->mrrs;
16283    }
16284
16285    ecore_init_pxp_arb(sc, r_order, w_order);
16286}
16287
16288static uint32_t
16289bxe_get_pretend_reg(struct bxe_softc *sc)
16290{
16291    uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16292    uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
16293    return (base + (SC_ABS_FUNC(sc)) * stride);
16294}
16295
16296/*
16297 * Called only on E1H or E2.
16298 * When pretending to be PF, the pretend value is the function number 0..7.
16299 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
16300 * combination.
16301 */
16302static int
16303bxe_pretend_func(struct bxe_softc *sc,
16304                 uint16_t         pretend_func_val)
16305{
16306    uint32_t pretend_reg;
16307
16308    if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16309        return (-1);
16310    }
16311
16312    /* get my own pretend register */
16313    pretend_reg = bxe_get_pretend_reg(sc);
16314    REG_WR(sc, pretend_reg, pretend_func_val);
16315    REG_RD(sc, pretend_reg);
16316    return (0);
16317}
16318
16319static void
16320bxe_iov_init_dmae(struct bxe_softc *sc)
16321{
16322    return;
16323}
16324
16325static void
16326bxe_iov_init_dq(struct bxe_softc *sc)
16327{
16328    return;
16329}
16330
16331/* send a NIG loopback debug packet */
16332static void
16333bxe_lb_pckt(struct bxe_softc *sc)
16334{
16335    uint32_t wb_write[3];
16336
16337    /* Ethernet source and destination addresses */
16338    wb_write[0] = 0x55555555;
16339    wb_write[1] = 0x55555555;
16340    wb_write[2] = 0x20;     /* SOP */
16341    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16342
16343    /* NON-IP protocol */
16344    wb_write[0] = 0x09000000;
16345    wb_write[1] = 0x55555555;
16346    wb_write[2] = 0x10;     /* EOP, eop_bvalid = 0 */
16347    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16348}
16349
16350/*
16351 * Some of the internal memories are not directly readable from the driver.
16352 * To test them we send debug packets.
16353 */
16354static int
16355bxe_int_mem_test(struct bxe_softc *sc)
16356{
16357    int factor;
16358    int count, i;
16359    uint32_t val = 0;
16360
16361    if (CHIP_REV_IS_FPGA(sc)) {
16362        factor = 120;
16363    } else if (CHIP_REV_IS_EMUL(sc)) {
16364        factor = 200;
16365    } else {
16366        factor = 1;
16367    }
16368
16369    /* disable inputs of parser neighbor blocks */
16370    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16371    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16372    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16373    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16374
16375    /*  write 0 to parser credits for CFC search request */
16376    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16377
16378    /* send Ethernet packet */
16379    bxe_lb_pckt(sc);
16380
16381    /* TODO do i reset NIG statistic? */
16382    /* Wait until NIG register shows 1 packet of size 0x10 */
16383    count = 1000 * factor;
16384    while (count) {
16385        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16386        val = *BXE_SP(sc, wb_data[0]);
16387        if (val == 0x10) {
16388            break;
16389        }
16390
16391        DELAY(10000);
16392        count--;
16393    }
16394
16395    if (val != 0x10) {
16396        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16397        return (-1);
16398    }
16399
16400    /* wait until PRS register shows 1 packet */
16401    count = (1000 * factor);
16402    while (count) {
16403        val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16404        if (val == 1) {
16405            break;
16406        }
16407
16408        DELAY(10000);
16409        count--;
16410    }
16411
16412    if (val != 0x1) {
16413        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16414        return (-2);
16415    }
16416
16417    /* Reset and init BRB, PRS */
16418    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16419    DELAY(50000);
16420    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16421    DELAY(50000);
16422    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16423    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16424
16425    /* Disable inputs of parser neighbor blocks */
16426    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16427    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16428    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16429    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16430
16431    /* Write 0 to parser credits for CFC search request */
16432    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16433
16434    /* send 10 Ethernet packets */
16435    for (i = 0; i < 10; i++) {
16436        bxe_lb_pckt(sc);
16437    }
16438
16439    /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
16440    count = (1000 * factor);
16441    while (count) {
16442        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16443        val = *BXE_SP(sc, wb_data[0]);
16444        if (val == 0xb0) {
16445            break;
16446        }
16447
16448        DELAY(10000);
16449        count--;
16450    }
16451
16452    if (val != 0xb0) {
16453        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16454        return (-3);
16455    }
16456
16457    /* Wait until PRS register shows 2 packets */
16458    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16459    if (val != 2) {
16460        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16461    }
16462
16463    /* Write 1 to parser credits for CFC search request */
16464    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
16465
16466    /* Wait until PRS register shows 3 packets */
16467    DELAY(10000 * factor);
16468
16469    /* Wait until NIG register shows 1 packet of size 0x10 */
16470    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16471    if (val != 3) {
16472        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16473    }
16474
16475    /* clear NIG EOP FIFO */
16476    for (i = 0; i < 11; i++) {
16477        REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
16478    }
16479
16480    val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
16481    if (val != 1) {
16482        BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
16483        return (-4);
16484    }
16485
16486    /* Reset and init BRB, PRS, NIG */
16487    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16488    DELAY(50000);
16489    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16490    DELAY(50000);
16491    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16492    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16493    if (!CNIC_SUPPORT(sc)) {
16494        /* set NIC mode */
16495        REG_WR(sc, PRS_REG_NIC_MODE, 1);
16496    }
16497
16498    /* Enable inputs of parser neighbor blocks */
16499    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
16500    REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
16501    REG_WR(sc, CFC_REG_DEBUG0, 0x0);
16502    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
16503
16504    return (0);
16505}
16506
16507static void
16508bxe_setup_fan_failure_detection(struct bxe_softc *sc)
16509{
16510    int is_required;
16511    uint32_t val;
16512    int port;
16513
16514    is_required = 0;
16515    val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
16516           SHARED_HW_CFG_FAN_FAILURE_MASK);
16517
16518    if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
16519        is_required = 1;
16520    }
16521    /*
16522     * The fan failure mechanism is usually related to the PHY type since
16523     * the power consumption of the board is affected by the PHY. Currently,
16524     * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
16525     */
16526    else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
16527        for (port = PORT_0; port < PORT_MAX; port++) {
16528            is_required |= elink_fan_failure_det_req(sc,
16529                                                     sc->devinfo.shmem_base,
16530                                                     sc->devinfo.shmem2_base,
16531                                                     port);
16532        }
16533    }
16534
16535    BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
16536
16537    if (is_required == 0) {
16538        return;
16539    }
16540
16541    /* Fan failure is indicated by SPIO 5 */
16542    bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
16543
16544    /* set to active low mode */
16545    val = REG_RD(sc, MISC_REG_SPIO_INT);
16546    val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
16547    REG_WR(sc, MISC_REG_SPIO_INT, val);
16548
16549    /* enable interrupt to signal the IGU */
16550    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
16551    val |= MISC_SPIO_SPIO5;
16552    REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
16553}
16554
16555static void
16556bxe_enable_blocks_attention(struct bxe_softc *sc)
16557{
16558    uint32_t val;
16559
16560    REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16561    if (!CHIP_IS_E1x(sc)) {
16562        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
16563    } else {
16564        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
16565    }
16566    REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16567    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16568    /*
16569     * mask read length error interrupts in brb for parser
16570     * (parsing unit and 'checksum and crc' unit)
16571     * these errors are legal (PU reads fixed length and CAC can cause
16572     * read length error on truncated packets)
16573     */
16574    REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
16575    REG_WR(sc, QM_REG_QM_INT_MASK, 0);
16576    REG_WR(sc, TM_REG_TM_INT_MASK, 0);
16577    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
16578    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
16579    REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
16580/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
16581/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
16582    REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
16583    REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
16584    REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
16585/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
16586/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
16587    REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
16588    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
16589    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
16590    REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
16591/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
16592/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
16593
16594    val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
16595           PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
16596           PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
16597    if (!CHIP_IS_E1x(sc)) {
16598        val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
16599                PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
16600    }
16601    REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
16602
16603    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
16604    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
16605    REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
16606/*      REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
16607
16608    if (!CHIP_IS_E1x(sc)) {
16609        /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
16610        REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
16611    }
16612
16613    REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
16614    REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
16615/*      REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
16616    REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18);     /* bit 3,4 masked */
16617}
16618
16619/**
16620 * bxe_init_hw_common - initialize the HW at the COMMON phase.
16621 *
16622 * @sc:     driver handle
16623 */
16624static int
16625bxe_init_hw_common(struct bxe_softc *sc)
16626{
16627    uint8_t abs_func_id;
16628    uint32_t val;
16629
16630    BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
16631          SC_ABS_FUNC(sc));
16632
16633    /*
16634     * take the RESET lock to protect undi_unload flow from accessing
16635     * registers while we are resetting the chip
16636     */
16637    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16638
16639    bxe_reset_common(sc);
16640
16641    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
16642
16643    val = 0xfffc;
16644    if (CHIP_IS_E3(sc)) {
16645        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16646        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16647    }
16648
16649    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
16650
16651    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
16652
16653    ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
16654    BLOGD(sc, DBG_LOAD, "after misc block init\n");
16655
16656    if (!CHIP_IS_E1x(sc)) {
16657        /*
16658         * 4-port mode or 2-port mode we need to turn off master-enable for
16659         * everyone. After that we turn it back on for self. So, we disregard
16660         * multi-function, and always disable all functions on the given path,
16661         * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
16662         */
16663        for (abs_func_id = SC_PATH(sc);
16664             abs_func_id < (E2_FUNC_MAX * 2);
16665             abs_func_id += 2) {
16666            if (abs_func_id == SC_ABS_FUNC(sc)) {
16667                REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
16668                continue;
16669            }
16670
16671            bxe_pretend_func(sc, abs_func_id);
16672
16673            /* clear pf enable */
16674            bxe_pf_disable(sc);
16675
16676            bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16677        }
16678    }
16679
16680    BLOGD(sc, DBG_LOAD, "after pf disable\n");
16681
16682    ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
16683
16684    if (CHIP_IS_E1(sc)) {
16685        /*
16686         * enable HW interrupt from PXP on USDM overflow
16687         * bit 16 on INT_MASK_0
16688         */
16689        REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16690    }
16691
16692    ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
16693    bxe_init_pxp(sc);
16694
16695#ifdef __BIG_ENDIAN
16696    REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
16697    REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
16698    REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
16699    REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
16700    REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
16701    /* make sure this value is 0 */
16702    REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
16703
16704    //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
16705    REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
16706    REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
16707    REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
16708    REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
16709#endif
16710
16711    ecore_ilt_init_page_size(sc, INITOP_SET);
16712
16713    if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
16714        REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
16715    }
16716
16717    /* let the HW do it's magic... */
16718    DELAY(100000);
16719
16720    /* finish PXP init */
16721    val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
16722    if (val != 1) {
16723        BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
16724            val);
16725        return (-1);
16726    }
16727    val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
16728    if (val != 1) {
16729        BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
16730        return (-1);
16731    }
16732
16733    BLOGD(sc, DBG_LOAD, "after pxp init\n");
16734
16735    /*
16736     * Timer bug workaround for E2 only. We need to set the entire ILT to have
16737     * entries with value "0" and valid bit on. This needs to be done by the
16738     * first PF that is loaded in a path (i.e. common phase)
16739     */
16740    if (!CHIP_IS_E1x(sc)) {
16741/*
16742 * In E2 there is a bug in the timers block that can cause function 6 / 7
16743 * (i.e. vnic3) to start even if it is marked as "scan-off".
16744 * This occurs when a different function (func2,3) is being marked
16745 * as "scan-off". Real-life scenario for example: if a driver is being
16746 * load-unloaded while func6,7 are down. This will cause the timer to access
16747 * the ilt, translate to a logical address and send a request to read/write.
16748 * Since the ilt for the function that is down is not valid, this will cause
16749 * a translation error which is unrecoverable.
16750 * The Workaround is intended to make sure that when this happens nothing
16751 * fatal will occur. The workaround:
16752 *  1.  First PF driver which loads on a path will:
16753 *      a.  After taking the chip out of reset, by using pretend,
16754 *          it will write "0" to the following registers of
16755 *          the other vnics.
16756 *          REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16757 *          REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
16758 *          REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
16759 *          And for itself it will write '1' to
16760 *          PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
16761 *          dmae-operations (writing to pram for example.)
16762 *          note: can be done for only function 6,7 but cleaner this
16763 *            way.
16764 *      b.  Write zero+valid to the entire ILT.
16765 *      c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
16766 *          VNIC3 (of that port). The range allocated will be the
16767 *          entire ILT. This is needed to prevent  ILT range error.
16768 *  2.  Any PF driver load flow:
16769 *      a.  ILT update with the physical addresses of the allocated
16770 *          logical pages.
16771 *      b.  Wait 20msec. - note that this timeout is needed to make
16772 *          sure there are no requests in one of the PXP internal
16773 *          queues with "old" ILT addresses.
16774 *      c.  PF enable in the PGLC.
16775 *      d.  Clear the was_error of the PF in the PGLC. (could have
16776 *          occurred while driver was down)
16777 *      e.  PF enable in the CFC (WEAK + STRONG)
16778 *      f.  Timers scan enable
16779 *  3.  PF driver unload flow:
16780 *      a.  Clear the Timers scan_en.
16781 *      b.  Polling for scan_on=0 for that PF.
16782 *      c.  Clear the PF enable bit in the PXP.
16783 *      d.  Clear the PF enable in the CFC (WEAK + STRONG)
16784 *      e.  Write zero+valid to all ILT entries (The valid bit must
16785 *          stay set)
16786 *      f.  If this is VNIC 3 of a port then also init
16787 *          first_timers_ilt_entry to zero and last_timers_ilt_entry
16788 *          to the last enrty in the ILT.
16789 *
16790 *      Notes:
16791 *      Currently the PF error in the PGLC is non recoverable.
16792 *      In the future the there will be a recovery routine for this error.
16793 *      Currently attention is masked.
16794 *      Having an MCP lock on the load/unload process does not guarantee that
16795 *      there is no Timer disable during Func6/7 enable. This is because the
16796 *      Timers scan is currently being cleared by the MCP on FLR.
16797 *      Step 2.d can be done only for PF6/7 and the driver can also check if
16798 *      there is error before clearing it. But the flow above is simpler and
16799 *      more general.
16800 *      All ILT entries are written by zero+valid and not just PF6/7
16801 *      ILT entries since in the future the ILT entries allocation for
16802 *      PF-s might be dynamic.
16803 */
16804        struct ilt_client_info ilt_cli;
16805        struct ecore_ilt ilt;
16806
16807        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
16808        memset(&ilt, 0, sizeof(struct ecore_ilt));
16809
16810        /* initialize dummy TM client */
16811        ilt_cli.start      = 0;
16812        ilt_cli.end        = ILT_NUM_PAGE_ENTRIES - 1;
16813        ilt_cli.client_num = ILT_CLIENT_TM;
16814
16815        /*
16816         * Step 1: set zeroes to all ilt page entries with valid bit on
16817         * Step 2: set the timers first/last ilt entry to point
16818         * to the entire range to prevent ILT range error for 3rd/4th
16819         * vnic (this code assumes existence of the vnic)
16820         *
16821         * both steps performed by call to ecore_ilt_client_init_op()
16822         * with dummy TM client
16823         *
16824         * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
16825         * and his brother are split registers
16826         */
16827
16828        bxe_pretend_func(sc, (SC_PATH(sc) + 6));
16829        ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
16830        bxe_pretend_func(sc, SC_ABS_FUNC(sc));
16831
16832        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
16833        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
16834        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
16835    }
16836
16837    REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
16838    REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
16839
16840    if (!CHIP_IS_E1x(sc)) {
16841        int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
16842                     (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
16843
16844        ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
16845        ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
16846
16847        /* let the HW do it's magic... */
16848        do {
16849            DELAY(200000);
16850            val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
16851        } while (factor-- && (val != 1));
16852
16853        if (val != 1) {
16854            BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
16855            return (-1);
16856        }
16857    }
16858
16859    BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
16860
16861    ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
16862
16863    bxe_iov_init_dmae(sc);
16864
16865    /* clean the DMAE memory */
16866    sc->dmae_ready = 1;
16867    ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
16868
16869    ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
16870
16871    ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
16872
16873    ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
16874
16875    ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
16876
16877    bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
16878    bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
16879    bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
16880    bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
16881
16882    ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
16883
16884    /* QM queues pointers table */
16885    ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
16886
16887    /* soft reset pulse */
16888    REG_WR(sc, QM_REG_SOFT_RESET, 1);
16889    REG_WR(sc, QM_REG_SOFT_RESET, 0);
16890
16891    if (CNIC_SUPPORT(sc))
16892        ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
16893
16894    ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
16895    REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
16896    if (!CHIP_REV_IS_SLOW(sc)) {
16897        /* enable hw interrupt from doorbell Q */
16898        REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16899    }
16900
16901    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16902
16903    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16904    REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
16905
16906    if (!CHIP_IS_E1(sc)) {
16907        REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
16908    }
16909
16910    if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
16911        if (IS_MF_AFEX(sc)) {
16912            /*
16913             * configure that AFEX and VLAN headers must be
16914             * received in AFEX mode
16915             */
16916            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
16917            REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
16918            REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
16919            REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
16920            REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
16921        } else {
16922            /*
16923             * Bit-map indicating which L2 hdrs may appear
16924             * after the basic Ethernet header
16925             */
16926            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
16927                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
16928        }
16929    }
16930
16931    ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
16932    ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
16933    ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
16934    ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
16935
16936    if (!CHIP_IS_E1x(sc)) {
16937        /* reset VFC memories */
16938        REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
16939               VFC_MEMORIES_RST_REG_CAM_RST |
16940               VFC_MEMORIES_RST_REG_RAM_RST);
16941        REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
16942               VFC_MEMORIES_RST_REG_CAM_RST |
16943               VFC_MEMORIES_RST_REG_RAM_RST);
16944
16945        DELAY(20000);
16946    }
16947
16948    ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
16949    ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
16950    ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
16951    ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
16952
16953    /* sync semi rtc */
16954    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
16955           0x80000000);
16956    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
16957           0x80000000);
16958
16959    ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
16960    ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
16961    ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
16962
16963    if (!CHIP_IS_E1x(sc)) {
16964        if (IS_MF_AFEX(sc)) {
16965            /*
16966             * configure that AFEX and VLAN headers must be
16967             * sent in AFEX mode
16968             */
16969            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
16970            REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
16971            REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
16972            REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
16973            REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
16974        } else {
16975            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
16976                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
16977        }
16978    }
16979
16980    REG_WR(sc, SRC_REG_SOFT_RST, 1);
16981
16982    ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
16983
16984    if (CNIC_SUPPORT(sc)) {
16985        REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
16986        REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
16987        REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
16988        REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
16989        REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
16990        REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
16991        REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
16992        REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
16993        REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
16994        REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
16995    }
16996    REG_WR(sc, SRC_REG_SOFT_RST, 0);
16997
16998    if (sizeof(union cdu_context) != 1024) {
16999        /* we currently assume that a context is 1024 bytes */
17000        BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
17001              (long)sizeof(union cdu_context));
17002    }
17003
17004    ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
17005    val = (4 << 24) + (0 << 12) + 1024;
17006    REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
17007
17008    ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
17009
17010    REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
17011    /* enable context validation interrupt from CFC */
17012    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17013
17014    /* set the thresholds to prevent CFC/CDU race */
17015    REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
17016    ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
17017
17018    if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
17019        REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
17020    }
17021
17022    ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
17023    ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
17024
17025    /* Reset PCIE errors for debug */
17026    REG_WR(sc, 0x2814, 0xffffffff);
17027    REG_WR(sc, 0x3820, 0xffffffff);
17028
17029    if (!CHIP_IS_E1x(sc)) {
17030        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
17031               (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
17032                PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
17033        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17034               (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17035                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17036                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17037        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17038               (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17039                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17040                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17041    }
17042
17043    ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17044
17045    if (!CHIP_IS_E1(sc)) {
17046        /* in E3 this done in per-port section */
17047        if (!CHIP_IS_E3(sc))
17048            REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17049    }
17050
17051    if (CHIP_IS_E1H(sc)) {
17052        /* not applicable for E2 (and above ...) */
17053        REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17054    }
17055
17056    if (CHIP_REV_IS_SLOW(sc)) {
17057        DELAY(200000);
17058    }
17059
17060    /* finish CFC init */
17061    val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17062    if (val != 1) {
17063        BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
17064        return (-1);
17065    }
17066    val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17067    if (val != 1) {
17068        BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
17069        return (-1);
17070    }
17071    val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17072    if (val != 1) {
17073        BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
17074        return (-1);
17075    }
17076    REG_WR(sc, CFC_REG_DEBUG0, 0);
17077
17078    if (CHIP_IS_E1(sc)) {
17079        /* read NIG statistic to see if this is our first up since powerup */
17080        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17081        val = *BXE_SP(sc, wb_data[0]);
17082
17083        /* do internal memory self test */
17084        if ((val == 0) && bxe_int_mem_test(sc)) {
17085            BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
17086            return (-1);
17087        }
17088    }
17089
17090    bxe_setup_fan_failure_detection(sc);
17091
17092    /* clear PXP2 attentions */
17093    REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17094
17095    bxe_enable_blocks_attention(sc);
17096
17097    if (!CHIP_REV_IS_SLOW(sc)) {
17098        ecore_enable_blocks_parity(sc);
17099    }
17100
17101    if (!BXE_NOMCP(sc)) {
17102        if (CHIP_IS_E1x(sc)) {
17103            bxe_common_init_phy(sc);
17104        }
17105    }
17106
17107    return (0);
17108}
17109
17110/**
17111 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17112 *
17113 * @sc:     driver handle
17114 */
17115static int
17116bxe_init_hw_common_chip(struct bxe_softc *sc)
17117{
17118    int rc = bxe_init_hw_common(sc);
17119
17120    if (rc) {
17121        BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
17122        return (rc);
17123    }
17124
17125    /* In E2 2-PORT mode, same ext phy is used for the two paths */
17126    if (!BXE_NOMCP(sc)) {
17127        bxe_common_init_phy(sc);
17128    }
17129
17130    return (0);
17131}
17132
17133static int
17134bxe_init_hw_port(struct bxe_softc *sc)
17135{
17136    int port = SC_PORT(sc);
17137    int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17138    uint32_t low, high;
17139    uint32_t val;
17140
17141    BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17142
17143    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17144
17145    ecore_init_block(sc, BLOCK_MISC, init_phase);
17146    ecore_init_block(sc, BLOCK_PXP, init_phase);
17147    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17148
17149    /*
17150     * Timers bug workaround: disables the pf_master bit in pglue at
17151     * common phase, we need to enable it here before any dmae access are
17152     * attempted. Therefore we manually added the enable-master to the
17153     * port phase (it also happens in the function phase)
17154     */
17155    if (!CHIP_IS_E1x(sc)) {
17156        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17157    }
17158
17159    ecore_init_block(sc, BLOCK_ATC, init_phase);
17160    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17161    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17162    ecore_init_block(sc, BLOCK_QM, init_phase);
17163
17164    ecore_init_block(sc, BLOCK_TCM, init_phase);
17165    ecore_init_block(sc, BLOCK_UCM, init_phase);
17166    ecore_init_block(sc, BLOCK_CCM, init_phase);
17167    ecore_init_block(sc, BLOCK_XCM, init_phase);
17168
17169    /* QM cid (connection) count */
17170    ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17171
17172    if (CNIC_SUPPORT(sc)) {
17173        ecore_init_block(sc, BLOCK_TM, init_phase);
17174        REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17175        REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17176    }
17177
17178    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17179
17180    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17181
17182    if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17183        if (IS_MF(sc)) {
17184            low = (BXE_ONE_PORT(sc) ? 160 : 246);
17185        } else if (sc->mtu > 4096) {
17186            if (BXE_ONE_PORT(sc)) {
17187                low = 160;
17188            } else {
17189                val = sc->mtu;
17190                /* (24*1024 + val*4)/256 */
17191                low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
17192            }
17193        } else {
17194            low = (BXE_ONE_PORT(sc) ? 80 : 160);
17195        }
17196        high = (low + 56); /* 14*1024/256 */
17197        REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17198        REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17199    }
17200
17201    if (CHIP_IS_MODE_4_PORT(sc)) {
17202        REG_WR(sc, SC_PORT(sc) ?
17203               BRB1_REG_MAC_GUARANTIED_1 :
17204               BRB1_REG_MAC_GUARANTIED_0, 40);
17205    }
17206
17207    ecore_init_block(sc, BLOCK_PRS, init_phase);
17208    if (CHIP_IS_E3B0(sc)) {
17209        if (IS_MF_AFEX(sc)) {
17210            /* configure headers for AFEX mode */
17211            REG_WR(sc, SC_PORT(sc) ?
17212                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17213                   PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17214            REG_WR(sc, SC_PORT(sc) ?
17215                   PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17216                   PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17217            REG_WR(sc, SC_PORT(sc) ?
17218                   PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17219                   PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17220        } else {
17221            /* Ovlan exists only if we are in multi-function +
17222             * switch-dependent mode, in switch-independent there
17223             * is no ovlan headers
17224             */
17225            REG_WR(sc, SC_PORT(sc) ?
17226                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17227                   PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17228                   (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17229        }
17230    }
17231
17232    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17233    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17234    ecore_init_block(sc, BLOCK_USDM, init_phase);
17235    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17236
17237    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17238    ecore_init_block(sc, BLOCK_USEM, init_phase);
17239    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17240    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17241
17242    ecore_init_block(sc, BLOCK_UPB, init_phase);
17243    ecore_init_block(sc, BLOCK_XPB, init_phase);
17244
17245    ecore_init_block(sc, BLOCK_PBF, init_phase);
17246
17247    if (CHIP_IS_E1x(sc)) {
17248        /* configure PBF to work without PAUSE mtu 9000 */
17249        REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17250
17251        /* update threshold */
17252        REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17253        /* update init credit */
17254        REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17255
17256        /* probe changes */
17257        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17258        DELAY(50);
17259        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17260    }
17261
17262    if (CNIC_SUPPORT(sc)) {
17263        ecore_init_block(sc, BLOCK_SRC, init_phase);
17264    }
17265
17266    ecore_init_block(sc, BLOCK_CDU, init_phase);
17267    ecore_init_block(sc, BLOCK_CFC, init_phase);
17268
17269    if (CHIP_IS_E1(sc)) {
17270        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17271        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17272    }
17273    ecore_init_block(sc, BLOCK_HC, init_phase);
17274
17275    ecore_init_block(sc, BLOCK_IGU, init_phase);
17276
17277    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17278    /* init aeu_mask_attn_func_0/1:
17279     *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17280     *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17281     *             bits 4-7 are used for "per vn group attention" */
17282    val = IS_MF(sc) ? 0xF7 : 0x7;
17283    /* Enable DCBX attention for all but E1 */
17284    val |= CHIP_IS_E1(sc) ? 0 : 0x10;
17285    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17286
17287    ecore_init_block(sc, BLOCK_NIG, init_phase);
17288
17289    if (!CHIP_IS_E1x(sc)) {
17290        /* Bit-map indicating which L2 hdrs may appear after the
17291         * basic Ethernet header
17292         */
17293        if (IS_MF_AFEX(sc)) {
17294            REG_WR(sc, SC_PORT(sc) ?
17295                   NIG_REG_P1_HDRS_AFTER_BASIC :
17296                   NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17297        } else {
17298            REG_WR(sc, SC_PORT(sc) ?
17299                   NIG_REG_P1_HDRS_AFTER_BASIC :
17300                   NIG_REG_P0_HDRS_AFTER_BASIC,
17301                   IS_MF_SD(sc) ? 7 : 6);
17302        }
17303
17304        if (CHIP_IS_E3(sc)) {
17305            REG_WR(sc, SC_PORT(sc) ?
17306                   NIG_REG_LLH1_MF_MODE :
17307                   NIG_REG_LLH_MF_MODE, IS_MF(sc));
17308        }
17309    }
17310    if (!CHIP_IS_E3(sc)) {
17311        REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17312    }
17313
17314    if (!CHIP_IS_E1(sc)) {
17315        /* 0x2 disable mf_ov, 0x1 enable */
17316        REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17317               (IS_MF_SD(sc) ? 0x1 : 0x2));
17318
17319        if (!CHIP_IS_E1x(sc)) {
17320            val = 0;
17321            switch (sc->devinfo.mf_info.mf_mode) {
17322            case MULTI_FUNCTION_SD:
17323                val = 1;
17324                break;
17325            case MULTI_FUNCTION_SI:
17326            case MULTI_FUNCTION_AFEX:
17327                val = 2;
17328                break;
17329            }
17330
17331            REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17332                        NIG_REG_LLH0_CLS_TYPE), val);
17333        }
17334        REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17335        REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17336        REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17337    }
17338
17339    /* If SPIO5 is set to generate interrupts, enable it for this port */
17340    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17341    if (val & MISC_SPIO_SPIO5) {
17342        uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17343                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17344        val = REG_RD(sc, reg_addr);
17345        val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17346        REG_WR(sc, reg_addr, val);
17347    }
17348
17349    return (0);
17350}
17351
17352static uint32_t
17353bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17354                       uint32_t         reg,
17355                       uint32_t         expected,
17356                       uint32_t         poll_count)
17357{
17358    uint32_t cur_cnt = poll_count;
17359    uint32_t val;
17360
17361    while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17362        DELAY(FLR_WAIT_INTERVAL);
17363    }
17364
17365    return (val);
17366}
17367
17368static int
17369bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17370                              uint32_t         reg,
17371                              char             *msg,
17372                              uint32_t         poll_cnt)
17373{
17374    uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17375
17376    if (val != 0) {
17377        BLOGE(sc, "%s usage count=%d\n", msg, val);
17378        return (1);
17379    }
17380
17381    return (0);
17382}
17383
17384/* Common routines with VF FLR cleanup */
17385static uint32_t
17386bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17387{
17388    /* adjust polling timeout */
17389    if (CHIP_REV_IS_EMUL(sc)) {
17390        return (FLR_POLL_CNT * 2000);
17391    }
17392
17393    if (CHIP_REV_IS_FPGA(sc)) {
17394        return (FLR_POLL_CNT * 120);
17395    }
17396
17397    return (FLR_POLL_CNT);
17398}
17399
17400static int
17401bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17402                           uint32_t         poll_cnt)
17403{
17404    /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17405    if (bxe_flr_clnup_poll_hw_counter(sc,
17406                                      CFC_REG_NUM_LCIDS_INSIDE_PF,
17407                                      "CFC PF usage counter timed out",
17408                                      poll_cnt)) {
17409        return (1);
17410    }
17411
17412    /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17413    if (bxe_flr_clnup_poll_hw_counter(sc,
17414                                      DORQ_REG_PF_USAGE_CNT,
17415                                      "DQ PF usage counter timed out",
17416                                      poll_cnt)) {
17417        return (1);
17418    }
17419
17420    /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17421    if (bxe_flr_clnup_poll_hw_counter(sc,
17422                                      QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17423                                      "QM PF usage counter timed out",
17424                                      poll_cnt)) {
17425        return (1);
17426    }
17427
17428    /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17429    if (bxe_flr_clnup_poll_hw_counter(sc,
17430                                      TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17431                                      "Timers VNIC usage counter timed out",
17432                                      poll_cnt)) {
17433        return (1);
17434    }
17435
17436    if (bxe_flr_clnup_poll_hw_counter(sc,
17437                                      TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17438                                      "Timers NUM_SCANS usage counter timed out",
17439                                      poll_cnt)) {
17440        return (1);
17441    }
17442
17443    /* Wait DMAE PF usage counter to zero */
17444    if (bxe_flr_clnup_poll_hw_counter(sc,
17445                                      dmae_reg_go_c[INIT_DMAE_C(sc)],
17446                                      "DMAE dommand register timed out",
17447                                      poll_cnt)) {
17448        return (1);
17449    }
17450
17451    return (0);
17452}
17453
17454#define OP_GEN_PARAM(param)                                            \
17455    (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
17456#define OP_GEN_TYPE(type)                                           \
17457    (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
17458#define OP_GEN_AGG_VECT(index)                                             \
17459    (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
17460
17461static int
17462bxe_send_final_clnup(struct bxe_softc *sc,
17463                     uint8_t          clnup_func,
17464                     uint32_t         poll_cnt)
17465{
17466    uint32_t op_gen_command = 0;
17467    uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
17468                          CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
17469    int ret = 0;
17470
17471    if (REG_RD(sc, comp_addr)) {
17472        BLOGE(sc, "Cleanup complete was not 0 before sending\n");
17473        return (1);
17474    }
17475
17476    op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
17477    op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
17478    op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
17479    op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
17480
17481    BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
17482    REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
17483
17484    if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
17485        BLOGE(sc, "FW final cleanup did not succeed\n");
17486        BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
17487              (REG_RD(sc, comp_addr)));
17488        bxe_panic(sc, ("FLR cleanup failed\n"));
17489        return (1);
17490    }
17491
17492    /* Zero completion for nxt FLR */
17493    REG_WR(sc, comp_addr, 0);
17494
17495    return (ret);
17496}
17497
17498static void
17499bxe_pbf_pN_buf_flushed(struct bxe_softc       *sc,
17500                       struct pbf_pN_buf_regs *regs,
17501                       uint32_t               poll_count)
17502{
17503    uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
17504    uint32_t cur_cnt = poll_count;
17505
17506    crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
17507    crd = crd_start = REG_RD(sc, regs->crd);
17508    init_crd = REG_RD(sc, regs->init_crd);
17509
17510    BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
17511    BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
17512    BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
17513
17514    while ((crd != init_crd) &&
17515           ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
17516            (init_crd - crd_start))) {
17517        if (cur_cnt--) {
17518            DELAY(FLR_WAIT_INTERVAL);
17519            crd = REG_RD(sc, regs->crd);
17520            crd_freed = REG_RD(sc, regs->crd_freed);
17521        } else {
17522            BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
17523            BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : c:%x\n", regs->pN, crd);
17524            BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
17525            break;
17526        }
17527    }
17528
17529    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
17530          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17531}
17532
17533static void
17534bxe_pbf_pN_cmd_flushed(struct bxe_softc       *sc,
17535                       struct pbf_pN_cmd_regs *regs,
17536                       uint32_t               poll_count)
17537{
17538    uint32_t occup, to_free, freed, freed_start;
17539    uint32_t cur_cnt = poll_count;
17540
17541    occup = to_free = REG_RD(sc, regs->lines_occup);
17542    freed = freed_start = REG_RD(sc, regs->lines_freed);
17543
17544    BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17545    BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17546
17547    while (occup &&
17548           ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
17549        if (cur_cnt--) {
17550            DELAY(FLR_WAIT_INTERVAL);
17551            occup = REG_RD(sc, regs->lines_occup);
17552            freed = REG_RD(sc, regs->lines_freed);
17553        } else {
17554            BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
17555            BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17556            BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17557            break;
17558        }
17559    }
17560
17561    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
17562          poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17563}
17564
17565static void
17566bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
17567{
17568    struct pbf_pN_cmd_regs cmd_regs[] = {
17569        {0, (CHIP_IS_E3B0(sc)) ?
17570            PBF_REG_TQ_OCCUPANCY_Q0 :
17571            PBF_REG_P0_TQ_OCCUPANCY,
17572            (CHIP_IS_E3B0(sc)) ?
17573            PBF_REG_TQ_LINES_FREED_CNT_Q0 :
17574            PBF_REG_P0_TQ_LINES_FREED_CNT},
17575        {1, (CHIP_IS_E3B0(sc)) ?
17576            PBF_REG_TQ_OCCUPANCY_Q1 :
17577            PBF_REG_P1_TQ_OCCUPANCY,
17578            (CHIP_IS_E3B0(sc)) ?
17579            PBF_REG_TQ_LINES_FREED_CNT_Q1 :
17580            PBF_REG_P1_TQ_LINES_FREED_CNT},
17581        {4, (CHIP_IS_E3B0(sc)) ?
17582            PBF_REG_TQ_OCCUPANCY_LB_Q :
17583            PBF_REG_P4_TQ_OCCUPANCY,
17584            (CHIP_IS_E3B0(sc)) ?
17585            PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
17586            PBF_REG_P4_TQ_LINES_FREED_CNT}
17587    };
17588
17589    struct pbf_pN_buf_regs buf_regs[] = {
17590        {0, (CHIP_IS_E3B0(sc)) ?
17591            PBF_REG_INIT_CRD_Q0 :
17592            PBF_REG_P0_INIT_CRD ,
17593            (CHIP_IS_E3B0(sc)) ?
17594            PBF_REG_CREDIT_Q0 :
17595            PBF_REG_P0_CREDIT,
17596            (CHIP_IS_E3B0(sc)) ?
17597            PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
17598            PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
17599        {1, (CHIP_IS_E3B0(sc)) ?
17600            PBF_REG_INIT_CRD_Q1 :
17601            PBF_REG_P1_INIT_CRD,
17602            (CHIP_IS_E3B0(sc)) ?
17603            PBF_REG_CREDIT_Q1 :
17604            PBF_REG_P1_CREDIT,
17605            (CHIP_IS_E3B0(sc)) ?
17606            PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
17607            PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
17608        {4, (CHIP_IS_E3B0(sc)) ?
17609            PBF_REG_INIT_CRD_LB_Q :
17610            PBF_REG_P4_INIT_CRD,
17611            (CHIP_IS_E3B0(sc)) ?
17612            PBF_REG_CREDIT_LB_Q :
17613            PBF_REG_P4_CREDIT,
17614            (CHIP_IS_E3B0(sc)) ?
17615            PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
17616            PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
17617    };
17618
17619    int i;
17620
17621    /* Verify the command queues are flushed P0, P1, P4 */
17622    for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
17623        bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
17624    }
17625
17626    /* Verify the transmission buffers are flushed P0, P1, P4 */
17627    for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
17628        bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
17629    }
17630}
17631
17632static void
17633bxe_hw_enable_status(struct bxe_softc *sc)
17634{
17635    uint32_t val;
17636
17637    val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
17638    BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
17639
17640    val = REG_RD(sc, PBF_REG_DISABLE_PF);
17641    BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
17642
17643    val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
17644    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
17645
17646    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
17647    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
17648
17649    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
17650    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
17651
17652    val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
17653    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
17654
17655    val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
17656    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
17657
17658    val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
17659    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
17660}
17661
17662static int
17663bxe_pf_flr_clnup(struct bxe_softc *sc)
17664{
17665    uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
17666
17667    BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
17668
17669    /* Re-enable PF target read access */
17670    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
17671
17672    /* Poll HW usage counters */
17673    BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
17674    if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
17675        return (-1);
17676    }
17677
17678    /* Zero the igu 'trailing edge' and 'leading edge' */
17679
17680    /* Send the FW cleanup command */
17681    if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
17682        return (-1);
17683    }
17684
17685    /* ATC cleanup */
17686
17687    /* Verify TX hw is flushed */
17688    bxe_tx_hw_flushed(sc, poll_cnt);
17689
17690    /* Wait 100ms (not adjusted according to platform) */
17691    DELAY(100000);
17692
17693    /* Verify no pending pci transactions */
17694    if (bxe_is_pcie_pending(sc)) {
17695        BLOGE(sc, "PCIE Transactions still pending\n");
17696    }
17697
17698    /* Debug */
17699    bxe_hw_enable_status(sc);
17700
17701    /*
17702     * Master enable - Due to WB DMAE writes performed before this
17703     * register is re-initialized as part of the regular function init
17704     */
17705    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17706
17707    return (0);
17708}
17709
17710static int
17711bxe_init_hw_func(struct bxe_softc *sc)
17712{
17713    int port = SC_PORT(sc);
17714    int func = SC_FUNC(sc);
17715    int init_phase = PHASE_PF0 + func;
17716    struct ecore_ilt *ilt = sc->ilt;
17717    uint16_t cdu_ilt_start;
17718    uint32_t addr, val;
17719    uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
17720    int i, main_mem_width, rc;
17721
17722    BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
17723
17724    /* FLR cleanup */
17725    if (!CHIP_IS_E1x(sc)) {
17726        rc = bxe_pf_flr_clnup(sc);
17727        if (rc) {
17728            BLOGE(sc, "FLR cleanup failed!\n");
17729            // XXX bxe_fw_dump(sc);
17730            // XXX bxe_idle_chk(sc);
17731            return (rc);
17732        }
17733    }
17734
17735    /* set MSI reconfigure capability */
17736    if (sc->devinfo.int_block == INT_BLOCK_HC) {
17737        addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
17738        val = REG_RD(sc, addr);
17739        val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
17740        REG_WR(sc, addr, val);
17741    }
17742
17743    ecore_init_block(sc, BLOCK_PXP, init_phase);
17744    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17745
17746    ilt = sc->ilt;
17747    cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
17748
17749    for (i = 0; i < L2_ILT_LINES(sc); i++) {
17750        ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
17751        ilt->lines[cdu_ilt_start + i].page_mapping =
17752            sc->context[i].vcxt_dma.paddr;
17753        ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
17754    }
17755    ecore_ilt_init_op(sc, INITOP_SET);
17756
17757    /* Set NIC mode */
17758    REG_WR(sc, PRS_REG_NIC_MODE, 1);
17759    BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
17760
17761    if (!CHIP_IS_E1x(sc)) {
17762        uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
17763
17764        /* Turn on a single ISR mode in IGU if driver is going to use
17765         * INT#x or MSI
17766         */
17767        if (sc->interrupt_mode != INTR_MODE_MSIX) {
17768            pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
17769        }
17770
17771        /*
17772         * Timers workaround bug: function init part.
17773         * Need to wait 20msec after initializing ILT,
17774         * needed to make sure there are no requests in
17775         * one of the PXP internal queues with "old" ILT addresses
17776         */
17777        DELAY(20000);
17778
17779        /*
17780         * Master enable - Due to WB DMAE writes performed before this
17781         * register is re-initialized as part of the regular function
17782         * init
17783         */
17784        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17785        /* Enable the function in IGU */
17786        REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
17787    }
17788
17789    sc->dmae_ready = 1;
17790
17791    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17792
17793    if (!CHIP_IS_E1x(sc))
17794        REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
17795
17796    ecore_init_block(sc, BLOCK_ATC, init_phase);
17797    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17798    ecore_init_block(sc, BLOCK_NIG, init_phase);
17799    ecore_init_block(sc, BLOCK_SRC, init_phase);
17800    ecore_init_block(sc, BLOCK_MISC, init_phase);
17801    ecore_init_block(sc, BLOCK_TCM, init_phase);
17802    ecore_init_block(sc, BLOCK_UCM, init_phase);
17803    ecore_init_block(sc, BLOCK_CCM, init_phase);
17804    ecore_init_block(sc, BLOCK_XCM, init_phase);
17805    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17806    ecore_init_block(sc, BLOCK_USEM, init_phase);
17807    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17808    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17809
17810    if (!CHIP_IS_E1x(sc))
17811        REG_WR(sc, QM_REG_PF_EN, 1);
17812
17813    if (!CHIP_IS_E1x(sc)) {
17814        REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17815        REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17816        REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17817        REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
17818    }
17819    ecore_init_block(sc, BLOCK_QM, init_phase);
17820
17821    ecore_init_block(sc, BLOCK_TM, init_phase);
17822    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17823
17824    bxe_iov_init_dq(sc);
17825
17826    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17827    ecore_init_block(sc, BLOCK_PRS, init_phase);
17828    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17829    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17830    ecore_init_block(sc, BLOCK_USDM, init_phase);
17831    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17832    ecore_init_block(sc, BLOCK_UPB, init_phase);
17833    ecore_init_block(sc, BLOCK_XPB, init_phase);
17834    ecore_init_block(sc, BLOCK_PBF, init_phase);
17835    if (!CHIP_IS_E1x(sc))
17836        REG_WR(sc, PBF_REG_DISABLE_PF, 0);
17837
17838    ecore_init_block(sc, BLOCK_CDU, init_phase);
17839
17840    ecore_init_block(sc, BLOCK_CFC, init_phase);
17841
17842    if (!CHIP_IS_E1x(sc))
17843        REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
17844
17845    if (IS_MF(sc)) {
17846        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
17847        REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
17848    }
17849
17850    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17851
17852    /* HC init per function */
17853    if (sc->devinfo.int_block == INT_BLOCK_HC) {
17854        if (CHIP_IS_E1H(sc)) {
17855            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
17856
17857            REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17858            REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17859        }
17860        ecore_init_block(sc, BLOCK_HC, init_phase);
17861
17862    } else {
17863        int num_segs, sb_idx, prod_offset;
17864
17865        REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
17866
17867        if (!CHIP_IS_E1x(sc)) {
17868            REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
17869            REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
17870        }
17871
17872        ecore_init_block(sc, BLOCK_IGU, init_phase);
17873
17874        if (!CHIP_IS_E1x(sc)) {
17875            int dsb_idx = 0;
17876            /**
17877             * Producer memory:
17878             * E2 mode: address 0-135 match to the mapping memory;
17879             * 136 - PF0 default prod; 137 - PF1 default prod;
17880             * 138 - PF2 default prod; 139 - PF3 default prod;
17881             * 140 - PF0 attn prod;    141 - PF1 attn prod;
17882             * 142 - PF2 attn prod;    143 - PF3 attn prod;
17883             * 144-147 reserved.
17884             *
17885             * E1.5 mode - In backward compatible mode;
17886             * for non default SB; each even line in the memory
17887             * holds the U producer and each odd line hold
17888             * the C producer. The first 128 producers are for
17889             * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
17890             * producers are for the DSB for each PF.
17891             * Each PF has five segments: (the order inside each
17892             * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
17893             * 132-135 C prods; 136-139 X prods; 140-143 T prods;
17894             * 144-147 attn prods;
17895             */
17896            /* non-default-status-blocks */
17897            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
17898                IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
17899            for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
17900                prod_offset = (sc->igu_base_sb + sb_idx) *
17901                    num_segs;
17902
17903                for (i = 0; i < num_segs; i++) {
17904                    addr = IGU_REG_PROD_CONS_MEMORY +
17905                            (prod_offset + i) * 4;
17906                    REG_WR(sc, addr, 0);
17907                }
17908                /* send consumer update with value 0 */
17909                bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
17910                           USTORM_ID, 0, IGU_INT_NOP, 1);
17911                bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
17912            }
17913
17914            /* default-status-blocks */
17915            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
17916                IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
17917
17918            if (CHIP_IS_MODE_4_PORT(sc))
17919                dsb_idx = SC_FUNC(sc);
17920            else
17921                dsb_idx = SC_VN(sc);
17922
17923            prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
17924                       IGU_BC_BASE_DSB_PROD + dsb_idx :
17925                       IGU_NORM_BASE_DSB_PROD + dsb_idx);
17926
17927            /*
17928             * igu prods come in chunks of E1HVN_MAX (4) -
17929             * does not matters what is the current chip mode
17930             */
17931            for (i = 0; i < (num_segs * E1HVN_MAX);
17932                 i += E1HVN_MAX) {
17933                addr = IGU_REG_PROD_CONS_MEMORY +
17934                            (prod_offset + i)*4;
17935                REG_WR(sc, addr, 0);
17936            }
17937            /* send consumer update with 0 */
17938            if (CHIP_INT_MODE_IS_BC(sc)) {
17939                bxe_ack_sb(sc, sc->igu_dsb_id,
17940                           USTORM_ID, 0, IGU_INT_NOP, 1);
17941                bxe_ack_sb(sc, sc->igu_dsb_id,
17942                           CSTORM_ID, 0, IGU_INT_NOP, 1);
17943                bxe_ack_sb(sc, sc->igu_dsb_id,
17944                           XSTORM_ID, 0, IGU_INT_NOP, 1);
17945                bxe_ack_sb(sc, sc->igu_dsb_id,
17946                           TSTORM_ID, 0, IGU_INT_NOP, 1);
17947                bxe_ack_sb(sc, sc->igu_dsb_id,
17948                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
17949            } else {
17950                bxe_ack_sb(sc, sc->igu_dsb_id,
17951                           USTORM_ID, 0, IGU_INT_NOP, 1);
17952                bxe_ack_sb(sc, sc->igu_dsb_id,
17953                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
17954            }
17955            bxe_igu_clear_sb(sc, sc->igu_dsb_id);
17956
17957            /* !!! these should become driver const once
17958               rf-tool supports split-68 const */
17959            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
17960            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
17961            REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
17962            REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
17963            REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
17964            REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
17965        }
17966    }
17967
17968    /* Reset PCIE errors for debug */
17969    REG_WR(sc, 0x2114, 0xffffffff);
17970    REG_WR(sc, 0x2120, 0xffffffff);
17971
17972    if (CHIP_IS_E1x(sc)) {
17973        main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
17974        main_mem_base = HC_REG_MAIN_MEMORY +
17975                SC_PORT(sc) * (main_mem_size * 4);
17976        main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
17977        main_mem_width = 8;
17978
17979        val = REG_RD(sc, main_mem_prty_clr);
17980        if (val) {
17981            BLOGD(sc, DBG_LOAD,
17982                  "Parity errors in HC block during function init (0x%x)!\n",
17983                  val);
17984        }
17985
17986        /* Clear "false" parity errors in MSI-X table */
17987        for (i = main_mem_base;
17988             i < main_mem_base + main_mem_size * 4;
17989             i += main_mem_width) {
17990            bxe_read_dmae(sc, i, main_mem_width / 4);
17991            bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
17992                           i, main_mem_width / 4);
17993        }
17994        /* Clear HC parity attention */
17995        REG_RD(sc, main_mem_prty_clr);
17996    }
17997
17998#if 1
17999    /* Enable STORMs SP logging */
18000    REG_WR8(sc, BAR_USTRORM_INTMEM +
18001           USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18002    REG_WR8(sc, BAR_TSTRORM_INTMEM +
18003           TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18004    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18005           CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18006    REG_WR8(sc, BAR_XSTRORM_INTMEM +
18007           XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18008#endif
18009
18010    elink_phy_probe(&sc->link_params);
18011
18012    return (0);
18013}
18014
18015static void
18016bxe_link_reset(struct bxe_softc *sc)
18017{
18018    if (!BXE_NOMCP(sc)) {
18019	bxe_acquire_phy_lock(sc);
18020        elink_lfa_reset(&sc->link_params, &sc->link_vars);
18021	bxe_release_phy_lock(sc);
18022    } else {
18023        if (!CHIP_REV_IS_SLOW(sc)) {
18024            BLOGW(sc, "Bootcode is missing - cannot reset link\n");
18025        }
18026    }
18027}
18028
18029static void
18030bxe_reset_port(struct bxe_softc *sc)
18031{
18032    int port = SC_PORT(sc);
18033    uint32_t val;
18034
18035    /* reset physical Link */
18036    bxe_link_reset(sc);
18037
18038    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
18039
18040    /* Do not rcv packets to BRB */
18041    REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
18042    /* Do not direct rcv packets that are not for MCP to the BRB */
18043    REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
18044               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
18045
18046    /* Configure AEU */
18047    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
18048
18049    DELAY(100000);
18050
18051    /* Check for BRB port occupancy */
18052    val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
18053    if (val) {
18054        BLOGD(sc, DBG_LOAD,
18055              "BRB1 is not empty, %d blocks are occupied\n", val);
18056    }
18057
18058    /* TODO: Close Doorbell port? */
18059}
18060
18061static void
18062bxe_ilt_wr(struct bxe_softc *sc,
18063           uint32_t         index,
18064           bus_addr_t       addr)
18065{
18066    int reg;
18067    uint32_t wb_write[2];
18068
18069    if (CHIP_IS_E1(sc)) {
18070        reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
18071    } else {
18072        reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
18073    }
18074
18075    wb_write[0] = ONCHIP_ADDR1(addr);
18076    wb_write[1] = ONCHIP_ADDR2(addr);
18077    REG_WR_DMAE(sc, reg, wb_write, 2);
18078}
18079
18080static void
18081bxe_clear_func_ilt(struct bxe_softc *sc,
18082                   uint32_t         func)
18083{
18084    uint32_t i, base = FUNC_ILT_BASE(func);
18085    for (i = base; i < base + ILT_PER_FUNC; i++) {
18086        bxe_ilt_wr(sc, i, 0);
18087    }
18088}
18089
18090static void
18091bxe_reset_func(struct bxe_softc *sc)
18092{
18093    struct bxe_fastpath *fp;
18094    int port = SC_PORT(sc);
18095    int func = SC_FUNC(sc);
18096    int i;
18097
18098    /* Disable the function in the FW */
18099    REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
18100    REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
18101    REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
18102    REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
18103
18104    /* FP SBs */
18105    FOR_EACH_ETH_QUEUE(sc, i) {
18106        fp = &sc->fp[i];
18107        REG_WR8(sc, BAR_CSTRORM_INTMEM +
18108                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18109                SB_DISABLED);
18110    }
18111
18112    /* SP SB */
18113    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18114            CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
18115            SB_DISABLED);
18116
18117    for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
18118        REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
18119    }
18120
18121    /* Configure IGU */
18122    if (sc->devinfo.int_block == INT_BLOCK_HC) {
18123        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18124        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18125    } else {
18126        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18127        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18128    }
18129
18130    if (CNIC_LOADED(sc)) {
18131        /* Disable Timer scan */
18132        REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
18133        /*
18134         * Wait for at least 10ms and up to 2 second for the timers
18135         * scan to complete
18136         */
18137        for (i = 0; i < 200; i++) {
18138            DELAY(10000);
18139            if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
18140                break;
18141        }
18142    }
18143
18144    /* Clear ILT */
18145    bxe_clear_func_ilt(sc, func);
18146
18147    /*
18148     * Timers workaround bug for E2: if this is vnic-3,
18149     * we need to set the entire ilt range for this timers.
18150     */
18151    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18152        struct ilt_client_info ilt_cli;
18153        /* use dummy TM client */
18154        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18155        ilt_cli.start = 0;
18156        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18157        ilt_cli.client_num = ILT_CLIENT_TM;
18158
18159        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18160    }
18161
18162    /* this assumes that reset_port() called before reset_func()*/
18163    if (!CHIP_IS_E1x(sc)) {
18164        bxe_pf_disable(sc);
18165    }
18166
18167    sc->dmae_ready = 0;
18168}
18169
18170static int
18171bxe_gunzip_init(struct bxe_softc *sc)
18172{
18173    return (0);
18174}
18175
18176static void
18177bxe_gunzip_end(struct bxe_softc *sc)
18178{
18179    return;
18180}
18181
18182static int
18183bxe_init_firmware(struct bxe_softc *sc)
18184{
18185    if (CHIP_IS_E1(sc)) {
18186        ecore_init_e1_firmware(sc);
18187        sc->iro_array = e1_iro_arr;
18188    } else if (CHIP_IS_E1H(sc)) {
18189        ecore_init_e1h_firmware(sc);
18190        sc->iro_array = e1h_iro_arr;
18191    } else if (!CHIP_IS_E1x(sc)) {
18192        ecore_init_e2_firmware(sc);
18193        sc->iro_array = e2_iro_arr;
18194    } else {
18195        BLOGE(sc, "Unsupported chip revision\n");
18196        return (-1);
18197    }
18198
18199    return (0);
18200}
18201
18202static void
18203bxe_release_firmware(struct bxe_softc *sc)
18204{
18205    /* Do nothing */
18206    return;
18207}
18208
18209static int
18210ecore_gunzip(struct bxe_softc *sc,
18211             const uint8_t    *zbuf,
18212             int              len)
18213{
18214    /* XXX : Implement... */
18215    BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
18216    return (FALSE);
18217}
18218
18219static void
18220ecore_reg_wr_ind(struct bxe_softc *sc,
18221                 uint32_t         addr,
18222                 uint32_t         val)
18223{
18224    bxe_reg_wr_ind(sc, addr, val);
18225}
18226
18227static void
18228ecore_write_dmae_phys_len(struct bxe_softc *sc,
18229                          bus_addr_t       phys_addr,
18230                          uint32_t         addr,
18231                          uint32_t         len)
18232{
18233    bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
18234}
18235
18236void
18237ecore_storm_memset_struct(struct bxe_softc *sc,
18238                          uint32_t         addr,
18239                          size_t           size,
18240                          uint32_t         *data)
18241{
18242    uint8_t i;
18243    for (i = 0; i < size/4; i++) {
18244        REG_WR(sc, addr + (i * 4), data[i]);
18245    }
18246}
18247
18248
18249/*
18250 * character device - ioctl interface definitions
18251 */
18252
18253
18254#include "bxe_dump.h"
18255#include "bxe_ioctl.h"
18256#include <sys/conf.h>
18257
18258static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18259                struct thread *td);
18260
18261static struct cdevsw bxe_cdevsw = {
18262    .d_version = D_VERSION,
18263    .d_ioctl = bxe_eioctl,
18264    .d_name = "bxecnic",
18265};
18266
18267#define BXE_PATH(sc)    (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
18268
18269
18270#define DUMP_ALL_PRESETS        0x1FFF
18271#define DUMP_MAX_PRESETS        13
18272#define IS_E1_REG(chips)        ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
18273#define IS_E1H_REG(chips)       ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
18274#define IS_E2_REG(chips)        ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
18275#define IS_E3A0_REG(chips)      ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
18276#define IS_E3B0_REG(chips)      ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
18277
18278#define IS_REG_IN_PRESET(presets, idx)  \
18279                ((presets & (1 << (idx-1))) == (1 << (idx-1)))
18280
18281
18282static int
18283bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
18284{
18285    if (CHIP_IS_E1(sc))
18286        return dump_num_registers[0][preset-1];
18287    else if (CHIP_IS_E1H(sc))
18288        return dump_num_registers[1][preset-1];
18289    else if (CHIP_IS_E2(sc))
18290        return dump_num_registers[2][preset-1];
18291    else if (CHIP_IS_E3A0(sc))
18292        return dump_num_registers[3][preset-1];
18293    else if (CHIP_IS_E3B0(sc))
18294        return dump_num_registers[4][preset-1];
18295    else
18296        return 0;
18297}
18298
18299static int
18300bxe_get_total_regs_len32(struct bxe_softc *sc)
18301{
18302    uint32_t preset_idx;
18303    int regdump_len32 = 0;
18304
18305
18306    /* Calculate the total preset regs length */
18307    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18308        regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
18309    }
18310
18311    return regdump_len32;
18312}
18313
18314static const uint32_t *
18315__bxe_get_page_addr_ar(struct bxe_softc *sc)
18316{
18317    if (CHIP_IS_E2(sc))
18318        return page_vals_e2;
18319    else if (CHIP_IS_E3(sc))
18320        return page_vals_e3;
18321    else
18322        return NULL;
18323}
18324
18325static uint32_t
18326__bxe_get_page_reg_num(struct bxe_softc *sc)
18327{
18328    if (CHIP_IS_E2(sc))
18329        return PAGE_MODE_VALUES_E2;
18330    else if (CHIP_IS_E3(sc))
18331        return PAGE_MODE_VALUES_E3;
18332    else
18333        return 0;
18334}
18335
18336static const uint32_t *
18337__bxe_get_page_write_ar(struct bxe_softc *sc)
18338{
18339    if (CHIP_IS_E2(sc))
18340        return page_write_regs_e2;
18341    else if (CHIP_IS_E3(sc))
18342        return page_write_regs_e3;
18343    else
18344        return NULL;
18345}
18346
18347static uint32_t
18348__bxe_get_page_write_num(struct bxe_softc *sc)
18349{
18350    if (CHIP_IS_E2(sc))
18351        return PAGE_WRITE_REGS_E2;
18352    else if (CHIP_IS_E3(sc))
18353        return PAGE_WRITE_REGS_E3;
18354    else
18355        return 0;
18356}
18357
18358static const struct reg_addr *
18359__bxe_get_page_read_ar(struct bxe_softc *sc)
18360{
18361    if (CHIP_IS_E2(sc))
18362        return page_read_regs_e2;
18363    else if (CHIP_IS_E3(sc))
18364        return page_read_regs_e3;
18365    else
18366        return NULL;
18367}
18368
18369static uint32_t
18370__bxe_get_page_read_num(struct bxe_softc *sc)
18371{
18372    if (CHIP_IS_E2(sc))
18373        return PAGE_READ_REGS_E2;
18374    else if (CHIP_IS_E3(sc))
18375        return PAGE_READ_REGS_E3;
18376    else
18377        return 0;
18378}
18379
18380static bool
18381bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
18382{
18383    if (CHIP_IS_E1(sc))
18384        return IS_E1_REG(reg_info->chips);
18385    else if (CHIP_IS_E1H(sc))
18386        return IS_E1H_REG(reg_info->chips);
18387    else if (CHIP_IS_E2(sc))
18388        return IS_E2_REG(reg_info->chips);
18389    else if (CHIP_IS_E3A0(sc))
18390        return IS_E3A0_REG(reg_info->chips);
18391    else if (CHIP_IS_E3B0(sc))
18392        return IS_E3B0_REG(reg_info->chips);
18393    else
18394        return 0;
18395}
18396
18397static bool
18398bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
18399{
18400    if (CHIP_IS_E1(sc))
18401        return IS_E1_REG(wreg_info->chips);
18402    else if (CHIP_IS_E1H(sc))
18403        return IS_E1H_REG(wreg_info->chips);
18404    else if (CHIP_IS_E2(sc))
18405        return IS_E2_REG(wreg_info->chips);
18406    else if (CHIP_IS_E3A0(sc))
18407        return IS_E3A0_REG(wreg_info->chips);
18408    else if (CHIP_IS_E3B0(sc))
18409        return IS_E3B0_REG(wreg_info->chips);
18410    else
18411        return 0;
18412}
18413
18414/**
18415 * bxe_read_pages_regs - read "paged" registers
18416 *
18417 * @bp          device handle
18418 * @p           output buffer
18419 *
18420 * Reads "paged" memories: memories that may only be read by first writing to a
18421 * specific address ("write address") and then reading from a specific address
18422 * ("read address"). There may be more than one write address per "page" and
18423 * more than one read address per write address.
18424 */
18425static void
18426bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18427{
18428    uint32_t i, j, k, n;
18429
18430    /* addresses of the paged registers */
18431    const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
18432    /* number of paged registers */
18433    int num_pages = __bxe_get_page_reg_num(sc);
18434    /* write addresses */
18435    const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
18436    /* number of write addresses */
18437    int write_num = __bxe_get_page_write_num(sc);
18438    /* read addresses info */
18439    const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
18440    /* number of read addresses */
18441    int read_num = __bxe_get_page_read_num(sc);
18442    uint32_t addr, size;
18443
18444    for (i = 0; i < num_pages; i++) {
18445        for (j = 0; j < write_num; j++) {
18446            REG_WR(sc, write_addr[j], page_addr[i]);
18447
18448            for (k = 0; k < read_num; k++) {
18449                if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
18450                    size = read_addr[k].size;
18451                    for (n = 0; n < size; n++) {
18452                        addr = read_addr[k].addr + n*4;
18453                        *p++ = REG_RD(sc, addr);
18454                    }
18455                }
18456            }
18457        }
18458    }
18459    return;
18460}
18461
18462
18463static int
18464bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18465{
18466    uint32_t i, j, addr;
18467    const struct wreg_addr *wreg_addr_p = NULL;
18468
18469    if (CHIP_IS_E1(sc))
18470        wreg_addr_p = &wreg_addr_e1;
18471    else if (CHIP_IS_E1H(sc))
18472        wreg_addr_p = &wreg_addr_e1h;
18473    else if (CHIP_IS_E2(sc))
18474        wreg_addr_p = &wreg_addr_e2;
18475    else if (CHIP_IS_E3A0(sc))
18476        wreg_addr_p = &wreg_addr_e3;
18477    else if (CHIP_IS_E3B0(sc))
18478        wreg_addr_p = &wreg_addr_e3b0;
18479    else
18480        return (-1);
18481
18482    /* Read the idle_chk registers */
18483    for (i = 0; i < IDLE_REGS_COUNT; i++) {
18484        if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
18485            IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
18486            for (j = 0; j < idle_reg_addrs[i].size; j++)
18487                *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
18488        }
18489    }
18490
18491    /* Read the regular registers */
18492    for (i = 0; i < REGS_COUNT; i++) {
18493        if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
18494            IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
18495            for (j = 0; j < reg_addrs[i].size; j++)
18496                *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
18497        }
18498    }
18499
18500    /* Read the CAM registers */
18501    if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
18502        IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
18503        for (i = 0; i < wreg_addr_p->size; i++) {
18504            *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);
18505
18506            /* In case of wreg_addr register, read additional
18507               registers from read_regs array
18508             */
18509            for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
18510                addr = *(wreg_addr_p->read_regs);
18511                *p++ = REG_RD(sc, addr + j*4);
18512            }
18513        }
18514    }
18515
18516    /* Paged registers are supported in E2 & E3 only */
18517    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
18518        /* Read "paged" registers */
18519        bxe_read_pages_regs(sc, p, preset);
18520    }
18521
18522    return 0;
18523}
18524
18525int
18526bxe_grc_dump(struct bxe_softc *sc)
18527{
18528    int rval = 0;
18529    uint32_t preset_idx;
18530    uint8_t *buf;
18531    uint32_t size;
18532    struct  dump_header *d_hdr;
18533    uint32_t i;
18534    uint32_t reg_val;
18535    uint32_t reg_addr;
18536    uint32_t cmd_offset;
18537    int context_size;
18538    int allocated;
18539    struct ecore_ilt *ilt = SC_ILT(sc);
18540    struct bxe_fastpath *fp;
18541    struct ilt_client_info *ilt_cli;
18542    int grc_dump_size;
18543
18544
18545    if (sc->grcdump_done || sc->grcdump_started)
18546	return (rval);
18547
18548    sc->grcdump_started = 1;
18549    BLOGI(sc, "Started collecting grcdump\n");
18550
18551    grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18552                sizeof(struct  dump_header);
18553
18554    sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
18555
18556    if (sc->grc_dump == NULL) {
18557        BLOGW(sc, "Unable to allocate memory for grcdump collection\n");
18558        return(ENOMEM);
18559    }
18560
18561
18562
18563    /* Disable parity attentions as long as following dump may
18564     * cause false alarms by reading never written registers. We
18565     * will re-enable parity attentions right after the dump.
18566     */
18567
18568    /* Disable parity on path 0 */
18569    bxe_pretend_func(sc, 0);
18570
18571    ecore_disable_blocks_parity(sc);
18572
18573    /* Disable parity on path 1 */
18574    bxe_pretend_func(sc, 1);
18575    ecore_disable_blocks_parity(sc);
18576
18577    /* Return to current function */
18578    bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18579
18580    buf = sc->grc_dump;
18581    d_hdr = sc->grc_dump;
18582
18583    d_hdr->header_size = (sizeof(struct  dump_header) >> 2) - 1;
18584    d_hdr->version = BNX2X_DUMP_VERSION;
18585    d_hdr->preset = DUMP_ALL_PRESETS;
18586
18587    if (CHIP_IS_E1(sc)) {
18588        d_hdr->dump_meta_data = DUMP_CHIP_E1;
18589    } else if (CHIP_IS_E1H(sc)) {
18590        d_hdr->dump_meta_data = DUMP_CHIP_E1H;
18591    } else if (CHIP_IS_E2(sc)) {
18592        d_hdr->dump_meta_data = DUMP_CHIP_E2 |
18593                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18594    } else if (CHIP_IS_E3A0(sc)) {
18595        d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
18596                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18597    } else if (CHIP_IS_E3B0(sc)) {
18598        d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
18599                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18600    }
18601
18602    buf += sizeof(struct  dump_header);
18603
18604    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18605
18606        /* Skip presets with IOR */
18607        if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
18608            (preset_idx == 11))
18609            continue;
18610
18611        rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);
18612
18613	if (rval)
18614            break;
18615
18616        size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));
18617
18618        buf += size;
18619    }
18620
18621    bxe_pretend_func(sc, 0);
18622    ecore_clear_blocks_parity(sc);
18623    ecore_enable_blocks_parity(sc);
18624
18625    bxe_pretend_func(sc, 1);
18626    ecore_clear_blocks_parity(sc);
18627    ecore_enable_blocks_parity(sc);
18628
18629    /* Return to current function */
18630    bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18631
18632
18633    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
18634    for (i = 0, allocated = 0; allocated < context_size; i++) {
18635
18636        BLOGI(sc, "cdu_context i %d paddr %#jx vaddr %p size 0x%zx\n", i,
18637            (uintmax_t)sc->context[i].vcxt_dma.paddr,
18638            sc->context[i].vcxt_dma.vaddr,
18639            sc->context[i].size);
18640        allocated += sc->context[i].size;
18641    }
18642    BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n",
18643        (uintmax_t)sc->fw_stats_req_mapping,
18644        (uintmax_t)sc->fw_stats_data_mapping,
18645        sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
18646    BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n",
18647        (void *)sc->def_sb_dma.paddr, sc->def_sb,
18648        sizeof(struct host_sp_status_block));
18649    BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
18650        (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
18651    BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n",
18652        (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
18653        sizeof(struct bxe_slowpath));
18654    BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
18655        (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
18656    BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
18657        (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
18658        FW_BUF_SIZE);
18659    for (i = 0; i < sc->num_queues; i++) {
18660        fp = &sc->fp[i];
18661        BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
18662            (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
18663            sizeof(union bxe_host_hc_status_block));
18664        BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18665            (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
18666            (BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
18667        BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18668            (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
18669            (BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
18670        BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
18671            (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
18672            (BCM_PAGE_SIZE * RCQ_NUM_PAGES));
18673        BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
18674            (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
18675            (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
18676    }
18677
18678    ilt_cli = &ilt->clients[1];
18679    for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
18680        BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
18681            (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
18682            ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
18683    }
18684
18685
18686    cmd_offset = DMAE_REG_CMD_MEM;
18687    for (i = 0; i < 224; i++) {
18688        reg_addr = (cmd_offset +(i * 4));
18689        reg_val = REG_RD(sc, reg_addr);
18690        BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i,
18691            reg_addr, reg_val);
18692    }
18693
18694
18695    BLOGI(sc, "Collection of grcdump done\n");
18696    sc->grcdump_done = 1;
18697    return(rval);
18698}
18699
18700static int
18701bxe_add_cdev(struct bxe_softc *sc)
18702{
18703    sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT);
18704
18705    if (sc->eeprom == NULL) {
18706        BLOGW(sc, "Unable to alloc for eeprom size buffer\n");
18707        return (-1);
18708    }
18709
18710    sc->ioctl_dev = make_dev(&bxe_cdevsw,
18711                            sc->ifnet->if_dunit,
18712                            UID_ROOT,
18713                            GID_WHEEL,
18714                            0600,
18715                            "%s",
18716                            if_name(sc->ifnet));
18717
18718    if (sc->ioctl_dev == NULL) {
18719        free(sc->eeprom, M_DEVBUF);
18720        sc->eeprom = NULL;
18721        return (-1);
18722    }
18723
18724    sc->ioctl_dev->si_drv1 = sc;
18725
18726    return (0);
18727}
18728
18729static void
18730bxe_del_cdev(struct bxe_softc *sc)
18731{
18732    if (sc->ioctl_dev != NULL)
18733        destroy_dev(sc->ioctl_dev);
18734
18735    if (sc->eeprom != NULL) {
18736        free(sc->eeprom, M_DEVBUF);
18737        sc->eeprom = NULL;
18738    }
18739    sc->ioctl_dev = NULL;
18740
18741    return;
18742}
18743
18744static bool bxe_is_nvram_accessible(struct bxe_softc *sc)
18745{
18746
18747    if ((sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) == 0)
18748        return FALSE;
18749
18750    return TRUE;
18751}
18752
18753
18754static int
18755bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
18756{
18757    int rval = 0;
18758
18759    if(!bxe_is_nvram_accessible(sc)) {
18760        BLOGW(sc, "Cannot access eeprom when interface is down\n");
18761        return (-EAGAIN);
18762    }
18763    rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len);
18764
18765
18766   return (rval);
18767}
18768
18769static int
18770bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
18771{
18772    int rval = 0;
18773
18774    if(!bxe_is_nvram_accessible(sc)) {
18775        BLOGW(sc, "Cannot access eeprom when interface is down\n");
18776        return (-EAGAIN);
18777    }
18778    rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len);
18779
18780   return (rval);
18781}
18782
18783static int
18784bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom)
18785{
18786    int rval = 0;
18787
18788    switch (eeprom->eeprom_cmd) {
18789
18790    case BXE_EEPROM_CMD_SET_EEPROM:
18791
18792        rval = copyin(eeprom->eeprom_data, sc->eeprom,
18793                       eeprom->eeprom_data_len);
18794
18795        if (rval)
18796            break;
18797
18798        rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
18799                       eeprom->eeprom_data_len);
18800        break;
18801
18802    case BXE_EEPROM_CMD_GET_EEPROM:
18803
18804        rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
18805                       eeprom->eeprom_data_len);
18806
18807        if (rval) {
18808            break;
18809        }
18810
18811        rval = copyout(sc->eeprom, eeprom->eeprom_data,
18812                       eeprom->eeprom_data_len);
18813        break;
18814
18815    default:
18816            rval = EINVAL;
18817            break;
18818    }
18819
18820    if (rval) {
18821        BLOGW(sc, "ioctl cmd %d  failed rval %d\n", eeprom->eeprom_cmd, rval);
18822    }
18823
18824    return (rval);
18825}
18826
18827static int
18828bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p)
18829{
18830    uint32_t ext_phy_config;
18831    int port = SC_PORT(sc);
18832    int cfg_idx = bxe_get_link_cfg_idx(sc);
18833
18834    dev_p->supported = sc->port.supported[cfg_idx] |
18835            (sc->port.supported[cfg_idx ^ 1] &
18836            (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE));
18837    dev_p->advertising = sc->port.advertising[cfg_idx];
18838    if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type ==
18839        ELINK_ETH_PHY_SFP_1G_FIBER) {
18840        dev_p->supported = ~(ELINK_SUPPORTED_10000baseT_Full);
18841        dev_p->advertising &= ~(ADVERTISED_10000baseT_Full);
18842    }
18843    if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up &&
18844        !(sc->flags & BXE_MF_FUNC_DIS)) {
18845        dev_p->duplex = sc->link_vars.duplex;
18846        if (IS_MF(sc) && !BXE_NOMCP(sc))
18847            dev_p->speed = bxe_get_mf_speed(sc);
18848        else
18849            dev_p->speed = sc->link_vars.line_speed;
18850    } else {
18851        dev_p->duplex = DUPLEX_UNKNOWN;
18852        dev_p->speed = SPEED_UNKNOWN;
18853    }
18854
18855    dev_p->port = bxe_media_detect(sc);
18856
18857    ext_phy_config = SHMEM_RD(sc,
18858                         dev_info.port_hw_config[port].external_phy_config);
18859    if((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) ==
18860        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
18861        dev_p->phy_address =  sc->port.phy_addr;
18862    else if(((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
18863            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
18864        ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
18865            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
18866        dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
18867    else
18868        dev_p->phy_address = 0;
18869
18870    if(sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG)
18871        dev_p->autoneg = AUTONEG_ENABLE;
18872    else
18873       dev_p->autoneg = AUTONEG_DISABLE;
18874
18875
18876    return 0;
18877}
18878
18879static int
18880bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18881        struct thread *td)
18882{
18883    struct bxe_softc    *sc;
18884    int                 rval = 0;
18885    device_t            pci_dev;
18886    bxe_grcdump_t       *dump = NULL;
18887    int grc_dump_size;
18888    bxe_drvinfo_t   *drv_infop = NULL;
18889    bxe_dev_setting_t  *dev_p;
18890    bxe_dev_setting_t  dev_set;
18891    bxe_get_regs_t  *reg_p;
18892    bxe_reg_rdw_t *reg_rdw_p;
18893    bxe_pcicfg_rdw_t *cfg_rdw_p;
18894    bxe_perm_mac_addr_t *mac_addr_p;
18895
18896
18897    if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
18898        return ENXIO;
18899
18900    pci_dev= sc->dev;
18901
18902    dump = (bxe_grcdump_t *)data;
18903
18904    switch(cmd) {
18905
18906        case BXE_GRC_DUMP_SIZE:
18907            dump->pci_func = sc->pcie_func;
18908            dump->grcdump_size =
18909                (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18910                     sizeof(struct  dump_header);
18911            break;
18912
18913        case BXE_GRC_DUMP:
18914
18915            grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18916                                sizeof(struct  dump_header);
18917            if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
18918                (dump->grcdump_size < grc_dump_size)) {
18919                rval = EINVAL;
18920                break;
18921            }
18922
18923            if((sc->trigger_grcdump) && (!sc->grcdump_done) &&
18924                (!sc->grcdump_started)) {
18925                rval =  bxe_grc_dump(sc);
18926            }
18927
18928            if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
18929                (sc->grc_dump != NULL))  {
18930                dump->grcdump_dwords = grc_dump_size >> 2;
18931                rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
18932                free(sc->grc_dump, M_DEVBUF);
18933                sc->grc_dump = NULL;
18934                sc->grcdump_started = 0;
18935                sc->grcdump_done = 0;
18936            }
18937
18938            break;
18939
18940        case BXE_DRV_INFO:
18941            drv_infop = (bxe_drvinfo_t *)data;
18942            snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe");
18943            snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s",
18944                BXE_DRIVER_VERSION);
18945            snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s",
18946                sc->devinfo.bc_ver_str);
18947            snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH,
18948                "%s", sc->fw_ver_str);
18949            drv_infop->eeprom_dump_len = sc->devinfo.flash_size;
18950            drv_infop->reg_dump_len =
18951                (bxe_get_total_regs_len32(sc) * sizeof(uint32_t))
18952                    + sizeof(struct  dump_header);
18953            snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
18954                sc->pcie_bus, sc->pcie_device, sc->pcie_func);
18955            break;
18956
18957        case BXE_DEV_SETTING:
18958            dev_p = (bxe_dev_setting_t *)data;
18959            bxe_get_settings(sc, &dev_set);
18960            dev_p->supported = dev_set.supported;
18961            dev_p->advertising = dev_set.advertising;
18962            dev_p->speed = dev_set.speed;
18963            dev_p->duplex = dev_set.duplex;
18964            dev_p->port = dev_set.port;
18965            dev_p->phy_address = dev_set.phy_address;
18966            dev_p->autoneg = dev_set.autoneg;
18967
18968            break;
18969
18970        case BXE_GET_REGS:
18971
18972            reg_p = (bxe_get_regs_t *)data;
18973            grc_dump_size = reg_p->reg_buf_len;
18974
18975            if((!sc->grcdump_done) && (!sc->grcdump_started)) {
18976                bxe_grc_dump(sc);
18977            }
18978            if((sc->grcdump_done) && (sc->grcdump_started) &&
18979                (sc->grc_dump != NULL))  {
18980                rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
18981                free(sc->grc_dump, M_DEVBUF);
18982                sc->grc_dump = NULL;
18983                sc->grcdump_started = 0;
18984                sc->grcdump_done = 0;
18985            }
18986
18987            break;
18988
18989        case BXE_RDW_REG:
18990            reg_rdw_p = (bxe_reg_rdw_t *)data;
18991            if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&
18992                (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
18993                reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id);
18994
18995            if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) &&
18996                (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
18997                REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val);
18998
18999            break;
19000
19001        case BXE_RDW_PCICFG:
19002            cfg_rdw_p = (bxe_pcicfg_rdw_t *)data;
19003            if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) {
19004
19005                cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id,
19006                                         cfg_rdw_p->cfg_width);
19007
19008            } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) {
19009                pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val,
19010                            cfg_rdw_p->cfg_width);
19011            } else {
19012                BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n");
19013            }
19014            break;
19015
19016        case BXE_MAC_ADDR:
19017            mac_addr_p = (bxe_perm_mac_addr_t *)data;
19018            snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s",
19019                sc->mac_addr_str);
19020            break;
19021
19022        case BXE_EEPROM:
19023            rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data);
19024            break;
19025
19026
19027        default:
19028            break;
19029    }
19030
19031    return (rval);
19032}
19033