1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <sys/cdefs.h>
30#define BXE_DRIVER_VERSION "1.78.91"
31
32#include "bxe.h"
33#include "ecore_sp.h"
34#include "ecore_init.h"
35#include "ecore_init_ops.h"
36
37#include "57710_int_offsets.h"
38#include "57711_int_offsets.h"
39#include "57712_int_offsets.h"
40
41/*
42 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
43 * explicitly here for older kernels that don't include this changeset.
44 */
45#ifndef CTLTYPE_U64
46#define CTLTYPE_U64      CTLTYPE_QUAD
47#define sysctl_handle_64 sysctl_handle_quad
48#endif
49
50/*
51 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
 * here as zero (0) for older kernels that don't include this changeset,
 * thereby masking the functionality.
54 */
55#ifndef CSUM_TCP_IPV6
56#define CSUM_TCP_IPV6 0
57#define CSUM_UDP_IPV6 0
58#endif
59
60#define BXE_DEF_SB_ATT_IDX 0x0001
61#define BXE_DEF_SB_IDX     0x0002
62
63/*
64 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
65 * function HW initialization.
66 */
67#define FLR_WAIT_USEC     10000 /* 10 msecs */
68#define FLR_WAIT_INTERVAL 50    /* usecs */
69#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
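
/*
 * Illustrative sketch (not compiled): how the FLR constants above are meant
 * to be combined -- re-read a hardware counter every FLR_WAIT_INTERVAL usecs
 * for at most FLR_POLL_CNT iterations (FLR_WAIT_USEC total). The helper name
 * and the "poll until zero" condition are hypothetical examples only.
 */
#if 0
static uint32_t
bxe_flr_poll_example(struct bxe_softc *sc, uint32_t reg)
{
    uint32_t val = REG_RD(sc, reg);
    uint32_t cnt = FLR_POLL_CNT;

    while ((val != 0) && (--cnt > 0)) {
        DELAY(FLR_WAIT_INTERVAL);
        val = REG_RD(sc, reg);
    }

    return (val); /* non-zero means the counter never drained */
}
#endif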
70
71struct pbf_pN_buf_regs {
72    int pN;
73    uint32_t init_crd;
74    uint32_t crd;
75    uint32_t crd_freed;
76};
77
78struct pbf_pN_cmd_regs {
79    int pN;
80    uint32_t lines_occup;
81    uint32_t lines_freed;
82};
83
84/*
85 * PCI Device ID Table used by bxe_probe().
86 */
87#define BXE_DEVDESC_MAX 64
88static struct bxe_device_type bxe_devs[] = {
89    {
90        BRCM_VENDORID,
91        CHIP_NUM_57710,
92        PCI_ANY_ID, PCI_ANY_ID,
93        "QLogic NetXtreme II BCM57710 10GbE"
94    },
95    {
96        BRCM_VENDORID,
97        CHIP_NUM_57711,
98        PCI_ANY_ID, PCI_ANY_ID,
99        "QLogic NetXtreme II BCM57711 10GbE"
100    },
101    {
102        BRCM_VENDORID,
103        CHIP_NUM_57711E,
104        PCI_ANY_ID, PCI_ANY_ID,
105        "QLogic NetXtreme II BCM57711E 10GbE"
106    },
107    {
108        BRCM_VENDORID,
109        CHIP_NUM_57712,
110        PCI_ANY_ID, PCI_ANY_ID,
111        "QLogic NetXtreme II BCM57712 10GbE"
112    },
113    {
114        BRCM_VENDORID,
115        CHIP_NUM_57712_MF,
116        PCI_ANY_ID, PCI_ANY_ID,
117        "QLogic NetXtreme II BCM57712 MF 10GbE"
118    },
119    {
120        BRCM_VENDORID,
121        CHIP_NUM_57800,
122        PCI_ANY_ID, PCI_ANY_ID,
123        "QLogic NetXtreme II BCM57800 10GbE"
124    },
125    {
126        BRCM_VENDORID,
127        CHIP_NUM_57800_MF,
128        PCI_ANY_ID, PCI_ANY_ID,
129        "QLogic NetXtreme II BCM57800 MF 10GbE"
130    },
131    {
132        BRCM_VENDORID,
133        CHIP_NUM_57810,
134        PCI_ANY_ID, PCI_ANY_ID,
135        "QLogic NetXtreme II BCM57810 10GbE"
136    },
137    {
138        BRCM_VENDORID,
139        CHIP_NUM_57810_MF,
140        PCI_ANY_ID, PCI_ANY_ID,
141        "QLogic NetXtreme II BCM57810 MF 10GbE"
142    },
143    {
144        BRCM_VENDORID,
145        CHIP_NUM_57811,
146        PCI_ANY_ID, PCI_ANY_ID,
147        "QLogic NetXtreme II BCM57811 10GbE"
148    },
149    {
150        BRCM_VENDORID,
151        CHIP_NUM_57811_MF,
152        PCI_ANY_ID, PCI_ANY_ID,
153        "QLogic NetXtreme II BCM57811 MF 10GbE"
154    },
155    {
156        BRCM_VENDORID,
157        CHIP_NUM_57840_4_10,
158        PCI_ANY_ID, PCI_ANY_ID,
159        "QLogic NetXtreme II BCM57840 4x10GbE"
160    },
161    {
162        QLOGIC_VENDORID,
163        CHIP_NUM_57840_4_10,
164        PCI_ANY_ID, PCI_ANY_ID,
165        "QLogic NetXtreme II BCM57840 4x10GbE"
166    },
167    {
168        BRCM_VENDORID,
169        CHIP_NUM_57840_2_20,
170        PCI_ANY_ID, PCI_ANY_ID,
171        "QLogic NetXtreme II BCM57840 2x20GbE"
172    },
173    {
174        BRCM_VENDORID,
175        CHIP_NUM_57840_MF,
176        PCI_ANY_ID, PCI_ANY_ID,
177        "QLogic NetXtreme II BCM57840 MF 10GbE"
178    },
179    {
180        0, 0, 0, 0, NULL
181    }
182};
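
/*
 * Illustrative sketch (not compiled): how a probe routine can walk bxe_devs[]
 * above, treating PCI_ANY_ID subvendor/subdevice entries as wildcards. This
 * is a simplified example only (the field names are assumed from struct
 * bxe_device_type); see bxe_probe() for the driver's real logic.
 */
#if 0
static const struct bxe_device_type *
bxe_find_device_example(device_t dev)
{
    uint16_t vid  = pci_get_vendor(dev);
    uint16_t did  = pci_get_device(dev);
    uint16_t svid = pci_get_subvendor(dev);
    uint16_t sdid = pci_get_subdevice(dev);
    const struct bxe_device_type *t;

    for (t = bxe_devs; t->bxe_name != NULL; t++) {
        if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
            ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
            ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
            return (t);
        }
    }

    return (NULL); /* not one of ours */
}
#endif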
183
184MALLOC_DECLARE(M_BXE_ILT);
185MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");
186
187/*
188 * FreeBSD device entry points.
189 */
190static int bxe_probe(device_t);
191static int bxe_attach(device_t);
192static int bxe_detach(device_t);
193static int bxe_shutdown(device_t);
194
195
196/*
 * FreeBSD KLD module/device interface event handler methods.
198 */
199static device_method_t bxe_methods[] = {
200    /* Device interface (device_if.h) */
201    DEVMETHOD(device_probe,     bxe_probe),
202    DEVMETHOD(device_attach,    bxe_attach),
203    DEVMETHOD(device_detach,    bxe_detach),
204    DEVMETHOD(device_shutdown,  bxe_shutdown),
205    /* Bus interface (bus_if.h) */
206    DEVMETHOD(bus_print_child,  bus_generic_print_child),
207    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
208    KOBJMETHOD_END
209};
210
211/*
212 * FreeBSD KLD Module data declaration
213 */
214static driver_t bxe_driver = {
215    "bxe",                   /* module name */
216    bxe_methods,             /* event handler */
217    sizeof(struct bxe_softc) /* extra data */
218};
219
220MODULE_DEPEND(bxe, pci, 1, 1, 1);
221MODULE_DEPEND(bxe, ether, 1, 1, 1);
222DRIVER_MODULE(bxe, pci, bxe_driver, 0, 0);
223
224DEBUGNET_DEFINE(bxe);
225
226/* resources needed for unloading a previously loaded device */
227
228#define BXE_PREV_WAIT_NEEDED 1
229struct mtx bxe_prev_mtx;
230MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
231struct bxe_prev_list_node {
232    LIST_ENTRY(bxe_prev_list_node) node;
233    uint8_t bus;
234    uint8_t slot;
235    uint8_t path;
236    uint8_t aer; /* XXX automatic error recovery */
237    uint8_t undi;
238};
239static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);
240
241static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
242
243/* Tunable device values... */
244
245SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
246    "bxe driver parameters");
247
248/* Debug */
249unsigned long bxe_debug = 0;
250SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN,
251             &bxe_debug, 0, "Debug logging mode");
252
253/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
254static int bxe_interrupt_mode = INTR_MODE_MSIX;
255SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
256           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
257
258/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
259static int bxe_queue_count = 4;
260SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
261           &bxe_queue_count, 0, "Multi-Queue queue count");
262
263/* max number of buffers per queue (default RX_BD_USABLE) */
264static int bxe_max_rx_bufs = 0;
265SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
266           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");
267
268/* Host interrupt coalescing RX tick timer (usecs) */
269static int bxe_hc_rx_ticks = 25;
270SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
271           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");
272
273/* Host interrupt coalescing TX tick timer (usecs) */
274static int bxe_hc_tx_ticks = 50;
275SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
276           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");
277
278/* Maximum number of Rx packets to process at a time */
279static int bxe_rx_budget = 0xffffffff;
280SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_RDTUN,
281           &bxe_rx_budget, 0, "Rx processing budget");
282
283/* Maximum LRO aggregation size */
284static int bxe_max_aggregation_size = 0;
285SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_RDTUN,
286           &bxe_max_aggregation_size, 0, "max aggregation size");
287
288/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
289static int bxe_mrrs = -1;
290SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
291           &bxe_mrrs, 0, "PCIe maximum read request size");
292
293/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
294static int bxe_autogreeen = 0;
295SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
296           &bxe_autogreeen, 0, "AutoGrEEEn support");
297
298/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
299static int bxe_udp_rss = 0;
300SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
301           &bxe_udp_rss, 0, "UDP RSS support");
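
/*
 * Usage note (typical, not driver-enforced): the CTLFLAG_RDTUN tunables
 * above are read-only sysctls that can be set from /boot/loader.conf before
 * the module loads, e.g.:
 *
 *   hw.bxe.debug=0
 *   hw.bxe.interrupt_mode=2
 *   hw.bxe.queue_count=4
 *
 * and inspected at runtime with "sysctl hw.bxe".
 */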
302
303
304#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */
305
306#define STATS_OFFSET32(stat_name)                   \
307    (offsetof(struct bxe_eth_stats, stat_name) / 4)
308
309#define Q_STATS_OFFSET32(stat_name)                   \
310    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
311
312static const struct {
313    uint32_t offset;
314    uint32_t size;
315    uint32_t flags;
316#define STATS_FLAGS_PORT  1
317#define STATS_FLAGS_FUNC  2 /* MF only cares about function stats */
318#define STATS_FLAGS_BOTH  (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
319    char string[STAT_NAME_LEN];
320} bxe_eth_stats_arr[] = {
321    { STATS_OFFSET32(total_bytes_received_hi),
322                8, STATS_FLAGS_BOTH, "rx_bytes" },
323    { STATS_OFFSET32(error_bytes_received_hi),
324                8, STATS_FLAGS_BOTH, "rx_error_bytes" },
325    { STATS_OFFSET32(total_unicast_packets_received_hi),
326                8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
327    { STATS_OFFSET32(total_multicast_packets_received_hi),
328                8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
329    { STATS_OFFSET32(total_broadcast_packets_received_hi),
330                8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
331    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
332                8, STATS_FLAGS_PORT, "rx_crc_errors" },
333    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
334                8, STATS_FLAGS_PORT, "rx_align_errors" },
335    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
336                8, STATS_FLAGS_PORT, "rx_undersize_packets" },
337    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
338                8, STATS_FLAGS_PORT, "rx_oversize_packets" },
339    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
340                8, STATS_FLAGS_PORT, "rx_fragments" },
341    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
342                8, STATS_FLAGS_PORT, "rx_jabbers" },
343    { STATS_OFFSET32(no_buff_discard_hi),
344                8, STATS_FLAGS_BOTH, "rx_discards" },
345    { STATS_OFFSET32(mac_filter_discard),
346                4, STATS_FLAGS_PORT, "rx_filtered_packets" },
347    { STATS_OFFSET32(mf_tag_discard),
348                4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
349    { STATS_OFFSET32(pfc_frames_received_hi),
350                8, STATS_FLAGS_PORT, "pfc_frames_received" },
351    { STATS_OFFSET32(pfc_frames_sent_hi),
352                8, STATS_FLAGS_PORT, "pfc_frames_sent" },
353    { STATS_OFFSET32(brb_drop_hi),
354                8, STATS_FLAGS_PORT, "rx_brb_discard" },
355    { STATS_OFFSET32(brb_truncate_hi),
356                8, STATS_FLAGS_PORT, "rx_brb_truncate" },
357    { STATS_OFFSET32(pause_frames_received_hi),
358                8, STATS_FLAGS_PORT, "rx_pause_frames" },
359    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
360                8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
361    { STATS_OFFSET32(nig_timer_max),
362                4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
363    { STATS_OFFSET32(total_bytes_transmitted_hi),
364                8, STATS_FLAGS_BOTH, "tx_bytes" },
365    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
366                8, STATS_FLAGS_PORT, "tx_error_bytes" },
367    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
368                8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
369    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
370                8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
371    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
372                8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
373    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
374                8, STATS_FLAGS_PORT, "tx_mac_errors" },
375    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
376                8, STATS_FLAGS_PORT, "tx_carrier_errors" },
377    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
378                8, STATS_FLAGS_PORT, "tx_single_collisions" },
379    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
380                8, STATS_FLAGS_PORT, "tx_multi_collisions" },
381    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
382                8, STATS_FLAGS_PORT, "tx_deferred" },
383    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
384                8, STATS_FLAGS_PORT, "tx_excess_collisions" },
385    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
386                8, STATS_FLAGS_PORT, "tx_late_collisions" },
387    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
388                8, STATS_FLAGS_PORT, "tx_total_collisions" },
389    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
390                8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
391    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
392                8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
393    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
394                8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
395    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
396                8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
397    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
398                8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
399    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
400                8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
401    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
402                8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
403    { STATS_OFFSET32(pause_frames_sent_hi),
404                8, STATS_FLAGS_PORT, "tx_pause_frames" },
405    { STATS_OFFSET32(total_tpa_aggregations_hi),
406                8, STATS_FLAGS_FUNC, "tpa_aggregations" },
407    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
408                8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
409    { STATS_OFFSET32(total_tpa_bytes_hi),
410                8, STATS_FLAGS_FUNC, "tpa_bytes"},
411    { STATS_OFFSET32(eee_tx_lpi),
412                4, STATS_FLAGS_PORT, "eee_tx_lpi"},
413    { STATS_OFFSET32(rx_calls),
414                4, STATS_FLAGS_FUNC, "rx_calls"},
415    { STATS_OFFSET32(rx_pkts),
416                4, STATS_FLAGS_FUNC, "rx_pkts"},
417    { STATS_OFFSET32(rx_tpa_pkts),
418                4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
419    { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
420                4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"},
421    { STATS_OFFSET32(rx_bxe_service_rxsgl),
422                4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"},
423    { STATS_OFFSET32(rx_jumbo_sge_pkts),
424                4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"},
425    { STATS_OFFSET32(rx_soft_errors),
426                4, STATS_FLAGS_FUNC, "rx_soft_errors"},
427    { STATS_OFFSET32(rx_hw_csum_errors),
428                4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
429    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
430                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
431    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
432                4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
433    { STATS_OFFSET32(rx_budget_reached),
434                4, STATS_FLAGS_FUNC, "rx_budget_reached"},
435    { STATS_OFFSET32(tx_pkts),
436                4, STATS_FLAGS_FUNC, "tx_pkts"},
437    { STATS_OFFSET32(tx_soft_errors),
438                4, STATS_FLAGS_FUNC, "tx_soft_errors"},
439    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
440                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
441    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
442                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
443    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
444                4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
445    { STATS_OFFSET32(tx_ofld_frames_lso),
446                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
447    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
448                4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
449    { STATS_OFFSET32(tx_encap_failures),
450                4, STATS_FLAGS_FUNC, "tx_encap_failures"},
451    { STATS_OFFSET32(tx_hw_queue_full),
452                4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
453    { STATS_OFFSET32(tx_hw_max_queue_depth),
454                4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
455    { STATS_OFFSET32(tx_dma_mapping_failure),
456                4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
457    { STATS_OFFSET32(tx_max_drbr_queue_depth),
458                4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
459    { STATS_OFFSET32(tx_window_violation_std),
460                4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
461    { STATS_OFFSET32(tx_window_violation_tso),
462                4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
463    { STATS_OFFSET32(tx_chain_lost_mbuf),
464                4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
465    { STATS_OFFSET32(tx_frames_deferred),
466                4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
467    { STATS_OFFSET32(tx_queue_xoff),
468                4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
469    { STATS_OFFSET32(mbuf_defrag_attempts),
470                4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
471    { STATS_OFFSET32(mbuf_defrag_failures),
472                4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
473    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
474                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
475    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
476                4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
477    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
478                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
479    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
480                4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
481    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
482                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
483    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
484                4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
485    { STATS_OFFSET32(mbuf_alloc_tx),
486                4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
487    { STATS_OFFSET32(mbuf_alloc_rx),
488                4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
489    { STATS_OFFSET32(mbuf_alloc_sge),
490                4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
491    { STATS_OFFSET32(mbuf_alloc_tpa),
492                4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"},
493    { STATS_OFFSET32(tx_queue_full_return),
494                4, STATS_FLAGS_FUNC, "tx_queue_full_return"},
495    { STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
496                4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"},
497    { STATS_OFFSET32(tx_request_link_down_failures),
498                4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"},
499    { STATS_OFFSET32(bd_avail_too_less_failures),
500                4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"},
501    { STATS_OFFSET32(tx_mq_not_empty),
502                4, STATS_FLAGS_FUNC, "tx_mq_not_empty"},
503    { STATS_OFFSET32(nsegs_path1_errors),
504                4, STATS_FLAGS_FUNC, "nsegs_path1_errors"},
505    { STATS_OFFSET32(nsegs_path2_errors),
506                4, STATS_FLAGS_FUNC, "nsegs_path2_errors"}
};
510
511static const struct {
512    uint32_t offset;
513    uint32_t size;
514    char string[STAT_NAME_LEN];
515} bxe_eth_q_stats_arr[] = {
516    { Q_STATS_OFFSET32(total_bytes_received_hi),
517                8, "rx_bytes" },
518    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
519                8, "rx_ucast_packets" },
520    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
521                8, "rx_mcast_packets" },
522    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
523                8, "rx_bcast_packets" },
524    { Q_STATS_OFFSET32(no_buff_discard_hi),
525                8, "rx_discards" },
526    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
527                8, "tx_bytes" },
528    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
529                8, "tx_ucast_packets" },
530    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
531                8, "tx_mcast_packets" },
532    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
533                8, "tx_bcast_packets" },
534    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
535                8, "tpa_aggregations" },
536    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
537                8, "tpa_aggregated_frames"},
538    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
539                8, "tpa_bytes"},
540    { Q_STATS_OFFSET32(rx_calls),
541                4, "rx_calls"},
542    { Q_STATS_OFFSET32(rx_pkts),
543                4, "rx_pkts"},
544    { Q_STATS_OFFSET32(rx_tpa_pkts),
545                4, "rx_tpa_pkts"},
546    { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts),
547                4, "rx_erroneous_jumbo_sge_pkts"},
548    { Q_STATS_OFFSET32(rx_bxe_service_rxsgl),
549                4, "rx_bxe_service_rxsgl"},
550    { Q_STATS_OFFSET32(rx_jumbo_sge_pkts),
551                4, "rx_jumbo_sge_pkts"},
552    { Q_STATS_OFFSET32(rx_soft_errors),
553                4, "rx_soft_errors"},
554    { Q_STATS_OFFSET32(rx_hw_csum_errors),
555                4, "rx_hw_csum_errors"},
556    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
557                4, "rx_ofld_frames_csum_ip"},
558    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
559                4, "rx_ofld_frames_csum_tcp_udp"},
560    { Q_STATS_OFFSET32(rx_budget_reached),
561                4, "rx_budget_reached"},
562    { Q_STATS_OFFSET32(tx_pkts),
563                4, "tx_pkts"},
564    { Q_STATS_OFFSET32(tx_soft_errors),
565                4, "tx_soft_errors"},
566    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
567                4, "tx_ofld_frames_csum_ip"},
568    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
569                4, "tx_ofld_frames_csum_tcp"},
570    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
571                4, "tx_ofld_frames_csum_udp"},
572    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
573                4, "tx_ofld_frames_lso"},
574    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
575                4, "tx_ofld_frames_lso_hdr_splits"},
576    { Q_STATS_OFFSET32(tx_encap_failures),
577                4, "tx_encap_failures"},
578    { Q_STATS_OFFSET32(tx_hw_queue_full),
579                4, "tx_hw_queue_full"},
580    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
581                4, "tx_hw_max_queue_depth"},
582    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
583                4, "tx_dma_mapping_failure"},
584    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
585                4, "tx_max_drbr_queue_depth"},
586    { Q_STATS_OFFSET32(tx_window_violation_std),
587                4, "tx_window_violation_std"},
588    { Q_STATS_OFFSET32(tx_window_violation_tso),
589                4, "tx_window_violation_tso"},
590    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
591                4, "tx_chain_lost_mbuf"},
592    { Q_STATS_OFFSET32(tx_frames_deferred),
593                4, "tx_frames_deferred"},
594    { Q_STATS_OFFSET32(tx_queue_xoff),
595                4, "tx_queue_xoff"},
596    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
597                4, "mbuf_defrag_attempts"},
598    { Q_STATS_OFFSET32(mbuf_defrag_failures),
599                4, "mbuf_defrag_failures"},
600    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
601                4, "mbuf_rx_bd_alloc_failed"},
602    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
603                4, "mbuf_rx_bd_mapping_failed"},
604    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
605                4, "mbuf_rx_tpa_alloc_failed"},
606    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
607                4, "mbuf_rx_tpa_mapping_failed"},
608    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
609                4, "mbuf_rx_sge_alloc_failed"},
610    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
611                4, "mbuf_rx_sge_mapping_failed"},
612    { Q_STATS_OFFSET32(mbuf_alloc_tx),
613                4, "mbuf_alloc_tx"},
614    { Q_STATS_OFFSET32(mbuf_alloc_rx),
615                4, "mbuf_alloc_rx"},
616    { Q_STATS_OFFSET32(mbuf_alloc_sge),
617                4, "mbuf_alloc_sge"},
618    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
619                4, "mbuf_alloc_tpa"},
620    { Q_STATS_OFFSET32(tx_queue_full_return),
621                4, "tx_queue_full_return"},
622    { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures),
623                4, "bxe_tx_mq_sc_state_failures"},
624    { Q_STATS_OFFSET32(tx_request_link_down_failures),
625                4, "tx_request_link_down_failures"},
626    { Q_STATS_OFFSET32(bd_avail_too_less_failures),
627                4, "bd_avail_too_less_failures"},
628    { Q_STATS_OFFSET32(tx_mq_not_empty),
629                4, "tx_mq_not_empty"},
630    { Q_STATS_OFFSET32(nsegs_path1_errors),
631                4, "nsegs_path1_errors"},
632    { Q_STATS_OFFSET32(nsegs_path2_errors),
633                4, "nsegs_path2_errors"}
};
637
638#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
639#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)
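
/*
 * Illustrative sketch (not compiled): the tables above describe each counter
 * by its 32-bit word offset (STATS_OFFSET32/Q_STATS_OFFSET32) plus a size of
 * 4 or 8 bytes, where the 8-byte counters are assumed to be stored as a _hi
 * word followed by a _lo word. A hypothetical reader for one entry of
 * bxe_eth_stats_arr[] (idx < BXE_NUM_ETH_STATS) could look like this.
 */
#if 0
static uint64_t
bxe_read_eth_stat_example(struct bxe_eth_stats *stats, int idx)
{
    uint32_t *base = ((uint32_t *)stats + bxe_eth_stats_arr[idx].offset);

    if (bxe_eth_stats_arr[idx].size == 4) {
        return (base[0]);
    }

    /* 8-byte counter: high word first, then the low word */
    return (((uint64_t)base[0] << 32) | base[1]);
}
#endif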
640
641
642static void    bxe_cmng_fns_init(struct bxe_softc *sc,
643                                 uint8_t          read_cfg,
644                                 uint8_t          cmng_type);
645static int     bxe_get_cmng_fns_mode(struct bxe_softc *sc);
646static void    storm_memset_cmng(struct bxe_softc *sc,
647                                 struct cmng_init *cmng,
648                                 uint8_t          port);
649static void    bxe_set_reset_global(struct bxe_softc *sc);
650static void    bxe_set_reset_in_progress(struct bxe_softc *sc);
651static uint8_t bxe_reset_is_done(struct bxe_softc *sc,
652                                 int              engine);
653static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
654static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc,
655                                   uint8_t          *global,
656                                   uint8_t          print);
657static void    bxe_int_disable(struct bxe_softc *sc);
658static int     bxe_release_leader_lock(struct bxe_softc *sc);
659static void    bxe_pf_disable(struct bxe_softc *sc);
660static void    bxe_free_fp_buffers(struct bxe_softc *sc);
661static inline void bxe_update_rx_prod(struct bxe_softc    *sc,
662                                      struct bxe_fastpath *fp,
663                                      uint16_t            rx_bd_prod,
664                                      uint16_t            rx_cq_prod,
665                                      uint16_t            rx_sge_prod);
666static void    bxe_link_report_locked(struct bxe_softc *sc);
667static void    bxe_link_report(struct bxe_softc *sc);
668static void    bxe_link_status_update(struct bxe_softc *sc);
669static void    bxe_periodic_callout_func(void *xsc);
670static void    bxe_periodic_start(struct bxe_softc *sc);
671static void    bxe_periodic_stop(struct bxe_softc *sc);
672static int     bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
673                                    uint16_t prev_index,
674                                    uint16_t index);
675static int     bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
676                                     int                 queue);
677static int     bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
678                                     uint16_t            index);
679static uint8_t bxe_txeof(struct bxe_softc *sc,
680                         struct bxe_fastpath *fp);
681static void    bxe_task_fp(struct bxe_fastpath *fp);
682static __noinline void bxe_dump_mbuf(struct bxe_softc *sc,
683                                     struct mbuf      *m,
684                                     uint8_t          contents);
685static int     bxe_alloc_mem(struct bxe_softc *sc);
686static void    bxe_free_mem(struct bxe_softc *sc);
687static int     bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
688static void    bxe_free_fw_stats_mem(struct bxe_softc *sc);
689static int     bxe_interrupt_attach(struct bxe_softc *sc);
690static void    bxe_interrupt_detach(struct bxe_softc *sc);
691static void    bxe_set_rx_mode(struct bxe_softc *sc);
692static int     bxe_init_locked(struct bxe_softc *sc);
693static int     bxe_stop_locked(struct bxe_softc *sc);
694static void    bxe_sp_err_timeout_task(void *arg, int pending);
695void           bxe_parity_recover(struct bxe_softc *sc);
696void           bxe_handle_error(struct bxe_softc *sc);
697static __noinline int bxe_nic_load(struct bxe_softc *sc,
698                                   int              load_mode);
699static __noinline int bxe_nic_unload(struct bxe_softc *sc,
700                                     uint32_t         unload_mode,
701                                     uint8_t          keep_link);
702
703static void bxe_handle_sp_tq(void *context, int pending);
704static void bxe_handle_fp_tq(void *context, int pending);
705
706static int bxe_add_cdev(struct bxe_softc *sc);
707static void bxe_del_cdev(struct bxe_softc *sc);
708int bxe_grc_dump(struct bxe_softc *sc);
709static int bxe_alloc_buf_rings(struct bxe_softc *sc);
710static void bxe_free_buf_rings(struct bxe_softc *sc);
711
/* calculate crc32 on a buffer (NOTE: crc32_length MUST be a multiple of 8) */
713uint32_t
714calc_crc32(uint8_t  *crc32_packet,
715           uint32_t crc32_length,
716           uint32_t crc32_seed,
717           uint8_t  complement)
718{
719   uint32_t byte         = 0;
720   uint32_t bit          = 0;
721   uint8_t  msb          = 0;
722   uint32_t temp         = 0;
723   uint32_t shft         = 0;
724   uint8_t  current_byte = 0;
725   uint32_t crc32_result = crc32_seed;
726   const uint32_t CRC32_POLY = 0x1edc6f41;
727
728   if ((crc32_packet == NULL) ||
729       (crc32_length == 0) ||
730       ((crc32_length % 8) != 0))
731    {
732        return (crc32_result);
733    }
734
735    for (byte = 0; byte < crc32_length; byte = byte + 1)
736    {
737        current_byte = crc32_packet[byte];
738        for (bit = 0; bit < 8; bit = bit + 1)
739        {
740            /* msb = crc32_result[31]; */
741            msb = (uint8_t)(crc32_result >> 31);
742
743            crc32_result = crc32_result << 1;
744
            /* if (msb != current_byte[bit]) */
746            if (msb != (0x1 & (current_byte >> bit)))
747            {
748                crc32_result = crc32_result ^ CRC32_POLY;
749                /* crc32_result[0] = 1 */
750                crc32_result |= 1;
751            }
752        }
753    }
754
755    /* Last step is to:
756     * 1. "mirror" every bit
757     * 2. swap the 4 bytes
758     * 3. complement each bit
759     */
760
761    /* Mirror */
762    temp = crc32_result;
763    shft = sizeof(crc32_result) * 8 - 1;
764
765    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
766    {
767        temp <<= 1;
768        temp |= crc32_result & 1;
769        shft-- ;
770    }
771
772    /* temp[31-bit] = crc32_result[bit] */
773    temp <<= shft;
774
775    /* Swap */
776    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
777    {
778        uint32_t t0, t1, t2, t3;
779        t0 = (0x000000ff & (temp >> 24));
780        t1 = (0x0000ff00 & (temp >> 8));
781        t2 = (0x00ff0000 & (temp << 8));
782        t3 = (0xff000000 & (temp << 24));
783        crc32_result = t0 | t1 | t2 | t3;
784    }
785
786    /* Complement */
787    if (complement)
788    {
789        crc32_result = ~crc32_result;
790    }
791
792    return (crc32_result);
793}
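
/*
 * Illustrative usage sketch (not compiled) for calc_crc32() above: the buffer
 * length must be a multiple of 8 bytes, and complement=0 skips the final bit
 * inversion. The seed and buffer contents below are arbitrary example values.
 */
#if 0
static void
calc_crc32_example(void)
{
    uint8_t buf[8] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };
    uint32_t crc;

    crc = calc_crc32(buf, sizeof(buf), 0xffffffff, 1 /* complement */);
    (void)crc; /* e.g. compare against a checksum stored elsewhere */
}
#endif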
794
795int
796bxe_test_bit(int                    nr,
797             volatile unsigned long *addr)
798{
799    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
800}
801
802void
803bxe_set_bit(unsigned int           nr,
804            volatile unsigned long *addr)
805{
806    atomic_set_acq_long(addr, (1 << nr));
807}
808
809void
810bxe_clear_bit(int                    nr,
811              volatile unsigned long *addr)
812{
813    atomic_clear_acq_long(addr, (1 << nr));
814}
815
816int
817bxe_test_and_set_bit(int                    nr,
818                       volatile unsigned long *addr)
819{
820    unsigned long x;
821    nr = (1 << nr);
822    do {
823        x = *addr;
824    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
825    // if (x & nr) bit_was_set; else bit_was_not_set;
826    return (x & nr);
827}
828
829int
830bxe_test_and_clear_bit(int                    nr,
831                       volatile unsigned long *addr)
832{
833    unsigned long x;
834    nr = (1 << nr);
835    do {
836        x = *addr;
837    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
838    // if (x & nr) bit_was_set; else bit_was_not_set;
839    return (x & nr);
840}
841
842int
843bxe_cmpxchg(volatile int *addr,
844            int          old,
845            int          new)
846{
847    int x;
848    do {
849        x = *addr;
850    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
851    return (x);
852}
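
/*
 * Illustrative sketch (not compiled): typical use of the atomic bit helpers
 * above, e.g. guarding a one-shot action with test-and-set on a flags word.
 * The flag bit and variable names here are hypothetical.
 */
#if 0
static volatile unsigned long example_flags;
#define EXAMPLE_FLAG_STARTED 0

static void
bxe_bit_helpers_example(void)
{
    /* only the first caller to atomically set the bit does the work */
    if (bxe_test_and_set_bit(EXAMPLE_FLAG_STARTED, &example_flags) == 0) {
        /* ... one-time work ... */
    }

    if (bxe_test_bit(EXAMPLE_FLAG_STARTED, &example_flags)) {
        /* ... already started ... */
    }

    bxe_clear_bit(EXAMPLE_FLAG_STARTED, &example_flags);
}
#endif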
853
854/*
855 * Get DMA memory from the OS.
856 *
857 * Validates that the OS has provided DMA buffers in response to a
858 * bus_dmamap_load call and saves the physical address of those buffers.
 * Because the OS reports success from bus_dmamap_load() when this callback
 * is used, any mapping error is recorded here by zeroing the physical
 * address and segment count in the bxe_dma structure for the caller to check.
862 *
863 * Returns:
864 *   Nothing.
865 */
866static void
867bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
868{
869    struct bxe_dma *dma = arg;
870
871    if (error) {
872        dma->paddr = 0;
873        dma->nseg  = 0;
874        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
875    } else {
876        dma->paddr = segs->ds_addr;
877        dma->nseg  = nseg;
878    }
879}
880
881/*
882 * Allocate a block of memory and map it for DMA. No partial completions
883 * allowed and release any resources acquired if we can't acquire all
884 * resources.
885 *
886 * Returns:
887 *   0 = Success, !0 = Failure
888 */
889int
890bxe_dma_alloc(struct bxe_softc *sc,
891              bus_size_t       size,
892              struct bxe_dma   *dma,
893              const char       *msg)
894{
895    int rc;
896
897    if (dma->size > 0) {
898        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
899              (unsigned long)dma->size);
900        return (1);
901    }
902
903    memset(dma, 0, sizeof(*dma)); /* sanity */
904    dma->sc   = sc;
905    dma->size = size;
906    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);
907
908    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
909                            BCM_PAGE_SIZE,      /* alignment */
910                            0,                  /* boundary limit */
911                            BUS_SPACE_MAXADDR,  /* restricted low */
912                            BUS_SPACE_MAXADDR,  /* restricted hi */
913                            NULL,               /* addr filter() */
914                            NULL,               /* addr filter() arg */
915                            size,               /* max map size */
916                            1,                  /* num discontinuous */
917                            size,               /* max seg size */
918                            BUS_DMA_ALLOCNOW,   /* flags */
919                            NULL,               /* lock() */
920                            NULL,               /* lock() arg */
921                            &dma->tag);         /* returned dma tag */
922    if (rc != 0) {
923        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
924        memset(dma, 0, sizeof(*dma));
925        return (1);
926    }
927
928    rc = bus_dmamem_alloc(dma->tag,
929                          (void **)&dma->vaddr,
930                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
931                          &dma->map);
932    if (rc != 0) {
933        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
934        bus_dma_tag_destroy(dma->tag);
935        memset(dma, 0, sizeof(*dma));
936        return (1);
937    }
938
939    rc = bus_dmamap_load(dma->tag,
940                         dma->map,
941                         dma->vaddr,
942                         size,
943                         bxe_dma_map_addr, /* BLOGD in here */
944                         dma,
945                         BUS_DMA_NOWAIT);
946    if (rc != 0) {
947        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
948        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
949        bus_dma_tag_destroy(dma->tag);
950        memset(dma, 0, sizeof(*dma));
951        return (1);
952    }
953
954    return (0);
955}
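
/*
 * Illustrative sketch (not compiled): the usual lifetime of a bxe_dma block,
 * i.e. bxe_dma_alloc() paired with bxe_dma_free(). The size and tag string
 * are hypothetical; on success dma.vaddr/dma.paddr hold the virtual and bus
 * addresses of the zeroed block.
 */
#if 0
static int
bxe_dma_example(struct bxe_softc *sc)
{
    struct bxe_dma dma;

    memset(&dma, 0, sizeof(dma)); /* size must be 0 before bxe_dma_alloc() */

    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &dma, "example block") != 0) {
        return (ENOMEM);
    }

    /* ... hand dma.paddr to the chip, access the memory via dma.vaddr ... */

    bxe_dma_free(sc, &dma);
    return (0);
}
#endif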
956
957void
958bxe_dma_free(struct bxe_softc *sc,
959             struct bxe_dma   *dma)
960{
961    if (dma->size > 0) {
962        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));
963
964        bus_dmamap_sync(dma->tag, dma->map,
965                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
966        bus_dmamap_unload(dma->tag, dma->map);
967        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
968        bus_dma_tag_destroy(dma->tag);
969    }
970
971    memset(dma, 0, sizeof(*dma));
972}
973
974/*
 * These indirect read and write routines are only used during init.
976 * The locking is handled by the MCP.
977 */
978
979void
980bxe_reg_wr_ind(struct bxe_softc *sc,
981               uint32_t         addr,
982               uint32_t         val)
983{
984    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
985    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
986    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
987}
988
989uint32_t
990bxe_reg_rd_ind(struct bxe_softc *sc,
991               uint32_t         addr)
992{
993    uint32_t val;
994
995    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
996    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
997    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
998
999    return (val);
1000}
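
/*
 * Illustrative sketch (not compiled): the indirect access routines above work
 * through the PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA window in config space, so
 * an early-init read-modify-write of a GRC register could look like this.
 * The register and bits are placeholders supplied by the caller.
 */
#if 0
static void
bxe_reg_ind_rmw_example(struct bxe_softc *sc, uint32_t reg, uint32_t bits)
{
    uint32_t val;

    val = bxe_reg_rd_ind(sc, reg);
    bxe_reg_wr_ind(sc, reg, (val | bits));
}
#endif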
1001
1002static int
1003bxe_acquire_hw_lock(struct bxe_softc *sc,
1004                    uint32_t         resource)
1005{
1006    uint32_t lock_status;
1007    uint32_t resource_bit = (1 << resource);
1008    int func = SC_FUNC(sc);
1009    uint32_t hw_lock_control_reg;
1010    int cnt;
1011
1012    /* validate the resource is within range */
1013    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1014        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1015            " resource_bit 0x%x\n", resource, resource_bit);
1016        return (-1);
1017    }
1018
1019    if (func <= 5) {
1020        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1021    } else {
1022        hw_lock_control_reg =
1023                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1024    }
1025
1026    /* validate the resource is not already taken */
1027    lock_status = REG_RD(sc, hw_lock_control_reg);
1028    if (lock_status & resource_bit) {
1029        BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n",
1030              resource, lock_status, resource_bit);
1031        return (-1);
1032    }
1033
1034    /* try every 5ms for 5 seconds */
1035    for (cnt = 0; cnt < 1000; cnt++) {
1036        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
1037        lock_status = REG_RD(sc, hw_lock_control_reg);
1038        if (lock_status & resource_bit) {
1039            return (0);
1040        }
1041        DELAY(5000);
1042    }
1043
1044    BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n",
1045        resource, resource_bit);
1046    return (-1);
1047}
1048
1049static int
1050bxe_release_hw_lock(struct bxe_softc *sc,
1051                    uint32_t         resource)
1052{
1053    uint32_t lock_status;
1054    uint32_t resource_bit = (1 << resource);
1055    int func = SC_FUNC(sc);
1056    uint32_t hw_lock_control_reg;
1057
1058    /* validate the resource is within range */
1059    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1060        BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
1061            " resource_bit 0x%x\n", resource, resource_bit);
1062        return (-1);
1063    }
1064
1065    if (func <= 5) {
1066        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
1067    } else {
1068        hw_lock_control_reg =
1069                (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1070    }
1071
1072    /* validate the resource is currently taken */
1073    lock_status = REG_RD(sc, hw_lock_control_reg);
1074    if (!(lock_status & resource_bit)) {
1075        BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n",
1076              resource, lock_status, resource_bit);
1077        return (-1);
1078    }
1079
1080    REG_WR(sc, hw_lock_control_reg, resource_bit);
1081    return (0);
1082}
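
/*
 * Illustrative sketch (not compiled): bxe_acquire_hw_lock() and
 * bxe_release_hw_lock() bracket accesses to resources shared between PFs.
 * HW_LOCK_RESOURCE_NVRAM is a resource the driver really uses; the work done
 * while holding the lock is a placeholder.
 */
#if 0
static int
bxe_hw_lock_example(struct bxe_softc *sc)
{
    if (bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM) != 0) {
        return (-1);
    }

    /* ... touch the shared resource ... */

    return (bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM));
}
#endif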

static void
bxe_acquire_phy_lock(struct bxe_softc *sc)
{
    BXE_PHY_LOCK(sc);
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
}

static void
bxe_release_phy_lock(struct bxe_softc *sc)
{
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
    BXE_PHY_UNLOCK(sc);
}

/*
 * The per-PF misc lock must be acquired before the per-port MCP lock.
 * Otherwise, if two PFs from the same port were to access NVRAM at the same
 * time, we could run into a scenario such as:
 *  - PF A takes the port lock.
 *  - PF B succeeds in taking the same lock since they are from the same port.
 *  - PF A takes the per-PF misc lock and performs its EEPROM access.
 *  - PF A finishes and unlocks the per-PF misc lock.
 *  - PF B takes the lock and proceeds to perform its own access.
 *  - PF A unlocks the per-port lock while PF B is still working (!).
 *  - The MCP takes the per-port lock and corrupts PF B's access (and/or has
 *    its own access corrupted by PF B).
 */
1108static int
1109bxe_acquire_nvram_lock(struct bxe_softc *sc)
1110{
1111    int port = SC_PORT(sc);
1112    int count, i;
1113    uint32_t val = 0;
1114
1115    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
1116    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1117
1118    /* adjust timeout for emulation/FPGA */
1119    count = NVRAM_TIMEOUT_COUNT;
1120    if (CHIP_REV_IS_SLOW(sc)) {
1121        count *= 100;
1122    }
1123
1124    /* request access to nvram interface */
1125    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1126           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
1127
1128    for (i = 0; i < count*10; i++) {
1129        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1130        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1131            break;
1132        }
1133
1134        DELAY(5);
1135    }
1136
1137    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1138        BLOGE(sc, "Cannot get access to nvram interface "
1139            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1140            port, val);
1141        return (-1);
1142    }
1143
1144    return (0);
1145}
1146
1147static int
1148bxe_release_nvram_lock(struct bxe_softc *sc)
1149{
1150    int port = SC_PORT(sc);
1151    int count, i;
1152    uint32_t val = 0;
1153
1154    /* adjust timeout for emulation/FPGA */
1155    count = NVRAM_TIMEOUT_COUNT;
1156    if (CHIP_REV_IS_SLOW(sc)) {
1157        count *= 100;
1158    }
1159
1160    /* relinquish nvram interface */
1161    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
1162           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
1163
1164    for (i = 0; i < count*10; i++) {
1165        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
1166        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
1167            break;
1168        }
1169
1170        DELAY(5);
1171    }
1172
1173    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
1174        BLOGE(sc, "Cannot free access to nvram interface "
1175            "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n",
1176            port, val);
1177        return (-1);
1178    }
1179
1180    /* release HW lock: protect against other PFs in PF Direct Assignment */
1181    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);
1182
1183    return (0);
1184}
1185
1186static void
1187bxe_enable_nvram_access(struct bxe_softc *sc)
1188{
1189    uint32_t val;
1190
1191    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1192
1193    /* enable both bits, even on read */
1194    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1195           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
1196}
1197
1198static void
1199bxe_disable_nvram_access(struct bxe_softc *sc)
1200{
1201    uint32_t val;
1202
1203    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
1204
1205    /* disable both bits, even after read */
1206    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
1207           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
1208                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
1209}
1210
1211static int
1212bxe_nvram_read_dword(struct bxe_softc *sc,
1213                     uint32_t         offset,
1214                     uint32_t         *ret_val,
1215                     uint32_t         cmd_flags)
1216{
1217    int count, i, rc;
1218    uint32_t val;
1219
1220    /* build the command word */
1221    cmd_flags |= MCPR_NVM_COMMAND_DOIT;
1222
1223    /* need to clear DONE bit separately */
1224    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1225
1226    /* address of the NVRAM to read from */
1227    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1228           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1229
1230    /* issue a read command */
1231    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1232
1233    /* adjust timeout for emulation/FPGA */
1234    count = NVRAM_TIMEOUT_COUNT;
1235    if (CHIP_REV_IS_SLOW(sc)) {
1236        count *= 100;
1237    }
1238
1239    /* wait for completion */
1240    *ret_val = 0;
1241    rc = -1;
1242    for (i = 0; i < count; i++) {
1243        DELAY(5);
1244        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1245
1246        if (val & MCPR_NVM_COMMAND_DONE) {
1247            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
1248            /* we read nvram data in cpu order
1249             * but ethtool sees it as an array of bytes
1250             * converting to big-endian will do the work
1251             */
1252            *ret_val = htobe32(val);
1253            rc = 0;
1254            break;
1255        }
1256    }
1257
1258    if (rc == -1) {
1259        BLOGE(sc, "nvram read timeout expired "
1260            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1261            offset, cmd_flags, val);
1262    }
1263
1264    return (rc);
1265}
1266
1267static int
1268bxe_nvram_read(struct bxe_softc *sc,
1269               uint32_t         offset,
1270               uint8_t          *ret_buf,
1271               int              buf_size)
1272{
1273    uint32_t cmd_flags;
1274    uint32_t val;
1275    int rc;
1276
1277    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
1278        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1279              offset, buf_size);
1280        return (-1);
1281    }
1282
1283    if ((offset + buf_size) > sc->devinfo.flash_size) {
1284        BLOGE(sc, "Invalid parameter, "
1285                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1286              offset, buf_size, sc->devinfo.flash_size);
1287        return (-1);
1288    }
1289
1290    /* request access to nvram interface */
1291    rc = bxe_acquire_nvram_lock(sc);
1292    if (rc) {
1293        return (rc);
1294    }
1295
1296    /* enable access to nvram interface */
1297    bxe_enable_nvram_access(sc);
1298
1299    /* read the first word(s) */
1300    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1301    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
1302        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1303        memcpy(ret_buf, &val, 4);
1304
1305        /* advance to the next dword */
1306        offset += sizeof(uint32_t);
1307        ret_buf += sizeof(uint32_t);
1308        buf_size -= sizeof(uint32_t);
1309        cmd_flags = 0;
1310    }
1311
1312    if (rc == 0) {
1313        cmd_flags |= MCPR_NVM_COMMAND_LAST;
1314        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
1315        memcpy(ret_buf, &val, 4);
1316    }
1317
1318    /* disable access to nvram interface */
1319    bxe_disable_nvram_access(sc);
1320    bxe_release_nvram_lock(sc);
1321
1322    return (rc);
1323}
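
/*
 * Illustrative sketch (not compiled): reading a dword-aligned region of the
 * flash through bxe_nvram_read(). The offset and length must be multiples of
 * 4 and stay within sc->devinfo.flash_size; the offset used here is a
 * placeholder.
 */
#if 0
static int
bxe_nvram_read_example(struct bxe_softc *sc)
{
    uint8_t buf[16];

    /* read 16 bytes starting at a hypothetical offset 0x100 */
    return (bxe_nvram_read(sc, 0x100, buf, sizeof(buf)));
}
#endif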
1324
1325static int
1326bxe_nvram_write_dword(struct bxe_softc *sc,
1327                      uint32_t         offset,
1328                      uint32_t         val,
1329                      uint32_t         cmd_flags)
1330{
1331    int count, i, rc;
1332
1333    /* build the command word */
1334    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);
1335
1336    /* need to clear DONE bit separately */
1337    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
1338
1339    /* write the data */
1340    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);
1341
1342    /* address of the NVRAM to write to */
1343    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
1344           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
1345
1346    /* issue the write command */
1347    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
1348
1349    /* adjust timeout for emulation/FPGA */
1350    count = NVRAM_TIMEOUT_COUNT;
1351    if (CHIP_REV_IS_SLOW(sc)) {
1352        count *= 100;
1353    }
1354
1355    /* wait for completion */
1356    rc = -1;
1357    for (i = 0; i < count; i++) {
1358        DELAY(5);
1359        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
1360        if (val & MCPR_NVM_COMMAND_DONE) {
1361            rc = 0;
1362            break;
1363        }
1364    }
1365
1366    if (rc == -1) {
1367        BLOGE(sc, "nvram write timeout expired "
1368            "(offset 0x%x cmd_flags 0x%x val 0x%x)\n",
1369            offset, cmd_flags, val);
1370    }
1371
1372    return (rc);
1373}
1374
1375#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
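
/*
 * e.g. BYTE_OFFSET(0x102) == 16: byte 2 within its dword sits at bit 16,
 * which is how bxe_nvram_write1() below splices a single byte into the
 * containing dword.
 */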
1376
1377static int
1378bxe_nvram_write1(struct bxe_softc *sc,
1379                 uint32_t         offset,
1380                 uint8_t          *data_buf,
1381                 int              buf_size)
1382{
1383    uint32_t cmd_flags;
1384    uint32_t align_offset;
1385    uint32_t val;
1386    int rc;
1387
1388    if ((offset + buf_size) > sc->devinfo.flash_size) {
1389        BLOGE(sc, "Invalid parameter, "
1390                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1391              offset, buf_size, sc->devinfo.flash_size);
1392        return (-1);
1393    }
1394
1395    /* request access to nvram interface */
1396    rc = bxe_acquire_nvram_lock(sc);
1397    if (rc) {
1398        return (rc);
1399    }
1400
1401    /* enable access to nvram interface */
1402    bxe_enable_nvram_access(sc);
1403
1404    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
1405    align_offset = (offset & ~0x03);
1406    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);
1407
1408    if (rc == 0) {
1409        val &= ~(0xff << BYTE_OFFSET(offset));
1410        val |= (*data_buf << BYTE_OFFSET(offset));
1411
1412        /* nvram data is returned as an array of bytes
1413         * convert it back to cpu order
1414         */
1415        val = be32toh(val);
1416
1417        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
1418    }
1419
1420    /* disable access to nvram interface */
1421    bxe_disable_nvram_access(sc);
1422    bxe_release_nvram_lock(sc);
1423
1424    return (rc);
1425}
1426
1427static int
1428bxe_nvram_write(struct bxe_softc *sc,
1429                uint32_t         offset,
1430                uint8_t          *data_buf,
1431                int              buf_size)
1432{
1433    uint32_t cmd_flags;
1434    uint32_t val;
1435    uint32_t written_so_far;
1436    int rc;
1437
1438    if (buf_size == 1) {
1439        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
1440    }
1441
1442    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
1443        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
1444              offset, buf_size);
1445        return (-1);
1446    }
1447
1448    if (buf_size == 0) {
1449        return (0); /* nothing to do */
1450    }
1451
1452    if ((offset + buf_size) > sc->devinfo.flash_size) {
1453        BLOGE(sc, "Invalid parameter, "
1454                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
1455              offset, buf_size, sc->devinfo.flash_size);
1456        return (-1);
1457    }
1458
1459    /* request access to nvram interface */
1460    rc = bxe_acquire_nvram_lock(sc);
1461    if (rc) {
1462        return (rc);
1463    }
1464
1465    /* enable access to nvram interface */
1466    bxe_enable_nvram_access(sc);
1467
1468    written_so_far = 0;
1469    cmd_flags = MCPR_NVM_COMMAND_FIRST;
1470    while ((written_so_far < buf_size) && (rc == 0)) {
1471        if (written_so_far == (buf_size - sizeof(uint32_t))) {
1472            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1473        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
1474            cmd_flags |= MCPR_NVM_COMMAND_LAST;
1475        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
1476            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
1477        }
1478
1479        memcpy(&val, data_buf, 4);
1480
1481        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);
1482
1483        /* advance to the next dword */
1484        offset += sizeof(uint32_t);
1485        data_buf += sizeof(uint32_t);
1486        written_so_far += sizeof(uint32_t);
1487        cmd_flags = 0;
1488    }
1489
1490    /* disable access to nvram interface */
1491    bxe_disable_nvram_access(sc);
1492    bxe_release_nvram_lock(sc);
1493
1494    return (rc);
1495}
1496
1497/* copy command into DMAE command memory and set DMAE command Go */
1498void
1499bxe_post_dmae(struct bxe_softc    *sc,
1500              struct dmae_cmd *dmae,
1501              int                 idx)
1502{
1503    uint32_t cmd_offset;
1504    int i;
1505
1506    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx));
1507    for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) {
1508        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
1509    }
1510
1511    REG_WR(sc, dmae_reg_go_c[idx], 1);
1512}
1513
1514uint32_t
1515bxe_dmae_opcode_add_comp(uint32_t opcode,
1516                         uint8_t  comp_type)
1517{
1518    return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) |
1519                      DMAE_CMD_C_TYPE_ENABLE));
1520}
1521
1522uint32_t
1523bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
1524{
1525    return (opcode & ~DMAE_CMD_SRC_RESET);
1526}
1527
1528uint32_t
1529bxe_dmae_opcode(struct bxe_softc *sc,
1530                uint8_t          src_type,
1531                uint8_t          dst_type,
1532                uint8_t          with_comp,
1533                uint8_t          comp_type)
1534{
1535    uint32_t opcode = 0;
1536
1537    opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) |
1538               (dst_type << DMAE_CMD_DST_SHIFT));
1539
1540    opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
1541
1542    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
1543
1544    opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) |
1545               (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT));
1546
1547    opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT);
1548
1549#ifdef __BIG_ENDIAN
1550    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
1551#else
1552    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
1553#endif
1554
1555    if (with_comp) {
1556        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
1557    }
1558
1559    return (opcode);
1560}
1561
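/*
 * Prepare a DMAE command that completes to host memory: set the opcode and
 * point the completion address/value at the slowpath wb_comp word.
 */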
1562static void
1563bxe_prep_dmae_with_comp(struct bxe_softc    *sc,
1564                        struct dmae_cmd *dmae,
1565                        uint8_t             src_type,
1566                        uint8_t             dst_type)
1567{
1568    memset(dmae, 0, sizeof(struct dmae_cmd));
1569
1570    /* set the opcode */
1571    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
1572                                   TRUE, DMAE_COMP_PCI);
1573
1574    /* fill in the completion parameters */
1575    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
1576    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
1577    dmae->comp_val     = DMAE_COMP_VAL;
1578}
1579
1580/* issue a DMAE command over the init channel and wait for completion */
1581static int
1582bxe_issue_dmae_with_comp(struct bxe_softc    *sc,
1583                         struct dmae_cmd *dmae)
1584{
1585    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
1586    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;
1587
1588    BXE_DMAE_LOCK(sc);
1589
1590    /* reset completion */
1591    *wb_comp = 0;
1592
1593    /* post the command on the channel used for initializations */
1594    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));
1595
1596    /* wait for completion */
1597    DELAY(5);
1598
1599    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
1600        if (!timeout ||
1601            (sc->recovery_state != BXE_RECOVERY_DONE &&
1602             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
1603            BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n",
1604                *wb_comp, sc->recovery_state);
1605            BXE_DMAE_UNLOCK(sc);
1606            return (DMAE_TIMEOUT);
1607        }
1608
1609        timeout--;
1610        DELAY(50);
1611    }
1612
1613    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
1614        BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n",
1615                *wb_comp, sc->recovery_state);
1616        BXE_DMAE_UNLOCK(sc);
1617        return (DMAE_PCI_ERROR);
1618    }
1619
1620    BXE_DMAE_UNLOCK(sc);
1621    return (0);
1622}
1623
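/*
 * Read up to 4 dwords from a GRC address into the slowpath wb_data buffer
 * using DMAE. If DMAE is not ready yet, fall back to direct (or indirect,
 * on E1) register reads.
 */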
1624void
1625bxe_read_dmae(struct bxe_softc *sc,
1626              uint32_t         src_addr,
1627              uint32_t         len32)
1628{
1629    struct dmae_cmd dmae;
1630    uint32_t *data;
1631    int i, rc;
1632
1633    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));
1634
1635    if (!sc->dmae_ready) {
1636        data = BXE_SP(sc, wb_data[0]);
1637
1638        for (i = 0; i < len32; i++) {
1639            data[i] = (CHIP_IS_E1(sc)) ?
1640                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
1641                          REG_RD(sc, (src_addr + (i * 4)));
1642        }
1643
1644        return;
1645    }
1646
1647    /* set opcode and fixed command fields */
1648    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
1649
1650    /* fill in addresses and len */
1651    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
1652    dmae.src_addr_hi = 0;
1653    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
1654    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
1655    dmae.len         = len32;
1656
1657    /* issue the command and wait for completion */
1658    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1659        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1660    }
1661}
1662
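/*
 * Write len32 dwords from a host DMA address to a GRC address using DMAE.
 * If DMAE is not ready yet, fall back to string/indirect register writes of
 * the slowpath wb_data buffer.
 */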
1663void
1664bxe_write_dmae(struct bxe_softc *sc,
1665               bus_addr_t       dma_addr,
1666               uint32_t         dst_addr,
1667               uint32_t         len32)
1668{
1669    struct dmae_cmd dmae;
1670    int rc;
1671
1672    if (!sc->dmae_ready) {
1673        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));
1674
1675        if (CHIP_IS_E1(sc)) {
1676            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1677        } else {
1678            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
1679        }
1680
1681        return;
1682    }
1683
1684    /* set opcode and fixed command fields */
1685    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
1686
1687    /* fill in addresses and len */
1688    dmae.src_addr_lo = U64_LO(dma_addr);
1689    dmae.src_addr_hi = U64_HI(dma_addr);
1690    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
1691    dmae.dst_addr_hi = 0;
1692    dmae.len         = len32;
1693
1694    /* issue the command and wait for completion */
1695    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
1696        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
1697    }
1698}
1699
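/*
 * DMAE-write an arbitrarily long buffer to a GRC address by splitting the
 * transfer into chunks of at most DMAE_LEN32_WR_MAX dwords.
 */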
1700void
1701bxe_write_dmae_phys_len(struct bxe_softc *sc,
1702                        bus_addr_t       phys_addr,
1703                        uint32_t         addr,
1704                        uint32_t         len)
1705{
1706    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
1707    int offset = 0;
1708
1709    while (len > dmae_wr_max) {
1710        bxe_write_dmae(sc,
1711                       (phys_addr + offset), /* src DMA address */
1712                       (addr + offset),      /* dst GRC address */
1713                       dmae_wr_max);
1714        offset += (dmae_wr_max * 4);
1715        len -= dmae_wr_max;
1716    }
1717
1718    bxe_write_dmae(sc,
1719                   (phys_addr + offset), /* src DMA address */
1720                   (addr + offset),      /* dst GRC address */
1721                   len);
1722}
1723
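/*
 * Program the CDU validation bytes in the ustorm and xstorm sections of an
 * ethernet connection context for the given CID.
 */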
1724void
1725bxe_set_ctx_validation(struct bxe_softc   *sc,
1726                       struct eth_context *cxt,
1727                       uint32_t           cid)
1728{
1729    /* ustorm cxt validation */
1730    cxt->ustorm_ag_context.cdu_usage =
1731        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1732            CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
1733    /* xcontext validation */
1734    cxt->xstorm_ag_context.cdu_reserved =
1735        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
1736            CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
1737}
1738
1739static void
1740bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
1741                            uint8_t          port,
1742                            uint8_t          fw_sb_id,
1743                            uint8_t          sb_index,
1744                            uint8_t          ticks)
1745{
1746    uint32_t addr =
1747        (BAR_CSTRORM_INTMEM +
1748         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));
1749
1750    REG_WR8(sc, addr, ticks);
1751
1752    BLOGD(sc, DBG_LOAD,
1753          "port %d fw_sb_id %d sb_index %d ticks %d\n",
1754          port, fw_sb_id, sb_index, ticks);
1755}
1756
1757static void
1758bxe_storm_memset_hc_disable(struct bxe_softc *sc,
1759                            uint8_t          port,
1760                            uint16_t         fw_sb_id,
1761                            uint8_t          sb_index,
1762                            uint8_t          disable)
1763{
1764    uint32_t enable_flag =
1765        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
1766    uint32_t addr =
1767        (BAR_CSTRORM_INTMEM +
1768         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
1769    uint8_t flags;
1770
1771    /* clear and set */
1772    flags = REG_RD8(sc, addr);
1773    flags &= ~HC_INDEX_DATA_HC_ENABLED;
1774    flags |= enable_flag;
1775    REG_WR8(sc, addr, flags);
1776
1777    BLOGD(sc, DBG_LOAD,
1778          "port %d fw_sb_id %d sb_index %d disable %d\n",
1779          port, fw_sb_id, sb_index, disable);
1780}
1781
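/*
 * Update interrupt coalescing for a single status block index: program the
 * timeout derived from the requested microseconds and enable or disable
 * host coalescing for that index (a zero timeout also disables it).
 */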
1782void
1783bxe_update_coalesce_sb_index(struct bxe_softc *sc,
1784                             uint8_t          fw_sb_id,
1785                             uint8_t          sb_index,
1786                             uint8_t          disable,
1787                             uint16_t         usec)
1788{
1789    int port = SC_PORT(sc);
1790    uint8_t ticks = (usec / 4); /* XXX ??? */
1791
1792    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);
1793
1794    disable = (disable) ? 1 : ((usec) ? 0 : 1);
1795    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
1796}
1797
1798void
1799elink_cb_udelay(struct bxe_softc *sc,
1800                uint32_t         usecs)
1801{
1802    DELAY(usecs);
1803}
1804
1805uint32_t
1806elink_cb_reg_read(struct bxe_softc *sc,
1807                  uint32_t         reg_addr)
1808{
1809    return (REG_RD(sc, reg_addr));
1810}
1811
1812void
1813elink_cb_reg_write(struct bxe_softc *sc,
1814                   uint32_t         reg_addr,
1815                   uint32_t         val)
1816{
1817    REG_WR(sc, reg_addr, val);
1818}
1819
1820void
1821elink_cb_reg_wb_write(struct bxe_softc *sc,
1822                      uint32_t         offset,
1823                      uint32_t         *wb_write,
1824                      uint16_t         len)
1825{
1826    REG_WR_DMAE(sc, offset, wb_write, len);
1827}
1828
1829void
1830elink_cb_reg_wb_read(struct bxe_softc *sc,
1831                     uint32_t         offset,
1832                     uint32_t         *wb_write,
1833                     uint16_t         len)
1834{
1835    REG_RD_DMAE(sc, offset, wb_write, len);
1836}
1837
1838uint8_t
1839elink_cb_path_id(struct bxe_softc *sc)
1840{
1841    return (SC_PATH(sc));
1842}
1843
1844void
1845elink_cb_event_log(struct bxe_softc     *sc,
1846                   const elink_log_id_t elink_log_id,
1847                   ...)
1848{
1849    /* XXX */
1850    BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id);
1851}
1852
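/*
 * Configure one of the two software-controllable SPIO pins (SPIO4/SPIO5):
 * drive it low, drive it high, or float it as an input. The SPIO hardware
 * lock is held across the read-modify-write of MISC_REG_SPIO.
 */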
1853static int
1854bxe_set_spio(struct bxe_softc *sc,
1855             int              spio,
1856             uint32_t         mode)
1857{
1858    uint32_t spio_reg;
1859
1860    /* Only 2 SPIOs are configurable */
1861    if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
1862        BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode);
1863        return (-1);
1864    }
1865
1866    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1867
1868    /* read SPIO and mask except the float bits */
1869    spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
1870
1871    switch (mode) {
1872    case MISC_SPIO_OUTPUT_LOW:
1873        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
1874        /* clear FLOAT and set CLR */
1875        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1876        spio_reg |=  (spio << MISC_SPIO_CLR_POS);
1877        break;
1878
1879    case MISC_SPIO_OUTPUT_HIGH:
1880        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
1881        /* clear FLOAT and set SET */
1882        spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
1883        spio_reg |=  (spio << MISC_SPIO_SET_POS);
1884        break;
1885
1886    case MISC_SPIO_INPUT_HI_Z:
1887        BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
1888        /* set FLOAT */
1889        spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
1890        break;
1891
1892    default:
1893        break;
1894    }
1895
1896    REG_WR(sc, MISC_REG_SPIO, spio_reg);
1897    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO);
1898
1899    return (0);
1900}
1901
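/*
 * Return the current value (0 or 1) of a GPIO pin, applying the port swap
 * when both the NIG port-swap and strap-override registers are set.
 */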
1902static int
1903bxe_gpio_read(struct bxe_softc *sc,
1904              int              gpio_num,
1905              uint8_t          port)
1906{
1907    /* The GPIO should be swapped if swap register is set and active */
1908    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1909                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1910    int gpio_shift = (gpio_num +
1911                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1912    uint32_t gpio_mask = (1 << gpio_shift);
1913    uint32_t gpio_reg;
1914
1915    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1916        BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
1917            " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift,
1918            gpio_mask);
1919        return (-1);
1920    }
1921
1922    /* read GPIO value */
1923    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
1924
1925    /* get the requested pin value */
1926    return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0;
1927}
1928
1929static int
1930bxe_gpio_write(struct bxe_softc *sc,
1931               int              gpio_num,
1932               uint32_t         mode,
1933               uint8_t          port)
1934{
1935    /* The GPIO should be swapped if swap register is set and active */
1936    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
1937                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
1938    int gpio_shift = (gpio_num +
1939                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
1940    uint32_t gpio_mask = (1 << gpio_shift);
1941    uint32_t gpio_reg;
1942
1943    if (gpio_num > MISC_REGISTERS_GPIO_3) {
1944        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
1945            " gpio_shift %d gpio_mask 0x%x\n",
1946            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
1947        return (-1);
1948    }
1949
1950    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1951
1952    /* read GPIO and mask except the float bits */
1953    gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1954
1955    switch (mode) {
1956    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1957        BLOGD(sc, DBG_PHY,
1958              "Set GPIO %d (shift %d) -> output low\n",
1959              gpio_num, gpio_shift);
1960        /* clear FLOAT and set CLR */
1961        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1962        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1963        break;
1964
1965    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1966        BLOGD(sc, DBG_PHY,
1967              "Set GPIO %d (shift %d) -> output high\n",
1968              gpio_num, gpio_shift);
1969        /* clear FLOAT and set SET */
1970        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1971        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1972        break;
1973
1974    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1975        BLOGD(sc, DBG_PHY,
1976              "Set GPIO %d (shift %d) -> input\n",
1977              gpio_num, gpio_shift);
1978        /* set FLOAT */
1979        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1980        break;
1981
1982    default:
1983        break;
1984    }
1985
1986    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
1987    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
1988
1989    return (0);
1990}
1991
1992static int
1993bxe_gpio_mult_write(struct bxe_softc *sc,
1994                    uint8_t          pins,
1995                    uint32_t         mode)
1996{
1997    uint32_t gpio_reg;
1998
1999    /* any port swapping should be handled by caller */
2000
2001    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2002
2003    /* read GPIO and mask except the float bits */
2004    gpio_reg = REG_RD(sc, MISC_REG_GPIO);
2005    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2006    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2007    gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2008
2009    switch (mode) {
2010    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2011        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
2012        /* set CLR */
2013        gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2014        break;
2015
2016    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2017        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
2018        /* set SET */
2019        gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2020        break;
2021
2022    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2023        BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
2024        /* set FLOAT */
2025        gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2026        break;
2027
2028    default:
2029        BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
2030            " gpio_reg 0x%x\n", pins, mode, gpio_reg);
2031        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2032        return (-1);
2033    }
2034
2035    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
2036    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2037
2038    return (0);
2039}
2040
2041static int
2042bxe_gpio_int_write(struct bxe_softc *sc,
2043                   int              gpio_num,
2044                   uint32_t         mode,
2045                   uint8_t          port)
2046{
2047    /* The GPIO should be swapped if swap register is set and active */
2048    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
2049                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
2050    int gpio_shift = (gpio_num +
2051                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
2052    uint32_t gpio_mask = (1 << gpio_shift);
2053    uint32_t gpio_reg;
2054
2055    if (gpio_num > MISC_REGISTERS_GPIO_3) {
2056        BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
2057            " gpio_shift %d gpio_mask 0x%x\n",
2058            gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask);
2059        return (-1);
2060    }
2061
2062    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2063
2064    /* read GPIO int */
2065    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);
2066
2067    switch (mode) {
2068    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2069        BLOGD(sc, DBG_PHY,
2070              "Clear GPIO INT %d (shift %d) -> output low\n",
2071              gpio_num, gpio_shift);
2072        /* clear SET and set CLR */
2073        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2074        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2075        break;
2076
2077    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2078        BLOGD(sc, DBG_PHY,
2079              "Set GPIO INT %d (shift %d) -> output high\n",
2080              gpio_num, gpio_shift);
2081        /* clear CLR and set SET */
2082        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2083        gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2084        break;
2085
2086    default:
2087        break;
2088    }
2089
2090    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
2091    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
2092
2093    return (0);
2094}
2095
2096uint32_t
2097elink_cb_gpio_read(struct bxe_softc *sc,
2098                   uint16_t         gpio_num,
2099                   uint8_t          port)
2100{
2101    return (bxe_gpio_read(sc, gpio_num, port));
2102}
2103
2104uint8_t
2105elink_cb_gpio_write(struct bxe_softc *sc,
2106                    uint16_t         gpio_num,
2107                    uint8_t          mode, /* 0=low 1=high */
2108                    uint8_t          port)
2109{
2110    return (bxe_gpio_write(sc, gpio_num, mode, port));
2111}
2112
2113uint8_t
2114elink_cb_gpio_mult_write(struct bxe_softc *sc,
2115                         uint8_t          pins,
2116                         uint8_t          mode) /* 0=low 1=high */
2117{
2118    return (bxe_gpio_mult_write(sc, pins, mode));
2119}
2120
2121uint8_t
2122elink_cb_gpio_int_write(struct bxe_softc *sc,
2123                        uint16_t         gpio_num,
2124                        uint8_t          mode, /* 0=low 1=high */
2125                        uint8_t          port)
2126{
2127    return (bxe_gpio_int_write(sc, gpio_num, mode, port));
2128}
2129
2130void
2131elink_cb_notify_link_changed(struct bxe_softc *sc)
2132{
2133    REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 +
2134                (SC_FUNC(sc) * sizeof(uint32_t))), 1);
2135}
2136
2137/* send the MCP a request, block until there is a reply */
2138uint32_t
2139elink_cb_fw_command(struct bxe_softc *sc,
2140                    uint32_t         command,
2141                    uint32_t         param)
2142{
2143    int mb_idx = SC_FW_MB_IDX(sc);
2144    uint32_t seq;
2145    uint32_t rc = 0;
2146    uint32_t cnt = 1;
2147    uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10;
2148
2149    BXE_FWMB_LOCK(sc);
2150
2151    seq = ++sc->fw_seq;
2152    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
2153    SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
2154
2155    BLOGD(sc, DBG_PHY,
2156          "wrote command 0x%08x to FW MB param 0x%08x\n",
2157          (command | seq), param);
2158
2159    /* Let the FW do its magic. Give it up to 5 seconds... */
2160    do {
2161        DELAY(delay * 1000);
2162        rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header);
2163    } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2164
2165    BLOGD(sc, DBG_PHY,
2166          "[after %d ms] read 0x%x seq 0x%x from FW MB\n",
2167          cnt*delay, rc, seq);
2168
2169    /* is this a reply to our command? */
2170    if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
2171        rc &= FW_MSG_CODE_MASK;
2172    } else {
2173        /* Ruh-roh! */
2174        BLOGE(sc, "FW failed to respond!\n");
2175        // XXX bxe_fw_dump(sc);
2176        rc = 0;
2177    }
2178
2179    BXE_FWMB_UNLOCK(sc);
2180    return (rc);
2181}
2182
2183static uint32_t
2184bxe_fw_command(struct bxe_softc *sc,
2185               uint32_t         command,
2186               uint32_t         param)
2187{
2188    return (elink_cb_fw_command(sc, command, param));
2189}
2190
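/*
 * Write a 64-bit DMA address into two consecutive 32-bit locations in STORM
 * memory (low dword at 'addr', high dword at 'addr' + 4).
 */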
2191static void
2192__storm_memset_dma_mapping(struct bxe_softc *sc,
2193                           uint32_t         addr,
2194                           bus_addr_t       mapping)
2195{
2196    REG_WR(sc, addr, U64_LO(mapping));
2197    REG_WR(sc, (addr + 4), U64_HI(mapping));
2198}
2199
2200static void
2201storm_memset_spq_addr(struct bxe_softc *sc,
2202                      bus_addr_t       mapping,
2203                      uint16_t         abs_fid)
2204{
2205    uint32_t addr = (XSEM_REG_FAST_MEMORY +
2206                     XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid));
2207    __storm_memset_dma_mapping(sc, addr, mapping);
2208}
2209
2210static void
2211storm_memset_vf_to_pf(struct bxe_softc *sc,
2212                      uint16_t         abs_fid,
2213                      uint16_t         pf_id)
2214{
2215    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2216    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2217    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2218    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id);
2219}
2220
2221static void
2222storm_memset_func_en(struct bxe_softc *sc,
2223                     uint16_t         abs_fid,
2224                     uint8_t          enable)
2225{
2226    REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2227    REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2228    REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2229    REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable);
2230}
2231
2232static void
2233storm_memset_eq_data(struct bxe_softc       *sc,
2234                     struct event_ring_data *eq_data,
2235                     uint16_t               pfid)
2236{
2237    uint32_t addr;
2238    size_t size;
2239
2240    addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid));
2241    size = sizeof(struct event_ring_data);
2242    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data);
2243}
2244
2245static void
2246storm_memset_eq_prod(struct bxe_softc *sc,
2247                     uint16_t         eq_prod,
2248                     uint16_t         pfid)
2249{
2250    uint32_t addr = (BAR_CSTRORM_INTMEM +
2251                     CSTORM_EVENT_RING_PROD_OFFSET(pfid));
2252    REG_WR16(sc, addr, eq_prod);
2253}
2254
2255/*
2256 * Post a slowpath command.
2257 *
2258 * A slowpath command is used to propagate a configuration change through
2259 * the controller in a controlled manner, allowing each STORM processor and
2260 * other H/W blocks to phase in the change.  The commands sent on the
2261 * slowpath are referred to as ramrods.  Depending on the ramrod used the
2262 * completion of the ramrod will occur in different ways.  Here's a
2263 * breakdown of ramrods and how they complete:
2264 *
2265 * RAMROD_CMD_ID_ETH_PORT_SETUP
2266 *   Used to setup the leading connection on a port.  Completes on the
2267 *   Receive Completion Queue (RCQ) of that port (typically fp[0]).
2268 *
2269 * RAMROD_CMD_ID_ETH_CLIENT_SETUP
2270 *   Used to setup an additional connection on a port.  Completes on the
2271 *   RCQ of the multi-queue/RSS connection being initialized.
2272 *
2273 * RAMROD_CMD_ID_ETH_STAT_QUERY
2274 *   Used to force the storm processors to update the statistics database
2275 *   in host memory.  This ramrod is sent on the leading connection CID and
2276 *   completes as an index increment of the CSTORM on the default status
2277 *   block.
2278 *
2279 * RAMROD_CMD_ID_ETH_UPDATE
2280 *   Used to update the state of the leading connection, usually to update
2281 *   the RSS indirection table.  Completes on the RCQ of the leading
2282 *   connection. (Not currently used under FreeBSD until OS support becomes
2283 *   available.)
2284 *
2285 * RAMROD_CMD_ID_ETH_HALT
2286 *   Used when tearing down a connection prior to driver unload.  Completes
2287 *   on the RCQ of the multi-queue/RSS connection being torn down.  Don't
2288 *   use this on the leading connection.
2289 *
2290 * RAMROD_CMD_ID_ETH_SET_MAC
2291 *   Sets the Unicast/Broadcast/Multicast used by the port.  Completes on
2292 *   the RCQ of the leading connection.
2293 *
2294 * RAMROD_CMD_ID_ETH_CFC_DEL
2295 *   Used when tearing down a connection prior to driver unload.  Completes
2296 *   on the RCQ of the leading connection (since the current connection
2297 *   has been completely removed from controller memory).
2298 *
2299 * RAMROD_CMD_ID_ETH_PORT_DEL
2300 *   Used to tear down the leading connection prior to driver unload,
2301 *   typically fp[0].  Completes as an index increment of the CSTORM on the
2302 *   default status block.
2303 *
2304 * RAMROD_CMD_ID_ETH_FORWARD_SETUP
2305 *   Used for connection offload.  Completes on the RCQ of the multi-queue
2306 *   RSS connection that is being offloaded.  (Not currently used under
2307 *   FreeBSD.)
2308 *
2309 * There can only be one command pending per function.
2310 *
2311 * Returns:
2312 *   0 = Success, !0 = Failure.
2313 */
2314
2315/* must be called under the spq lock */
2316static inline
2317struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc)
2318{
2319    struct eth_spe *next_spe = sc->spq_prod_bd;
2320
2321    if (sc->spq_prod_bd == sc->spq_last_bd) {
2322        /* wrap back to the first eth_spq */
2323        sc->spq_prod_bd = sc->spq;
2324        sc->spq_prod_idx = 0;
2325    } else {
2326        sc->spq_prod_bd++;
2327        sc->spq_prod_idx++;
2328    }
2329
2330    return (next_spe);
2331}
2332
2333/* must be called under the spq lock */
2334static inline
2335void bxe_sp_prod_update(struct bxe_softc *sc)
2336{
2337    int func = SC_FUNC(sc);
2338
2339    /*
2340     * Make sure that BD data is updated before writing the producer.
2341     * BD data is written to the memory, the producer is read from the
2342     * memory, thus we need a full memory barrier to ensure the ordering.
2343     */
2344    mb();
2345
2346    REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
2347             sc->spq_prod_idx);
2348
2349    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2350                      BUS_SPACE_BARRIER_WRITE);
2351}
2352
2353/**
2354 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2355 *
2356 * @cmd:      command to check
2357 * @cmd_type: command type
2358 */
2359static inline
2360int bxe_is_contextless_ramrod(int cmd,
2361                              int cmd_type)
2362{
2363    if ((cmd_type == NONE_CONNECTION_TYPE) ||
2364        (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
2365        (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
2366        (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
2367        (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
2368        (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
2369        (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) {
2370        return (TRUE);
2371    } else {
2372        return (FALSE);
2373    }
2374}
2375
2376/**
2377 * bxe_sp_post - place a single command on an SP ring
2378 *
2379 * @sc:         driver handle
2380 * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
2381 * @cid:        SW CID the command is related to
2382 * @data_hi:    command private data address (high 32 bits)
2383 * @data_lo:    command private data address (low 32 bits)
2384 * @cmd_type:   command type (e.g. NONE, ETH)
2385 *
2386 * SP data is handled as if it's always an address pair, thus data fields are
2387 * not swapped to little endian in upper functions. Instead this function swaps
2388 * data as if it's two uint32 fields.
2389 */
2390int
2391bxe_sp_post(struct bxe_softc *sc,
2392            int              command,
2393            int              cid,
2394            uint32_t         data_hi,
2395            uint32_t         data_lo,
2396            int              cmd_type)
2397{
2398    struct eth_spe *spe;
2399    uint16_t type;
2400    int common;
2401
2402    common = bxe_is_contextless_ramrod(command, cmd_type);
2403
2404    BXE_SP_LOCK(sc);
2405
2406    if (common) {
2407        if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2408            BLOGE(sc, "EQ ring is full!\n");
2409            BXE_SP_UNLOCK(sc);
2410            return (-1);
2411        }
2412    } else {
2413        if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2414            BLOGE(sc, "SPQ ring is full!\n");
2415            BXE_SP_UNLOCK(sc);
2416            return (-1);
2417        }
2418    }
2419
2420    spe = bxe_sp_get_next(sc);
2421
2422    /* the CID needs the port number encoded in it */
2423    spe->hdr.conn_and_cmd_data =
2424        htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid));
2425
2426    type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE;
2427
2428    /* TBD: Check if it works for VFs */
2429    type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) &
2430             SPE_HDR_T_FUNCTION_ID);
2431
2432    spe->hdr.type = htole16(type);
2433
2434    spe->data.update_data_addr.hi = htole32(data_hi);
2435    spe->data.update_data_addr.lo = htole32(data_lo);
2436
2437    /*
2438     * It's OK if the actual decrement reaches memory somewhere between
2439     * the lock and the unlock, so no further explicit memory barrier
2440     * is needed.
2441     */
2442    if (common) {
2443        atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2444    } else {
2445        atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2446    }
2447
2448    BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2449    BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2450          BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
2451    BLOGD(sc, DBG_SP,
2452          "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
2453          sc->spq_prod_idx,
2454          (uint32_t)U64_HI(sc->spq_dma.paddr),
2455          (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2456          command,
2457          common,
2458          HW_CID(sc, cid),
2459          data_hi,
2460          data_lo,
2461          type,
2462          atomic_load_acq_long(&sc->cq_spq_left),
2463          atomic_load_acq_long(&sc->eq_spq_left));
2464
2465    bxe_sp_prod_update(sc);
2466
2467    BXE_SP_UNLOCK(sc);
2468    return (0);
2469}
2470
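/*
 * Illustrative sketch (not an actual caller in this file): posting a
 * filter-rules ramrod with bxe_sp_post() would look roughly like this,
 * where 'cid' and 'data_mapping' are assumed to have been set up by the
 * caller beforehand:
 *
 *   rc = bxe_sp_post(sc, RAMROD_CMD_ID_ETH_FILTER_RULES, cid,
 *                    U64_HI(data_mapping), U64_LO(data_mapping),
 *                    ETH_CONNECTION_TYPE);
 *
 * The completion then arrives either on the EQ or on the leading RCQ,
 * depending on whether the command is contextless (see above).
 */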
2471/**
2472 * bxe_debug_print_ind_table - prints the indirection table configuration.
2473 *
2474 * @sc: driver handle
2475 * @p:  pointer to rss configuration
2476 */
2477
2478/*
2479 * FreeBSD Device probe function.
2480 *
2481 * Compares the device found to the driver's list of supported devices and
2482 * reports back to the kernel whether this is the right driver for the device.
2483 * It is invoked when the driver module is loaded (e.g. via the "kldload" command).
2484 *
2485 * Returns:
2486 *   BUS_PROBE_DEFAULT on success, positive value on failure.
2487 */
2488static int
2489bxe_probe(device_t dev)
2490{
2491    struct bxe_device_type *t;
2492    char *descbuf;
2493    uint16_t did, sdid, svid, vid;
2494
2495    /* Find our device structure */
2496    t = bxe_devs;
2497
2498    /* Get the data for the device to be probed. */
2499    vid  = pci_get_vendor(dev);
2500    did  = pci_get_device(dev);
2501    svid = pci_get_subvendor(dev);
2502    sdid = pci_get_subdevice(dev);
2503
2504    /* Look through the list of known devices for a match. */
2505    while (t->bxe_name != NULL) {
2506        if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2507            ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2508            ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2509            descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
2510            if (descbuf == NULL)
2511                return (ENOMEM);
2512
2513            /* Print out the device identity. */
2514            snprintf(descbuf, BXE_DEVDESC_MAX,
2515                     "%s (%c%d) BXE v:%s", t->bxe_name,
2516                     (((pci_read_config(dev, PCIR_REVID, 4) &
2517                        0xf0) >> 4) + 'A'),
2518                     (pci_read_config(dev, PCIR_REVID, 4) & 0xf),
2519                     BXE_DRIVER_VERSION);
2520
2521            device_set_desc_copy(dev, descbuf);
2522            free(descbuf, M_TEMP);
2523            return (BUS_PROBE_DEFAULT);
2524        }
2525        t++;
2526    }
2527
2528    return (ENXIO);
2529}
2530
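/*
 * Create the per-instance locks (core, slowpath, DMAE, PHY, FW mailbox,
 * print, stats and mcast), each named after the device unit number.
 */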
2531static void
2532bxe_init_mutexes(struct bxe_softc *sc)
2533{
2534#ifdef BXE_CORE_LOCK_SX
2535    snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2536             "bxe%d_core_lock", sc->unit);
2537    sx_init(&sc->core_sx, sc->core_sx_name);
2538#else
2539    snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2540             "bxe%d_core_lock", sc->unit);
2541    mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2542#endif
2543
2544    snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2545             "bxe%d_sp_lock", sc->unit);
2546    mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2547
2548    snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2549             "bxe%d_dmae_lock", sc->unit);
2550    mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2551
2552    snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2553             "bxe%d_phy_lock", sc->unit);
2554    mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2555
2556    snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2557             "bxe%d_fwmb_lock", sc->unit);
2558    mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2559
2560    snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2561             "bxe%d_print_lock", sc->unit);
2562    mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2563
2564    snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2565             "bxe%d_stats_lock", sc->unit);
2566    mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2567
2568    snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2569             "bxe%d_mcast_lock", sc->unit);
2570    mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2571}
2572
2573static void
2574bxe_release_mutexes(struct bxe_softc *sc)
2575{
2576#ifdef BXE_CORE_LOCK_SX
2577    sx_destroy(&sc->core_sx);
2578#else
2579    if (mtx_initialized(&sc->core_mtx)) {
2580        mtx_destroy(&sc->core_mtx);
2581    }
2582#endif
2583
2584    if (mtx_initialized(&sc->sp_mtx)) {
2585        mtx_destroy(&sc->sp_mtx);
2586    }
2587
2588    if (mtx_initialized(&sc->dmae_mtx)) {
2589        mtx_destroy(&sc->dmae_mtx);
2590    }
2591
2592    if (mtx_initialized(&sc->port.phy_mtx)) {
2593        mtx_destroy(&sc->port.phy_mtx);
2594    }
2595
2596    if (mtx_initialized(&sc->fwmb_mtx)) {
2597        mtx_destroy(&sc->fwmb_mtx);
2598    }
2599
2600    if (mtx_initialized(&sc->print_mtx)) {
2601        mtx_destroy(&sc->print_mtx);
2602    }
2603
2604    if (mtx_initialized(&sc->stats_mtx)) {
2605        mtx_destroy(&sc->stats_mtx);
2606    }
2607
2608    if (mtx_initialized(&sc->mcast_mtx)) {
2609        mtx_destroy(&sc->mcast_mtx);
2610    }
2611}
2612
2613static void
2614bxe_tx_disable(struct bxe_softc* sc)
2615{
2616    if_t ifp = sc->ifp;
2617
2618    /* tell the stack the driver is stopped and TX queue is full */
2619    if (ifp !=  NULL) {
2620        if_setdrvflags(ifp, 0);
2621    }
2622}
2623
2624static void
2625bxe_drv_pulse(struct bxe_softc *sc)
2626{
2627    SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
2628             sc->fw_drv_pulse_wr_seq);
2629}
2630
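/*
 * Return the number of TX buffer descriptors still available on the given
 * fastpath queue (ring size minus the BDs currently in use).
 */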
2631static inline uint16_t
2632bxe_tx_avail(struct bxe_softc *sc,
2633             struct bxe_fastpath *fp)
2634{
2635    int16_t  used;
2636    uint16_t prod;
2637    uint16_t cons;
2638
2639    prod = fp->tx_bd_prod;
2640    cons = fp->tx_bd_cons;
2641
2642    used = SUB_S16(prod, cons);
2643
2644    return ((int16_t)(sc->tx_ring_size) - used);
2645}
2646
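/*
 * Check whether the hardware TX consumer index in the status block has
 * advanced past the driver's packet consumer, i.e. completions are pending.
 */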
2647static inline int
2648bxe_tx_queue_has_work(struct bxe_fastpath *fp)
2649{
2650    uint16_t hw_cons;
2651
2652    mb(); /* status block fields can change */
2653    hw_cons = le16toh(*fp->tx_cons_sb);
2654    return (hw_cons != fp->tx_pkt_cons);
2655}
2656
2657static inline uint8_t
2658bxe_has_tx_work(struct bxe_fastpath *fp)
2659{
2660    /* expand this for multi-cos if ever supported */
2661    return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE;
2662}
2663
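/*
 * Check whether new receive completions are pending by comparing the status
 * block RCQ consumer against the driver's RCQ consumer.
 */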
2664static inline int
2665bxe_has_rx_work(struct bxe_fastpath *fp)
2666{
2667    uint16_t rx_cq_cons_sb;
2668
2669    mb(); /* status block fields can change */
2670    rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2671    if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX)
2672        rx_cq_cons_sb++;
2673    return (fp->rx_cq_cons != rx_cq_cons_sb);
2674}
2675
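/*
 * Handle a slowpath (ramrod) completion that arrived on a fastpath RCQ: map
 * the ramrod command to the matching ecore queue command, complete it on the
 * queue state object, and return a credit to the slowpath queue counter.
 */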
2676static void
2677bxe_sp_event(struct bxe_softc    *sc,
2678             struct bxe_fastpath *fp,
2679             union eth_rx_cqe    *rr_cqe)
2680{
2681    int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2682    int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2683    enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
2684    struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
2685
2686    BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n",
2687          fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2688
2689    switch (command) {
2690    case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
2691        BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid);
2692        drv_cmd = ECORE_Q_CMD_UPDATE;
2693        break;
2694
2695    case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
2696        BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid);
2697        drv_cmd = ECORE_Q_CMD_SETUP;
2698        break;
2699
2700    case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
2701        BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2702        drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
2703        break;
2704
2705    case (RAMROD_CMD_ID_ETH_HALT):
2706        BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid);
2707        drv_cmd = ECORE_Q_CMD_HALT;
2708        break;
2709
2710    case (RAMROD_CMD_ID_ETH_TERMINATE):
2711        BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid);
2712        drv_cmd = ECORE_Q_CMD_TERMINATE;
2713        break;
2714
2715    case (RAMROD_CMD_ID_ETH_EMPTY):
2716        BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid);
2717        drv_cmd = ECORE_Q_CMD_EMPTY;
2718        break;
2719
2720    default:
2721        BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n",
2722              command, fp->index);
2723        return;
2724    }
2725
2726    if ((drv_cmd != ECORE_Q_CMD_MAX) &&
2727        q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2728        /*
2729         * q_obj->complete_cmd() failure means that this was
2730         * an unexpected completion.
2731         *
2732         * In this case we don't want to increase the sc->spq_left
2733         * because apparently we haven't sent this command the first
2734         * place.
2735         */
2736        // bxe_panic(sc, ("Unexpected SP completion\n"));
2737        return;
2738    }
2739
2740    atomic_add_acq_long(&sc->cq_spq_left, 1);
2741
2742    BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2743          atomic_load_acq_long(&sc->cq_spq_left));
2744}
2745
2746/*
2747 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
2748 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
2749 * the current aggregation queue as in-progress.
2750 */
2751static void
2752bxe_tpa_start(struct bxe_softc            *sc,
2753              struct bxe_fastpath         *fp,
2754              uint16_t                    queue,
2755              uint16_t                    cons,
2756              uint16_t                    prod,
2757              struct eth_fast_path_rx_cqe *cqe)
2758{
2759    struct bxe_sw_rx_bd tmp_bd;
2760    struct bxe_sw_rx_bd *rx_buf;
2761    struct eth_rx_bd *rx_bd;
2762    int max_agg_queues __diagused;
2763    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2764    uint16_t index;
2765
2766    BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
2767                       "cons=%d prod=%d\n",
2768          fp->index, queue, cons, prod);
2769
2770    max_agg_queues = MAX_AGG_QS(sc);
2771
2772    KASSERT((queue < max_agg_queues),
2773            ("fp[%02d] invalid aggr queue (%d >= %d)!",
2774             fp->index, queue, max_agg_queues));
2775
2776    KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2777            ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
2778             fp->index, queue));
2779
2780    /* copy the existing mbuf and mapping from the TPA pool */
2781    tmp_bd = tpa_info->bd;
2782
2783    if (tmp_bd.m == NULL) {
2784        uint32_t *tmp;
2785
2786        tmp = (uint32_t *)cqe;
2787
2788        BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d] mbuf not allocated!\n",
2789              fp->index, queue, cons, prod);
2790        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2791            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2792
2793        /* XXX Error handling? */
2794        return;
2795    }
2796
2797    /* change the TPA queue to the start state */
2798    tpa_info->state            = BXE_TPA_STATE_START;
2799    tpa_info->placement_offset = cqe->placement_offset;
2800    tpa_info->parsing_flags    = le16toh(cqe->pars_flags.flags);
2801    tpa_info->vlan_tag         = le16toh(cqe->vlan_tag);
2802    tpa_info->len_on_bd        = le16toh(cqe->len_on_bd);
2803
2804    fp->rx_tpa_queue_used |= (1 << queue);
2805
2806    /*
2807     * If all the buffer descriptors are filled with mbufs then fill in
2808     * the current consumer index with a new BD. Else if a maximum Rx
2809     * buffer limit is imposed then fill in the next producer index.
2810     */
2811    index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2812                prod : cons;
2813
2814    /* move the received mbuf and mapping to TPA pool */
2815    tpa_info->bd = fp->rx_mbuf_chain[cons];
2816
2817    /* release any existing RX BD mbuf mappings */
2818    if (cons != index) {
2819        rx_buf = &fp->rx_mbuf_chain[cons];
2820
2821        if (rx_buf->m_map != NULL) {
2822            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2823                            BUS_DMASYNC_POSTREAD);
2824            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2825        }
2826
2827        /*
2828         * We get here when the maximum number of rx buffers is less than
2829         * RX_BD_USABLE. The mbuf was already saved above, so it is safe to
2830         * NULL it out here without risk of a memory leak.
2831         */
2832        fp->rx_mbuf_chain[cons].m = NULL;
2833    }
2834
2835    /* update the Rx SW BD with the mbuf info from the TPA pool */
2836    fp->rx_mbuf_chain[index] = tmp_bd;
2837
2838    /* update the Rx BD with the empty mbuf phys address from the TPA pool */
2839    rx_bd = &fp->rx_chain[index];
2840    rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2841    rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2842}
2843
2844/*
2845 * When a TPA aggregation is completed, loop through the individual mbufs
2846 * of the aggregation, combining them into a single mbuf which will be sent
2847 * up the stack. Refill all freed SGEs with mbufs as we go along.
2848 */
2849static int
2850bxe_fill_frag_mbuf(struct bxe_softc          *sc,
2851                   struct bxe_fastpath       *fp,
2852                   struct bxe_sw_tpa_info    *tpa_info,
2853                   uint16_t                  queue,
2854                   uint16_t                  pages,
2855                   struct mbuf               *m,
2856                   struct eth_end_agg_rx_cqe *cqe,
2857                   uint16_t                  cqe_idx)
2858{
2859    struct mbuf *m_frag;
2860    uint32_t frag_len, frag_size, i;
2861    uint16_t sge_idx;
2862    int rc = 0;
2863    int j;
2864
2865    frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2866
2867    BLOGD(sc, DBG_LRO,
2868          "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
2869          fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2870
2871    /* make sure the aggregated frame is not too big to handle */
2872    if (pages > 8 * PAGES_PER_SGE) {
2873
2874        uint32_t *tmp = (uint32_t *)cqe;
2875
2876        BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
2877                  "pkt_len=%d len_on_bd=%d frag_size=%d\n",
2878              fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2879              tpa_info->len_on_bd, frag_size);
2880
2881        BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n",
2882            *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2883
2884        bxe_panic(sc, ("sge page count error\n"));
2885        return (EINVAL);
2886    }
2887
2888    /*
2889     * Scan through the scatter gather list pulling individual mbufs into a
2890     * single mbuf for the host stack.
2891     */
2892    for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
2893        sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2894
2895        /*
2896         * Firmware gives the indices of the SGE as if the ring is an array
2897         * (meaning that the "next" element will consume 2 indices).
2898         */
2899        frag_len = min(frag_size, (uint32_t)(SGE_PAGES));
2900
2901        BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
2902                           "sge_idx=%d frag_size=%d frag_len=%d\n",
2903              fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2904
2905        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2906
2907        /* allocate a new mbuf for the SGE */
2908        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
2909        if (rc) {
2910            /* Leave all remaining SGEs in the ring! */
2911            return (rc);
2912        }
2913
2914        /* update the fragment length */
2915        m_frag->m_len = frag_len;
2916
2917        /* concatenate the fragment to the head mbuf */
2918        m_cat(m, m_frag);
2919        fp->eth_q_stats.mbuf_alloc_sge--;
2920
2921        /* update the TPA mbuf size and remaining fragment size */
2922        m->m_pkthdr.len += frag_len;
2923        frag_size -= frag_len;
2924    }
2925
2926    BLOGD(sc, DBG_LRO,
2927          "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
2928          fp->index, queue, frag_size);
2929
2930    return (rc);
2931}
2932
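/*
 * Clear the SGE mask bits that correspond to the two "next page" entries at
 * the end of each SGE ring page; those entries never carry real buffers.
 */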
2933static inline void
2934bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp)
2935{
2936    int i, j;
2937
2938    for (i = 1; i <= RX_SGE_NUM_PAGES; i++) {
2939        int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2940
2941        for (j = 0; j < 2; j++) {
2942            BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2943            idx--;
2944        }
2945    }
2946}
2947
2948static inline void
2949bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp)
2950{
2951    /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */
2952    memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2953
2954    /*
2955     * Clear the two last indices in the page to 1. These are the indices that
2956     * correspond to the "next" element, hence will never be indicated and
2957     * should be removed from the calculations.
2958     */
2959    bxe_clear_sge_mask_next_elems(fp);
2960}
2961
2962static inline void
2963bxe_update_last_max_sge(struct bxe_fastpath *fp,
2964                        uint16_t            idx)
2965{
2966    uint16_t last_max = fp->last_max_sge;
2967
2968    if (SUB_S16(idx, last_max) > 0) {
2969        fp->last_max_sge = idx;
2970    }
2971}
2972
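/*
 * Mark the SGEs consumed by a completed aggregation in the SGE mask and
 * advance the SGE producer over any mask elements that are now fully
 * consumed, re-arming their "next page" bits.
 */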
2973static inline void
2974bxe_update_sge_prod(struct bxe_softc          *sc,
2975                    struct bxe_fastpath       *fp,
2976                    uint16_t                  sge_len,
2977                    union eth_sgl_or_raw_data *cqe)
2978{
2979    uint16_t last_max, last_elem, first_elem;
2980    uint16_t delta = 0;
2981    uint16_t i;
2982
2983    if (!sge_len) {
2984        return;
2985    }
2986
2987    /* first mark all used pages */
2988    for (i = 0; i < sge_len; i++) {
2989        BIT_VEC64_CLEAR_BIT(fp->sge_mask,
2990                            RX_SGE(le16toh(cqe->sgl[i])));
2991    }
2992
2993    BLOGD(sc, DBG_LRO,
2994          "fp[%02d] fp_cqe->sgl[%d] = %d\n",
2995          fp->index, sge_len - 1,
2996          le16toh(cqe->sgl[sge_len - 1]));
2997
2998    /* assume that the last SGE index is the biggest */
2999    bxe_update_last_max_sge(fp,
3000                            le16toh(cqe->sgl[sge_len - 1]));
3001
3002    last_max = RX_SGE(fp->last_max_sge);
3003    last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
3004    first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
3005
3006    /* if ring is not full */
3007    if (last_elem + 1 != first_elem) {
3008        last_elem++;
3009    }
3010
3011    /* now update the prod */
3012    for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
3013        if (__predict_true(fp->sge_mask[i])) {
3014            break;
3015        }
3016
3017        fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3018        delta += BIT_VEC64_ELEM_SZ;
3019    }
3020
3021    if (delta > 0) {
3022        fp->rx_sge_prod += delta;
3023        /* clear page-end entries */
3024        bxe_clear_sge_mask_next_elems(fp);
3025    }
3026
3027    BLOGD(sc, DBG_LRO,
3028          "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3029          fp->index, fp->last_max_sge, fp->rx_sge_prod);
3030}
3031
3032/*
3033 * The aggregation on the current TPA queue has completed. Pull the individual
3034 * mbuf fragments together into a single mbuf, mark the checksums as already
3035 * validated by the firmware, and send the resulting mbuf to the stack.
3036 */
3037static void
3038bxe_tpa_stop(struct bxe_softc          *sc,
3039             struct bxe_fastpath       *fp,
3040             struct bxe_sw_tpa_info    *tpa_info,
3041             uint16_t                  queue,
3042             uint16_t                  pages,
3043             struct eth_end_agg_rx_cqe *cqe,
3044             uint16_t                  cqe_idx)
3045{
3046    if_t ifp = sc->ifp;
3047    struct mbuf *m;
3048    int rc = 0;
3049
3050    BLOGD(sc, DBG_LRO,
3051          "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
3052          fp->index, queue, tpa_info->placement_offset,
3053          le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3054
3055    m = tpa_info->bd.m;
3056
3057    /* allocate a replacement before modifying existing mbuf */
3058    rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
3059    if (rc) {
3060        /* drop the frame and log an error */
3061        fp->eth_q_stats.rx_soft_errors++;
3062        goto bxe_tpa_stop_exit;
3063    }
3064
3065    /* we have a replacement, fixup the current mbuf */
3066    m_adj(m, tpa_info->placement_offset);
3067    m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3068
3069    /* mark the checksums valid (taken care of by the firmware) */
3070    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3071    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3072    m->m_pkthdr.csum_data = 0xffff;
3073    m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3074                               CSUM_IP_VALID   |
3075                               CSUM_DATA_VALID |
3076                               CSUM_PSEUDO_HDR);
3077
3078    /* aggregate all of the SGEs into a single mbuf */
3079    rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
3080    if (rc) {
3081        /* drop the packet and log an error */
3082        fp->eth_q_stats.rx_soft_errors++;
3083        m_freem(m);
3084    } else {
3085        if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3086            m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3087            m->m_flags |= M_VLANTAG;
3088        }
3089
3090        /* assign the packet to this interface */
3091        if_setrcvif(m, ifp);
3092
3093        /* specify what RSS queue was used for this flow */
3094        m->m_pkthdr.flowid = fp->index;
3095        BXE_SET_FLOWID(m);
3096
3097        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3098        fp->eth_q_stats.rx_tpa_pkts++;
3099
3100        /* pass the frame to the stack */
3101        if_input(ifp, m);
3102    }
3103
3104    /* we passed an mbuf up the stack or dropped the frame */
3105    fp->eth_q_stats.mbuf_alloc_tpa--;
3106
3107bxe_tpa_stop_exit:
3108
3109    fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3110    fp->rx_tpa_queue_used &= ~(1 << queue);
3111}
3112
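/*
 * For a non-TPA frame that spills past the first buffer, walk the SGL in the
 * fastpath CQE, chain each SGE mbuf onto the head mbuf, replace the consumed
 * SGEs with fresh mbufs, and advance the SGE producer.
 */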
3113static uint8_t
3114bxe_service_rxsgl(
3115                 struct bxe_fastpath *fp,
3116                 uint16_t len,
3117                 uint16_t lenonbd,
3118                 struct mbuf *m,
3119                 struct eth_fast_path_rx_cqe *cqe_fp)
3120{
3121    struct mbuf *m_frag;
3122    uint16_t frags, frag_len;
3123    uint16_t sge_idx = 0;
3124    uint16_t j;
3125    uint8_t i, rc = 0;
3126    uint32_t frag_size;
3127
3128    /* adjust the mbuf */
3129    m->m_len = lenonbd;
3130
3131    frag_size =  len - lenonbd;
3132    frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3133
3134    for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) {
3135        sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3136
3137        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3138        frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE));
3139        m_frag->m_len = frag_len;
3140
3141       /* allocate a new mbuf for the SGE */
3142        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
3143        if (rc) {
3144            /* Leave all remaining SGEs in the ring! */
3145            return (rc);
3146        }
3147        fp->eth_q_stats.mbuf_alloc_sge--;
3148
3149        /* concatenate the fragment to the head mbuf */
3150        m_cat(m, m_frag);
3151
3152        frag_size -= frag_len;
3153    }
3154
3155    bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3156
3157    return rc;
3158}
3159
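/*
 * Process received frames on a fastpath RX completion queue: hand slowpath
 * CQEs to bxe_sp_event(), handle TPA start/stop aggregation CQEs, and pass
 * completed frames up to the network stack.
 */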
3160static uint8_t
3161bxe_rxeof(struct bxe_softc    *sc,
3162          struct bxe_fastpath *fp)
3163{
3164    if_t ifp = sc->ifp;
3165    uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
3166    uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
3167    int rx_pkts = 0;
3168    int rc = 0;
3169
3170    BXE_FP_RX_LOCK(fp);
3171
3172    /* the CQ "next" element is the same size as a regular element */
3173    hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3174    if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
3175        hw_cq_cons++;
3176    }
3177
3178    bd_cons = fp->rx_bd_cons;
3179    bd_prod = fp->rx_bd_prod;
3180    bd_prod_fw = bd_prod;
3181    sw_cq_cons = fp->rx_cq_cons;
3182    sw_cq_prod = fp->rx_cq_prod;
3183
3184    /*
3185     * Memory barrier necessary as speculative reads of the rx
3186     * buffer can be ahead of the index in the status block
3187     */
3188    rmb();
3189
3190    BLOGD(sc, DBG_RX,
3191          "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
3192          fp->index, hw_cq_cons, sw_cq_cons);
3193
3194    while (sw_cq_cons != hw_cq_cons) {
3195        struct bxe_sw_rx_bd *rx_buf = NULL;
3196        union eth_rx_cqe *cqe;
3197        struct eth_fast_path_rx_cqe *cqe_fp;
3198        uint8_t cqe_fp_flags;
3199        enum eth_rx_cqe_type cqe_fp_type;
3200        uint16_t len, lenonbd,  pad;
3201        struct mbuf *m = NULL;
3202
3203        comp_ring_cons = RCQ(sw_cq_cons);
3204        bd_prod = RX_BD(bd_prod);
3205        bd_cons = RX_BD(bd_cons);
3206
3207        cqe          = &fp->rcq_chain[comp_ring_cons];
3208        cqe_fp       = &cqe->fast_path_cqe;
3209        cqe_fp_flags = cqe_fp->type_error_flags;
3210        cqe_fp_type  = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
3211
3212        BLOGD(sc, DBG_RX,
3213              "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
3214              "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
3215              "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n",
3216              fp->index,
3217              hw_cq_cons,
3218              sw_cq_cons,
3219              bd_prod,
3220              bd_cons,
3221              CQE_TYPE(cqe_fp_flags),
3222              cqe_fp_flags,
3223              cqe_fp->status_flags,
3224              le32toh(cqe_fp->rss_hash_result),
3225              le16toh(cqe_fp->vlan_tag),
3226              le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3227              le16toh(cqe_fp->len_on_bd));
3228
3229        /* is this a slowpath msg? */
3230        if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
3231            bxe_sp_event(sc, fp, cqe);
3232            goto next_cqe;
3233        }
3234
3235        rx_buf = &fp->rx_mbuf_chain[bd_cons];
3236
3237        if (!CQE_TYPE_FAST(cqe_fp_type)) {
3238            struct bxe_sw_tpa_info *tpa_info;
3239            uint16_t frag_size, pages;
3240            uint8_t queue;
3241
3242            if (CQE_TYPE_START(cqe_fp_type)) {
3243                bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3244                              bd_cons, bd_prod, cqe_fp);
3245                m = NULL; /* packet not ready yet */
3246                goto next_rx;
3247            }
3248
3249            KASSERT(CQE_TYPE_STOP(cqe_fp_type),
3250                    ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));
3251
3252            queue = cqe->end_agg_cqe.queue_index;
3253            tpa_info = &fp->rx_tpa_info[queue];
3254
3255            BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
3256                  fp->index, queue);
3257
3258            frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3259                         tpa_info->len_on_bd);
3260            pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
3261
3262            bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
3263                         &cqe->end_agg_cqe, comp_ring_cons);
3264
3265            bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3266
3267            goto next_cqe;
3268        }
3269
3270        /* non TPA */
3271
3272        /* is this an error packet? */
3273        if (__predict_false(cqe_fp_flags &
3274                            ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
3275            BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
3276            fp->eth_q_stats.rx_soft_errors++;
3277            goto next_rx;
3278        }
3279
3280        len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3281        lenonbd = le16toh(cqe_fp->len_on_bd);
3282        pad = cqe_fp->placement_offset;
3283
3284        m = rx_buf->m;
3285
3286        if (__predict_false(m == NULL)) {
3287            BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
3288                  bd_cons, fp->index);
3289            goto next_rx;
3290        }
3291
3292        /* XXX double copy if packet length under a threshold */
3293
3294        /*
3295         * If all the buffer descriptors are filled with mbufs then fill in
3296         * the current consumer index with a new BD. Else if a maximum Rx
3297         * buffer limit is imposed then fill in the next producer index.
3298         */
3299        rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
3300                                  (sc->max_rx_bufs != RX_BD_USABLE) ?
3301                                      bd_prod : bd_cons);
3302        if (rc != 0) {
3303
3304            /* we simply reuse the received mbuf and don't post it to the stack */
3305            m = NULL;
3306
3307            BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
3308                  fp->index, rc);
3309            fp->eth_q_stats.rx_soft_errors++;
3310
3311            if (sc->max_rx_bufs != RX_BD_USABLE) {
3312                /* copy this consumer index to the producer index */
3313                memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3314                       sizeof(struct bxe_sw_rx_bd));
3315                memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
3316            }
3317
3318            goto next_rx;
3319        }
3320
3321        /* current mbuf was detached from the bd */
3322        fp->eth_q_stats.mbuf_alloc_rx--;
3323
3324        /* we allocated a replacement mbuf, fixup the current one */
3325        m_adj(m, pad);
3326        m->m_pkthdr.len = m->m_len = len;
3327
3328        if ((len > 60) && (len > lenonbd)) {
3329            fp->eth_q_stats.rx_bxe_service_rxsgl++;
3330            rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp);
3331            if (rc)
3332                break;
3333            fp->eth_q_stats.rx_jumbo_sge_pkts++;
3334        } else if (lenonbd < len) {
3335            fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3336        }
3337
        /* assign packet to this interface */
        if_setrcvif(m, ifp);
3340
        /* assume no hardware checksum has completed */
3342        m->m_pkthdr.csum_flags = 0;
3343
3344        /* validate checksum if offload enabled */
3345        if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
3346            /* check for a valid IP frame */
3347            if (!(cqe->fast_path_cqe.status_flags &
3348                  ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
3349                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3350                if (__predict_false(cqe_fp_flags &
3351                                    ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
3352                    fp->eth_q_stats.rx_hw_csum_errors++;
3353                } else {
3354                    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3355                    m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3356                }
3357            }
3358
3359            /* check for a valid TCP/UDP frame */
3360            if (!(cqe->fast_path_cqe.status_flags &
3361                  ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
3362                if (__predict_false(cqe_fp_flags &
3363                                    ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
3364                    fp->eth_q_stats.rx_hw_csum_errors++;
3365                } else {
3366                    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3367                    m->m_pkthdr.csum_data = 0xFFFF;
3368                    m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3369                                               CSUM_PSEUDO_HDR);
3370                }
3371            }
3372        }
3373
3374        /* if there is a VLAN tag then flag that info */
3375        if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3376            m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3377            m->m_flags |= M_VLANTAG;
3378        }
3379
3380        /* specify what RSS queue was used for this flow */
3381        m->m_pkthdr.flowid = fp->index;
3382        BXE_SET_FLOWID(m);
3383
3384next_rx:
3385
3386        bd_cons    = RX_BD_NEXT(bd_cons);
3387        bd_prod    = RX_BD_NEXT(bd_prod);
3388        bd_prod_fw = RX_BD_NEXT(bd_prod_fw);
3389
3390        /* pass the frame to the stack */
3391        if (__predict_true(m != NULL)) {
3392            if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
3393            rx_pkts++;
3394            if_input(ifp, m);
3395        }
3396
3397next_cqe:
3398
3399        sw_cq_prod = RCQ_NEXT(sw_cq_prod);
3400        sw_cq_cons = RCQ_NEXT(sw_cq_cons);
3401
3402        /* limit spinning on the queue */
3403        if (rc != 0)
3404            break;
3405
3406        if (rx_pkts == sc->rx_budget) {
3407            fp->eth_q_stats.rx_budget_reached++;
3408            break;
3409        }
3410    } /* while work to do */
3411
3412    fp->rx_bd_cons = bd_cons;
3413    fp->rx_bd_prod = bd_prod_fw;
3414    fp->rx_cq_cons = sw_cq_cons;
3415    fp->rx_cq_prod = sw_cq_prod;
3416
3417    /* Update producers */
3418    bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3419
3420    fp->eth_q_stats.rx_pkts += rx_pkts;
3421    fp->eth_q_stats.rx_calls++;
3422
3423    BXE_FP_RX_UNLOCK(fp);
3424
3425    return (sw_cq_cons != hw_cq_cons);
3426}
3427
3428static uint16_t
3429bxe_free_tx_pkt(struct bxe_softc    *sc,
3430                struct bxe_fastpath *fp,
3431                uint16_t            idx)
3432{
3433    struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3434    struct eth_tx_start_bd *tx_start_bd;
3435    uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3436    uint16_t new_cons;
3437    int nbd;
3438
3439    /* unmap the mbuf from non-paged memory */
3440    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3441
3442    tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3443    nbd = le16toh(tx_start_bd->nbd) - 1;
3444
3445    new_cons = (tx_buf->first_bd + nbd);
3446
3447    /* free the mbuf */
3448    if (__predict_true(tx_buf->m != NULL)) {
3449        m_freem(tx_buf->m);
3450        fp->eth_q_stats.mbuf_alloc_tx--;
3451    } else {
3452        fp->eth_q_stats.tx_chain_lost_mbuf++;
3453    }
3454
3455    tx_buf->m = NULL;
3456    tx_buf->first_bd = 0;
3457
3458    return (new_cons);
3459}
3460
3461/* transmit timeout watchdog */
3462static int
3463bxe_watchdog(struct bxe_softc    *sc,
3464             struct bxe_fastpath *fp)
3465{
3466    BXE_FP_TX_LOCK(fp);
3467
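    /*
     * A watchdog_timer of zero means the watchdog is not armed (no pending
     * transmits); otherwise decrement it and only declare a timeout once it
     * reaches zero.
     */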
3468    if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3469        BXE_FP_TX_UNLOCK(fp);
3470        return (0);
3471    }
3472
3473    BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3474
3475    BXE_FP_TX_UNLOCK(fp);
3476    BXE_SET_ERROR_BIT(sc, BXE_ERR_TXQ_STUCK);
3477    taskqueue_enqueue_timeout(taskqueue_thread,
3478        &sc->sp_err_timeout_task, hz/10);
3479
3480    return (-1);
3481}
3482
3483/* processes transmit completions */
3484static uint8_t
3485bxe_txeof(struct bxe_softc    *sc,
3486          struct bxe_fastpath *fp)
3487{
3488    if_t ifp = sc->ifp;
3489    uint16_t bd_cons, hw_cons, sw_cons, pkt_cons;
3490    uint16_t tx_bd_avail;
3491
3492    BXE_FP_TX_LOCK_ASSERT(fp);
3493
3494    bd_cons = fp->tx_bd_cons;
3495    hw_cons = le16toh(*fp->tx_cons_sb);
3496    sw_cons = fp->tx_pkt_cons;
3497
3498    while (sw_cons != hw_cons) {
3499        pkt_cons = TX_BD(sw_cons);
3500
3501        BLOGD(sc, DBG_TX,
3502              "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n",
3503              fp->index, hw_cons, sw_cons, pkt_cons);
3504
3505        bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons);
3506
3507        sw_cons++;
3508    }
3509
3510    fp->tx_pkt_cons = sw_cons;
3511    fp->tx_bd_cons  = bd_cons;
3512
3513    BLOGD(sc, DBG_TX,
3514          "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n",
3515          fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3516
3517    mb();
3518
3519    tx_bd_avail = bxe_tx_avail(sc, fp);
3520
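    /*
     * If too few TX BDs remain available, set IFF_DRV_OACTIVE so the stack
     * stops handing us frames; clear it again once enough BDs have been
     * reclaimed.
     */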
3521    if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
3522        if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
3523    } else {
3524        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3525    }
3526
3527    if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3528        /* reset the watchdog timer if there are pending transmits */
3529        fp->watchdog_timer = BXE_TX_TIMEOUT;
3530        return (TRUE);
3531    } else {
3532        /* clear watchdog when there are no pending transmits */
3533        fp->watchdog_timer = 0;
3534        return (FALSE);
3535    }
3536}
3537
3538static void
3539bxe_drain_tx_queues(struct bxe_softc *sc)
3540{
3541    struct bxe_fastpath *fp;
3542    int i, count;
3543
3544    /* wait until all TX fastpath tasks have completed */
3545    for (i = 0; i < sc->num_queues; i++) {
3546        fp = &sc->fp[i];
3547
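        /* poll for up to roughly one second per queue (1000 iterations of a 1ms delay) */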
3548        count = 1000;
3549
3550        while (bxe_has_tx_work(fp)) {
3551
3552            BXE_FP_TX_LOCK(fp);
3553            bxe_txeof(sc, fp);
3554            BXE_FP_TX_UNLOCK(fp);
3555
3556            if (count == 0) {
3557                BLOGE(sc, "Timeout waiting for fp[%d] "
3558                          "transmits to complete!\n", i);
3559                bxe_panic(sc, ("tx drain failure\n"));
3560                return;
3561            }
3562
3563            count--;
3564            DELAY(1000);
3565            rmb();
3566        }
3567    }
3568
3569    return;
3570}
3571
3572static int
3573bxe_del_all_macs(struct bxe_softc          *sc,
3574                 struct ecore_vlan_mac_obj *mac_obj,
3575                 int                       mac_type,
3576                 uint8_t                   wait_for_comp)
3577{
3578    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3579    int rc;
3580
3581    /* wait for completion of requested */
3582    if (wait_for_comp) {
3583        bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3584    }
3585
3586    /* Set the mac type of addresses we want to clear */
3587    bxe_set_bit(mac_type, &vlan_mac_flags);
3588
3589    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3590    if (rc < 0) {
3591        BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n",
3592            rc, mac_type, wait_for_comp);
3593    }
3594
3595    return (rc);
3596}
3597
3598static int
3599bxe_fill_accept_flags(struct bxe_softc *sc,
3600                      uint32_t         rx_mode,
3601                      unsigned long    *rx_accept_flags,
3602                      unsigned long    *tx_accept_flags)
3603{
3604    /* Clear the flags first */
3605    *rx_accept_flags = 0;
3606    *tx_accept_flags = 0;
3607
3608    switch (rx_mode) {
3609    case BXE_RX_MODE_NONE:
3610        /*
3611         * 'drop all' supersedes any accept flags that may have been
3612         * passed to the function.
3613         */
3614        break;
3615
3616    case BXE_RX_MODE_NORMAL:
3617        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3618        bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags);
3619        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3620
3621        /* internal switching mode */
3622        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3623        bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags);
3624        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3625
3626        break;
3627
3628    case BXE_RX_MODE_ALLMULTI:
3629        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3630        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3631        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3632
3633        /* internal switching mode */
3634        bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3635        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3636        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3637
3638        break;
3639
3640    case BXE_RX_MODE_PROMISC:
        /*
         * According to the definition of SI mode, an interface in promiscuous
         * mode should receive matched and unmatched (in resolution of the
         * port) unicast packets.
         */
3646        bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags);
3647        bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags);
3648        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags);
3649        bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags);
3650
3651        /* internal switching mode */
3652        bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags);
3653        bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags);
3654
3655        if (IS_MF_SI(sc)) {
3656            bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags);
3657        } else {
3658            bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags);
3659        }
3660
3661        break;
3662
3663    default:
3664        BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode);
3665        return (-1);
3666    }
3667
3668    /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
3669    if (rx_mode != BXE_RX_MODE_NONE) {
3670        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags);
3671        bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags);
3672    }
3673
3674    return (0);
3675}
3676
3677static int
3678bxe_set_q_rx_mode(struct bxe_softc *sc,
3679                  uint8_t          cl_id,
3680                  unsigned long    rx_mode_flags,
3681                  unsigned long    rx_accept_flags,
3682                  unsigned long    tx_accept_flags,
3683                  unsigned long    ramrod_flags)
3684{
3685    struct ecore_rx_mode_ramrod_params ramrod_param;
3686    int rc;
3687
3688    memset(&ramrod_param, 0, sizeof(ramrod_param));
3689
3690    /* Prepare ramrod parameters */
3691    ramrod_param.cid = 0;
3692    ramrod_param.cl_id = cl_id;
3693    ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3694    ramrod_param.func_id = SC_FUNC(sc);
3695
3696    ramrod_param.pstate = &sc->sp_state;
3697    ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING;
3698
3699    ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata);
3700    ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata);
3701
3702    bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3703
3704    ramrod_param.ramrod_flags = ramrod_flags;
3705    ramrod_param.rx_mode_flags = rx_mode_flags;
3706
3707    ramrod_param.rx_accept_flags = rx_accept_flags;
3708    ramrod_param.tx_accept_flags = tx_accept_flags;
3709
3710    rc = ecore_config_rx_mode(sc, &ramrod_param);
3711    if (rc < 0) {
3712        BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x "
3713            "rx_accept_flags 0x%x tx_accept_flags 0x%x "
3714            "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3715            (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags,
3716            (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc);
3717        return (rc);
3718    }
3719
3720    return (0);
3721}
3722
3723static int
3724bxe_set_storm_rx_mode(struct bxe_softc *sc)
3725{
3726    unsigned long rx_mode_flags = 0, ramrod_flags = 0;
3727    unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
3728    int rc;
3729
3730    rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3731                               &tx_accept_flags);
3732    if (rc) {
3733        return (rc);
3734    }
3735
3736    bxe_set_bit(RAMROD_RX, &ramrod_flags);
3737    bxe_set_bit(RAMROD_TX, &ramrod_flags);
3738
3739    /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */
3740    return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3741                              rx_accept_flags, tx_accept_flags,
3742                              ramrod_flags));
3743}
3744
3745/* returns the "mcp load_code" according to global load_count array */
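/*
 * With no MCP present the driver does its own bookkeeping: load_count[path][0]
 * counts every loaded function on the path and load_count[path][1 + port]
 * counts the functions on each port, so the first load on a path reports
 * COMMON, the first load on a port reports PORT, and all others report
 * FUNCTION.
 */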
3746static int
3747bxe_nic_load_no_mcp(struct bxe_softc *sc)
3748{
3749    int path = SC_PATH(sc);
3750    int port = SC_PORT(sc);
3751
3752    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3753          path, load_count[path][0], load_count[path][1],
3754          load_count[path][2]);
3755    load_count[path][0]++;
3756    load_count[path][1 + port]++;
3757    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3758          path, load_count[path][0], load_count[path][1],
3759          load_count[path][2]);
3760    if (load_count[path][0] == 1) {
3761        return (FW_MSG_CODE_DRV_LOAD_COMMON);
3762    } else if (load_count[path][1 + port] == 1) {
3763        return (FW_MSG_CODE_DRV_LOAD_PORT);
3764    } else {
3765        return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3766    }
3767}
3768
3769/* returns the "mcp load_code" according to global load_count array */
3770static int
3771bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3772{
3773    int port = SC_PORT(sc);
3774    int path = SC_PATH(sc);
3775
3776    BLOGI(sc, "NO MCP - load counts[%d]      %d, %d, %d\n",
3777          path, load_count[path][0], load_count[path][1],
3778          load_count[path][2]);
3779    load_count[path][0]--;
3780    load_count[path][1 + port]--;
3781    BLOGI(sc, "NO MCP - new load counts[%d]  %d, %d, %d\n",
3782          path, load_count[path][0], load_count[path][1],
3783          load_count[path][2]);
3784    if (load_count[path][0] == 0) {
3785        return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3786    } else if (load_count[path][1 + port] == 0) {
3787        return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3788    } else {
3789        return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3790    }
3791}
3792
3793/* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3794static uint32_t
3795bxe_send_unload_req(struct bxe_softc *sc,
3796                    int              unload_mode)
3797{
3798    uint32_t reset_code = 0;
3799
    /*
     * Select the UNLOAD request mode. WOL is not yet supported by this
     * driver (see the IFCAP_WOL_MAGIC note in bxe_ioctl()), so both branches
     * currently request WOL disabled.
     */
    if (unload_mode == UNLOAD_NORMAL) {
        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
    } else {
        reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
    }
3806
3807    /* Send the request to the MCP */
3808    if (!BXE_NOMCP(sc)) {
3809        reset_code = bxe_fw_command(sc, reset_code, 0);
3810    } else {
3811        reset_code = bxe_nic_unload_no_mcp(sc);
3812    }
3813
3814    return (reset_code);
3815}
3816
3817/* send UNLOAD_DONE command to the MCP */
3818static void
3819bxe_send_unload_done(struct bxe_softc *sc,
3820                     uint8_t          keep_link)
3821{
3822    uint32_t reset_param =
3823        keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3824
3825    /* Report UNLOAD_DONE to MCP */
3826    if (!BXE_NOMCP(sc)) {
3827        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3828    }
3829}
3830
3831static int
3832bxe_func_wait_started(struct bxe_softc *sc)
3833{
3834    int tout = 50;
3835
3836    if (!sc->port.pmf) {
3837        return (0);
3838    }
3839
3840    /*
3841     * (assumption: No Attention from MCP at this stage)
3842     * PMF probably in the middle of TX disable/enable transaction
     * 1. Sync ISRs for the default SB
     * 2. Sync the SP queue - this guarantees that attention handling has started
     * 3. Wait until the TX disable/enable transaction completes
     *
     * Steps 1+2 guarantee that if a DCBX attention was scheduled, it has
     * already changed the pending bit of the transaction from STARTED to
     * TX_STOPPED; if we have already received the completion for the
     * transaction, the state is TX_STOPPED. The state returns to STARTED
     * after the TX_STOPPED-->STARTED transaction completes.
3852     */
3853
3854    /* XXX make sure default SB ISR is done */
3855    /* need a way to synchronize an irq (intr_mtx?) */
3856
3857    /* XXX flush any work queues */
3858
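    /*
     * The polling loop below budgets roughly one second in total: up to 50
     * iterations of a 20ms delay.
     */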
3859    while (ecore_func_get_state(sc, &sc->func_obj) !=
3860           ECORE_F_STATE_STARTED && tout--) {
3861        DELAY(20000);
3862    }
3863
3864    if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3865        /*
3866         * Failed to complete the transaction in a "good way"
3867         * Force both transactions with CLR bit.
3868         */
3869        struct ecore_func_state_params func_params = { NULL };
3870
3871        BLOGE(sc, "Unexpected function state! "
3872                  "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3873
3874        func_params.f_obj = &sc->func_obj;
3875        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3876
3877        /* STARTED-->TX_STOPPED */
3878        func_params.cmd = ECORE_F_CMD_TX_STOP;
3879        ecore_func_state_change(sc, &func_params);
3880
3881        /* TX_STOPPED-->STARTED */
3882        func_params.cmd = ECORE_F_CMD_TX_START;
3883        return (ecore_func_state_change(sc, &func_params));
3884    }
3885
3886    return (0);
3887}
3888
3889static int
3890bxe_stop_queue(struct bxe_softc *sc,
3891               int              index)
3892{
3893    struct bxe_fastpath *fp = &sc->fp[index];
3894    struct ecore_queue_state_params q_params = { NULL };
3895    int rc;
3896
3897    BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3898
3899    q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3900    /* We want to wait for completion in this context */
3901    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3902
3903    /* Stop the primary connection: */
3904
3905    /* ...halt the connection */
3906    q_params.cmd = ECORE_Q_CMD_HALT;
3907    rc = ecore_queue_state_change(sc, &q_params);
3908    if (rc) {
3909        return (rc);
3910    }
3911
3912    /* ...terminate the connection */
3913    q_params.cmd = ECORE_Q_CMD_TERMINATE;
3914    memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
3915    q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
3916    rc = ecore_queue_state_change(sc, &q_params);
3917    if (rc) {
3918        return (rc);
3919    }
3920
3921    /* ...delete cfc entry */
3922    q_params.cmd = ECORE_Q_CMD_CFC_DEL;
3923    memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
3924    q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
3925    return (ecore_queue_state_change(sc, &q_params));
3926}
3927
3928/* wait for the outstanding SP commands */
3929static inline uint8_t
3930bxe_wait_sp_comp(struct bxe_softc *sc,
3931                 unsigned long    mask)
3932{
3933    unsigned long tmp;
3934    int tout = 5000; /* wait for 5 secs tops */
3935
3936    while (tout--) {
3937        mb();
3938        if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3939            return (TRUE);
3940        }
3941
3942        DELAY(1000);
3943    }
3944
3945    mb();
3946
3947    tmp = atomic_load_acq_long(&sc->sp_state);
3948    if (tmp & mask) {
3949        BLOGE(sc, "Filtering completion timed out: "
3950                  "sp_state 0x%lx, mask 0x%lx\n",
3951              tmp, mask);
3952        return (FALSE);
3953    }

    /* the mask cleared right at the timeout boundary so report success */
    return (TRUE);
3956}
3957
3958static int
3959bxe_func_stop(struct bxe_softc *sc)
3960{
3961    struct ecore_func_state_params func_params = { NULL };
3962    int rc;
3963
3964    /* prepare parameters for function state transitions */
3965    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3966    func_params.f_obj = &sc->func_obj;
3967    func_params.cmd = ECORE_F_CMD_STOP;
3968
3969    /*
3970     * Try to stop the function the 'good way'. If it fails (in case
3971     * of a parity error during bxe_chip_cleanup()) and we are
3972     * not in a debug mode, perform a state transaction in order to
3973     * enable further HW_RESET transaction.
3974     */
3975    rc = ecore_func_state_change(sc, &func_params);
3976    if (rc) {
3977        BLOGE(sc, "FUNC_STOP ramrod failed. "
3978                  "Running a dry transaction (%d)\n", rc);
3979        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
3980        return (ecore_func_state_change(sc, &func_params));
3981    }
3982
3983    return (0);
3984}
3985
3986static int
3987bxe_reset_hw(struct bxe_softc *sc,
3988             uint32_t         load_code)
3989{
3990    struct ecore_func_state_params func_params = { NULL };
3991
3992    /* Prepare parameters for function state transitions */
3993    bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
3994
3995    func_params.f_obj = &sc->func_obj;
3996    func_params.cmd = ECORE_F_CMD_HW_RESET;
3997
3998    func_params.params.hw_init.load_phase = load_code;
3999
4000    return (ecore_func_state_change(sc, &func_params));
4001}
4002
4003static void
4004bxe_int_disable_sync(struct bxe_softc *sc,
4005                     int              disable_hw)
4006{
4007    if (disable_hw) {
4008        /* prevent the HW from sending interrupts */
4009        bxe_int_disable(sc);
4010    }
4011
4012    /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4013    /* make sure all ISRs are done */
4014
4015    /* XXX make sure sp_task is not running */
4016    /* cancel and flush work queues */
4017}
4018
4019static void
4020bxe_chip_cleanup(struct bxe_softc *sc,
4021                 uint32_t         unload_mode,
4022                 uint8_t          keep_link)
4023{
4024    int port = SC_PORT(sc);
4025    struct ecore_mcast_ramrod_params rparam = { NULL };
4026    uint32_t reset_code;
4027    int i, rc = 0;
4028
4029    bxe_drain_tx_queues(sc);
4030
4031    /* give HW time to discard old tx messages */
4032    DELAY(1000);
4033
4034    /* Clean all ETH MACs */
4035    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4036    if (rc < 0) {
4037        BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4038    }
4039
4040    /* Clean up UC list  */
4041    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4042    if (rc < 0) {
4043        BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4044    }
4045
4046    /* Disable LLH */
4047    if (!CHIP_IS_E1(sc)) {
4048        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4049    }
4050
4051    /* Set "drop all" to stop Rx */
4052
4053    /*
4054     * We need to take the BXE_MCAST_LOCK() here in order to prevent
4055     * a race between the completion code and this code.
4056     */
4057    BXE_MCAST_LOCK(sc);
4058
4059    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4060        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4061    } else {
4062        bxe_set_storm_rx_mode(sc);
4063    }
4064
4065    /* Clean up multicast configuration */
4066    rparam.mcast_obj = &sc->mcast_obj;
4067    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4068    if (rc < 0) {
4069        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4070    }
4071
4072    BXE_MCAST_UNLOCK(sc);
4073
4074    // XXX bxe_iov_chip_cleanup(sc);
4075
4076    /*
4077     * Send the UNLOAD_REQUEST to the MCP. This will return if
4078     * this function should perform FUNCTION, PORT, or COMMON HW
4079     * reset.
4080     */
4081    reset_code = bxe_send_unload_req(sc, unload_mode);
4082
4083    /*
4084     * (assumption: No Attention from MCP at this stage)
4085     * PMF probably in the middle of TX disable/enable transaction
4086     */
4087    rc = bxe_func_wait_started(sc);
4088    if (rc) {
4089        BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc);
4090    }
4091
4092    /*
4093     * Close multi and leading connections
4094     * Completions for ramrods are collected in a synchronous way
4095     */
4096    for (i = 0; i < sc->num_queues; i++) {
4097        if (bxe_stop_queue(sc, i)) {
4098            goto unload_error;
4099        }
4100    }
4101
4102    /*
     * If the SP settings have not completed by now, something has gone
     * very wrong.
4105     */
4106    if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4107        BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc);
4108    }
4109
4110unload_error:
4111
4112    rc = bxe_func_stop(sc);
4113    if (rc) {
4114        BLOGE(sc, "Function stop failed!(%d)\n", rc);
4115    }
4116
4117    /* disable HW interrupts */
4118    bxe_int_disable_sync(sc, TRUE);
4119
4120    /* detach interrupts */
4121    bxe_interrupt_detach(sc);
4122
4123    /* Reset the chip */
4124    rc = bxe_reset_hw(sc, reset_code);
4125    if (rc) {
4126        BLOGE(sc, "Hardware reset failed(%d)\n", rc);
4127    }
4128
4129    /* Report UNLOAD_DONE to MCP */
4130    bxe_send_unload_done(sc, keep_link);
4131}
4132
4133static void
4134bxe_disable_close_the_gate(struct bxe_softc *sc)
4135{
4136    uint32_t val;
4137    int port = SC_PORT(sc);
4138
4139    BLOGD(sc, DBG_LOAD,
4140          "Disabling 'close the gates'\n");
4141
4142    if (CHIP_IS_E1(sc)) {
4143        uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4144                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
4145        val = REG_RD(sc, addr);
4146        val &= ~(0x300);
4147        REG_WR(sc, addr, val);
4148    } else {
4149        val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4150        val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4151                 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4152        REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4153    }
4154}
4155
4156/*
 * Cleans the objects that have internal lists, without sending
4158 * ramrods. Should be run when interrupts are disabled.
4159 */
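/*
 * A note on the mechanism: the RAMROD_DRV_CLR_ONLY flag set below is what
 * makes the ecore layer purge its internal lists without actually issuing
 * ramrods to the chip.
 */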
4160static void
4161bxe_squeeze_objects(struct bxe_softc *sc)
4162{
4163    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4164    struct ecore_mcast_ramrod_params rparam = { NULL };
4165    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4166    int rc;
4167
4168    /* Cleanup MACs' object first... */
4169
4170    /* Wait for completion of requested */
4171    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4172    /* Perform a dry cleanup */
4173    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4174
4175    /* Clean ETH primary MAC */
4176    bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4177    rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4178                             &ramrod_flags);
4179    if (rc != 0) {
4180        BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4181    }
4182
4183    /* Cleanup UC list */
4184    vlan_mac_flags = 0;
4185    bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4186    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4187                             &ramrod_flags);
4188    if (rc != 0) {
4189        BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4190    }
4191
4192    /* Now clean mcast object... */
4193
4194    rparam.mcast_obj = &sc->mcast_obj;
4195    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4196
4197    /* Add a DEL command... */
4198    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4199    if (rc < 0) {
4200        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4201    }
4202
4203    /* now wait until all pending commands are cleared */
4204
4205    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4206    while (rc != 0) {
4207        if (rc < 0) {
4208            BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4209            return;
4210        }
4211
4212        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4213    }
4214}
4215
4216/* stop the controller */
4217static __noinline int
4218bxe_nic_unload(struct bxe_softc *sc,
4219               uint32_t         unload_mode,
4220               uint8_t          keep_link)
4221{
4222    uint8_t global = FALSE;
4223    uint32_t val;
4224    int i;
4225
4226    BXE_CORE_LOCK_ASSERT(sc);
4227
4228    if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
4229
4230    for (i = 0; i < sc->num_queues; i++) {
4231        struct bxe_fastpath *fp;
4232
4233        fp = &sc->fp[i];
4234	fp->watchdog_timer = 0;
4235        BXE_FP_TX_LOCK(fp);
4236        BXE_FP_TX_UNLOCK(fp);
4237    }
4238
4239    BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4240
4241    /* mark driver as unloaded in shmem2 */
4242    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4243        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4244        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4245                  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4246    }
4247
4248    if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4249        (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4250
4251	if(CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
4252            /*
4253             * We can get here if the driver has been unloaded
4254             * during parity error recovery and is either waiting for a
4255             * leader to complete or for other functions to unload and
4256             * then ifconfig down has been issued. In this case we want to
             * unload and let the other functions complete the recovery
             * process.
4259             */
4260            sc->recovery_state = BXE_RECOVERY_DONE;
4261            sc->is_leader = 0;
4262            bxe_release_leader_lock(sc);
4263            mb();
4264            BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
4265	}
4266        BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x"
4267            " state = 0x%x\n", sc->recovery_state, sc->state);
4268        return (-1);
4269    }
4270
4271    /*
4272     * Nothing to do during unload if previous bxe_nic_load()
     * did not complete successfully - all resources have already been released.
4274     */
4275    if ((sc->state == BXE_STATE_CLOSED) ||
4276        (sc->state == BXE_STATE_ERROR)) {
4277        return (0);
4278    }
4279
4280    sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4281    mb();
4282
4283    /* stop tx */
4284    bxe_tx_disable(sc);
4285
4286    sc->rx_mode = BXE_RX_MODE_NONE;
4287    /* XXX set rx mode ??? */
4288
4289    if (IS_PF(sc) && !sc->grcdump_done) {
4290        /* set ALWAYS_ALIVE bit in shmem */
4291        sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4292
4293        bxe_drv_pulse(sc);
4294
4295        bxe_stats_handle(sc, STATS_EVENT_STOP);
4296        bxe_save_statistics(sc);
4297    }
4298
4299    /* wait till consumers catch up with producers in all queues */
4300    bxe_drain_tx_queues(sc);
4301
    /*
     * If this is a VF, indicate to the PF that this function is going down
     * (the PF will delete the sp elements and clear the initializations).
     */
4305    if (IS_VF(sc)) {
4306        ; /* bxe_vfpf_close_vf(sc); */
4307    } else if (unload_mode != UNLOAD_RECOVERY) {
4308        /* if this is a normal/close unload need to clean up chip */
4309        if (!sc->grcdump_done)
4310            bxe_chip_cleanup(sc, unload_mode, keep_link);
4311    } else {
4312        /* Send the UNLOAD_REQUEST to the MCP */
4313        bxe_send_unload_req(sc, unload_mode);
4314
4315        /*
         * Prevent transactions to the host from the functions on the
         * engine that does not reset the global blocks in case of a global
         * attention, once the global blocks are reset and the gates are
         * opened (the engine whose leader will perform the recovery last).
4321         */
4322        if (!CHIP_IS_E1x(sc)) {
4323            bxe_pf_disable(sc);
4324        }
4325
4326        /* disable HW interrupts */
4327        bxe_int_disable_sync(sc, TRUE);
4328
4329        /* detach interrupts */
4330        bxe_interrupt_detach(sc);
4331
4332        /* Report UNLOAD_DONE to MCP */
4333        bxe_send_unload_done(sc, FALSE);
4334    }
4335
4336    /*
4337     * At this stage no more interrupts will arrive so we may safely clean
4338     * the queue'able objects here in case they failed to get cleaned so far.
4339     */
4340    if (IS_PF(sc)) {
4341        bxe_squeeze_objects(sc);
4342    }
4343
4344    /* There should be no more pending SP commands at this stage */
4345    sc->sp_state = 0;
4346
4347    sc->port.pmf = 0;
4348
4349    bxe_free_fp_buffers(sc);
4350
4351    if (IS_PF(sc)) {
4352        bxe_free_mem(sc);
4353    }
4354
4355    bxe_free_fw_stats_mem(sc);
4356
4357    sc->state = BXE_STATE_CLOSED;
4358
4359    /*
4360     * Check if there are pending parity attentions. If there are - set
4361     * RECOVERY_IN_PROGRESS.
4362     */
4363    if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
4364        bxe_set_reset_in_progress(sc);
4365
4366        /* Set RESET_IS_GLOBAL if needed */
4367        if (global) {
4368            bxe_set_reset_global(sc);
4369        }
4370    }
4371
4372    /*
4373     * The last driver must disable a "close the gate" if there is no
4374     * parity attention or "process kill" pending.
4375     */
4376    if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
4377        bxe_reset_is_done(sc, SC_PATH(sc))) {
4378        bxe_disable_close_the_gate(sc);
4379    }
4380
4381    BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");
4382
4383    bxe_link_report(sc);
4384
4385    return (0);
4386}
4387
4388/*
4389 * Called by the OS to set various media options (i.e. link, speed, etc.) when
4390 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...".
4391 */
4392static int
4393bxe_ifmedia_update(if_t ifp)
4394{
4395    struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp);
4396    struct ifmedia *ifm;
4397
4398    ifm = &sc->ifmedia;
4399
4400    /* We only support Ethernet media type. */
4401    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4402        return (EINVAL);
4403    }
4404
4405    switch (IFM_SUBTYPE(ifm->ifm_media)) {
4406    case IFM_AUTO:
4407         break;
4408    case IFM_10G_CX4:
4409    case IFM_10G_SR:
4410    case IFM_10G_T:
4411    case IFM_10G_TWINAX:
4412    default:
4413        /* We don't support changing the media type. */
4414        BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n",
4415              IFM_SUBTYPE(ifm->ifm_media));
4416        return (EINVAL);
4417    }
4418
4419    return (0);
4420}
4421
4422/*
4423 * Called by the OS to get the current media status (i.e. link, speed, etc.).
4424 */
4425static void
4426bxe_ifmedia_status(if_t ifp, struct ifmediareq *ifmr)
4427{
4428    struct bxe_softc *sc = if_getsoftc(ifp);
4429
4430    /* Bug 165447: the 'ifconfig' tool skips printing of the "status: ..."
4431       line if the IFM_AVALID flag is *NOT* set. So we need to set this
       flag unconditionally (irrespective of the administrative
4433       'up/down' state of the interface) to ensure that the line is always
4434       displayed.
4435    */
4436    ifmr->ifm_status = IFM_AVALID;
4437
4438    /* Setup the default interface info. */
4439    ifmr->ifm_active = IFM_ETHER;
4440
4441    /* Report link down if the driver isn't running. */
4442    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
4443        ifmr->ifm_active |= IFM_NONE;
4444        BLOGD(sc, DBG_PHY, "in %s : nic still not loaded fully\n", __func__);
4445        BLOGD(sc, DBG_PHY, "in %s : link_up (1) : %d\n",
4446                __func__, sc->link_vars.link_up);
4447        return;
4448    }
4449
4451    if (sc->link_vars.link_up) {
4452        ifmr->ifm_status |= IFM_ACTIVE;
4453        ifmr->ifm_active |= IFM_FDX;
4454    } else {
4455        ifmr->ifm_active |= IFM_NONE;
4456        BLOGD(sc, DBG_PHY, "in %s : setting IFM_NONE\n",
4457                __func__);
4458        return;
4459    }
4460
4461    ifmr->ifm_active |= sc->media;
4462    return;
4463}
4464
4465static void
4466bxe_handle_chip_tq(void *context,
4467                   int  pending)
4468{
4469    struct bxe_softc *sc = (struct bxe_softc *)context;
4470    long work = atomic_load_acq_long(&sc->chip_tq_flags);
4471
4472    switch (work)
4473    {
4474
4475    case CHIP_TQ_REINIT:
4476        if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
4477            /* restart the interface */
4478            BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4479            bxe_periodic_stop(sc);
4480            BXE_CORE_LOCK(sc);
4481            bxe_stop_locked(sc);
4482            bxe_init_locked(sc);
4483            BXE_CORE_UNLOCK(sc);
4484        }
4485        break;
4486
4487    default:
4488        break;
4489    }
4490}
4491
4492/*
4493 * Handles any IOCTL calls from the operating system.
4494 *
4495 * Returns:
4496 *   0 = Success, >0 Failure
4497 */
4498static int
4499bxe_ioctl(if_t ifp,
4500          u_long       command,
4501          caddr_t      data)
4502{
4503    struct bxe_softc *sc = if_getsoftc(ifp);
4504    struct ifreq *ifr = (struct ifreq *)data;
4505    int mask = 0;
4506    int reinit = 0;
4507    int error = 0;
4508
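    /*
     * The legal MTU range below runs from the minimum Ethernet packet size
     * minus the Ethernet header up to a 9KB jumbo cluster minus the Ethernet
     * overhead and the IP header alignment padding.
     */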
4509    int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4510    int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4511
4512    switch (command)
4513    {
4514    case SIOCSIFMTU:
4515        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4516              ifr->ifr_mtu);
4517
4518        if (sc->mtu == ifr->ifr_mtu) {
4519            /* nothing to change */
4520            break;
4521        }
4522
4523        if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4524            BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4525                  ifr->ifr_mtu, mtu_min, mtu_max);
4526            error = EINVAL;
4527            break;
4528        }
4529
4530        atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4531                             (unsigned long)ifr->ifr_mtu);
4532	/*
4533        atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
4534                              (unsigned long)ifr->ifr_mtu);
4535	XXX - Not sure why it needs to be atomic
4536	*/
4537	if_setmtu(ifp, ifr->ifr_mtu);
4538        reinit = 1;
4539        break;
4540
4541    case SIOCSIFFLAGS:
4542        /* toggle the interface state up or down */
4543        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4544
4545	BXE_CORE_LOCK(sc);
4546        /* check if the interface is up */
4547        if (if_getflags(ifp) & IFF_UP) {
4548            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4549                /* set the receive mode flags */
4550                bxe_set_rx_mode(sc);
4551            } else if(sc->state != BXE_STATE_DISABLED) {
4552		bxe_init_locked(sc);
4553            }
4554        } else {
4555            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4556		bxe_periodic_stop(sc);
4557		bxe_stop_locked(sc);
4558            }
4559        }
4560	BXE_CORE_UNLOCK(sc);
4561
4562        break;
4563
4564    case SIOCADDMULTI:
4565    case SIOCDELMULTI:
4566        /* add/delete multicast addresses */
4567        BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4568
4569        /* check if the interface is up */
4570        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4571            /* set the receive mode flags */
4572	    BXE_CORE_LOCK(sc);
4573            bxe_set_rx_mode(sc);
4574	    BXE_CORE_UNLOCK(sc);
4575        }
4576
4577        break;
4578
4579    case SIOCSIFCAP:
4580        /* find out which capabilities have changed */
4581        mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));
4582
4583        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4584              mask);
4585
        /* toggle the LRO capabilities enable flag */
4587        if (mask & IFCAP_LRO) {
4588	    if_togglecapenable(ifp, IFCAP_LRO);
4589            BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4590                  (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
4591            reinit = 1;
4592        }
4593
        /* toggle the TXCSUM checksum capabilities enable flag */
4595        if (mask & IFCAP_TXCSUM) {
4596	    if_togglecapenable(ifp, IFCAP_TXCSUM);
4597            BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4598                  (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
4599            if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
4600                if_sethwassistbits(ifp, (CSUM_IP      |
4601                                    CSUM_TCP      |
4602                                    CSUM_UDP      |
4603                                    CSUM_TSO      |
4604                                    CSUM_TCP_IPV6 |
4605                                    CSUM_UDP_IPV6), 0);
4606            } else {
4607		if_clearhwassist(ifp); /* XXX */
4608            }
4609        }
4610
4611        /* toggle the RXCSUM checksum capabilities enable flag */
4612        if (mask & IFCAP_RXCSUM) {
4613	    if_togglecapenable(ifp, IFCAP_RXCSUM);
4614            BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4615                  (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
4616            if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
4617                if_sethwassistbits(ifp, (CSUM_IP      |
4618                                    CSUM_TCP      |
4619                                    CSUM_UDP      |
4620                                    CSUM_TSO      |
4621                                    CSUM_TCP_IPV6 |
4622                                    CSUM_UDP_IPV6), 0);
4623            } else {
4624		if_clearhwassist(ifp); /* XXX */
4625            }
4626        }
4627
4628        /* toggle TSO4 capabilities enabled flag */
4629        if (mask & IFCAP_TSO4) {
4630            if_togglecapenable(ifp, IFCAP_TSO4);
4631            BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4632                  (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
4633        }
4634
4635        /* toggle TSO6 capabilities enabled flag */
4636        if (mask & IFCAP_TSO6) {
4637	    if_togglecapenable(ifp, IFCAP_TSO6);
4638            BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4639                  (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
4640        }
4641
4642        /* toggle VLAN_HWTSO capabilities enabled flag */
4643        if (mask & IFCAP_VLAN_HWTSO) {
4644
4645	    if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
4646            BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4647                  (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4648        }
4649
4650        /* toggle VLAN_HWCSUM capabilities enabled flag */
4651        if (mask & IFCAP_VLAN_HWCSUM) {
4652            /* XXX investigate this... */
4653            BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4654            error = EINVAL;
4655        }
4656
4657        /* toggle VLAN_MTU capabilities enable flag */
4658        if (mask & IFCAP_VLAN_MTU) {
4659            /* XXX investigate this... */
4660            BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4661            error = EINVAL;
4662        }
4663
4664        /* toggle VLAN_HWTAGGING capabilities enabled flag */
4665        if (mask & IFCAP_VLAN_HWTAGGING) {
4666            /* XXX investigate this... */
4667            BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4668            error = EINVAL;
4669        }
4670
4671        /* toggle VLAN_HWFILTER capabilities enabled flag */
4672        if (mask & IFCAP_VLAN_HWFILTER) {
4673            /* XXX investigate this... */
4674            BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4675            error = EINVAL;
4676        }
4677
4678        /* XXX not yet...
4679         * IFCAP_WOL_MAGIC
4680         */
4681
4682        break;
4683
4684    case SIOCSIFMEDIA:
4685    case SIOCGIFMEDIA:
4686        /* set/get interface media */
4687        BLOGD(sc, DBG_IOCTL,
4688              "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4689              (command & 0xff));
4690        error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4691        break;
4692
4693    default:
4694        BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4695              (command & 0xff));
4696        error = ether_ioctl(ifp, command, data);
4697        break;
4698    }
4699
4700    if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4701        BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4702              "Re-initializing hardware from IOCTL change\n");
4703	bxe_periodic_stop(sc);
4704	BXE_CORE_LOCK(sc);
4705	bxe_stop_locked(sc);
4706	bxe_init_locked(sc);
4707	BXE_CORE_UNLOCK(sc);
4708    }
4709
4710    return (error);
4711}
4712
4713static __noinline void
4714bxe_dump_mbuf(struct bxe_softc *sc,
4715              struct mbuf      *m,
4716              uint8_t          contents)
4717{
4718    char * type;
4719    int i = 0;
4720
4721    if (!(sc->debug & DBG_MBUF)) {
4722        return;
4723    }
4724
4725    if (m == NULL) {
4726        BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
4727        return;
4728    }
4729
4730    while (m) {
4731
4732        BLOGD(sc, DBG_MBUF,
4733              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
4734              i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4735
4736        if (m->m_flags & M_PKTHDR) {
4737             BLOGD(sc, DBG_MBUF,
4738                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4739                   i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4740                   (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4741        }
4742
4743        if (m->m_flags & M_EXT) {
4744            switch (m->m_ext.ext_type) {
4745            case EXT_CLUSTER:    type = "EXT_CLUSTER";    break;
4746            case EXT_SFBUF:      type = "EXT_SFBUF";      break;
4747            case EXT_JUMBOP:     type = "EXT_JUMBOP";     break;
4748            case EXT_JUMBO9:     type = "EXT_JUMBO9";     break;
4749            case EXT_JUMBO16:    type = "EXT_JUMBO16";    break;
4750            case EXT_PACKET:     type = "EXT_PACKET";     break;
4751            case EXT_MBUF:       type = "EXT_MBUF";       break;
4752            case EXT_NET_DRV:    type = "EXT_NET_DRV";    break;
4753            case EXT_MOD_TYPE:   type = "EXT_MOD_TYPE";   break;
4754            case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
4755            case EXT_EXTREF:     type = "EXT_EXTREF";     break;
4756            default:             type = "UNKNOWN";        break;
4757            }
4758
4759            BLOGD(sc, DBG_MBUF,
4760                  "%02d: - m_ext: %p ext_size=%d type=%s\n",
4761                  i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4762        }
4763
4764        if (contents) {
4765            bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
4766        }
4767
4768        m = m->m_next;
4769        i++;
4770    }
4771}
4772
4773/*
 * Checks that the 13 BD sliding window is >= MSS for TSO.
 * Check that (13 total BDs - 3 BDs) = 10 BD window >= MSS.
 * The window excludes 3 BDs: 1 for the headers BD plus 2 for the parsing BD
 * and the last BD. The headers come in a separate BD in FreeBSD, so 13-3=10.
 * Returns: 0 if OK to send, 1 if the packet needs further defragmentation.
4779 */
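/*
 * A worked example with hypothetical numbers: for an MSS of 1460 bytes and a
 * packet split into 13 segments of 512 bytes each, any 10 segment window sums
 * to 5120 bytes >= 1460, so the packet may be sent as-is. If the segments
 * were only 100 bytes each, a window would sum to 1000 bytes < 1460 and the
 * packet would need to be defragmented first.
 */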
4780static int
4781bxe_chktso_window(struct bxe_softc  *sc,
4782                  int               nsegs,
4783                  bus_dma_segment_t *segs,
4784                  struct mbuf       *m)
4785{
4786    uint32_t num_wnds, wnd_size, wnd_sum;
4787    int32_t frag_idx, wnd_idx;
4788    unsigned short lso_mss;
4789
4790    wnd_sum = 0;
4791    wnd_size = 10;
4792    num_wnds = nsegs - wnd_size;
4793    lso_mss = htole16(m->m_pkthdr.tso_segsz);
4794
4795    /*
     * The Eth+IP+TCP headers occupy the first FreeBSD mbuf, so calculate the
     * first window sum over the data segments while skipping segment 0 on
     * the assumption that it holds only the headers.
4799     */
4800    for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
4801        wnd_sum += htole16(segs[frag_idx].ds_len);
4802    }
4803
4804    /* check the first 10 bd window size */
4805    if (wnd_sum < lso_mss) {
4806        return (1);
4807    }
4808
4809    /* run through the windows */
4810    for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
        /* slide the window: drop the oldest segment of the previous window */
4812        wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
4813        /* add the next mbuf len to the len of our new window */
4814        wnd_sum += htole16(segs[frag_idx].ds_len);
4815        if (wnd_sum < lso_mss) {
4816            return (1);
4817        }
4818    }
4819
4820    return (0);
4821}
4822
4823static uint8_t
4824bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
4825                    struct mbuf         *m,
4826                    uint32_t            *parsing_data)
4827{
4828    struct ether_vlan_header *eh = NULL;
4829    struct ip *ip4 = NULL;
4830    struct ip6_hdr *ip6 = NULL;
4831    caddr_t ip = NULL;
4832    struct tcphdr *th = NULL;
4833    int e_hlen, ip_hlen, l4_off;
4834    uint16_t proto;
4835
4836    if (m->m_pkthdr.csum_flags == CSUM_IP) {
4837        /* no L4 checksum offload needed */
4838        return (0);
4839    }
4840
4841    /* get the Ethernet header */
4842    eh = mtod(m, struct ether_vlan_header *);
4843
4844    /* handle VLAN encapsulation if present */
4845    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4846        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4847        proto  = ntohs(eh->evl_proto);
4848    } else {
4849        e_hlen = ETHER_HDR_LEN;
4850        proto  = ntohs(eh->evl_encap_proto);
4851    }
4852
4853    switch (proto) {
4854    case ETHERTYPE_IP:
4855        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4856        ip4 = (m->m_len < sizeof(struct ip)) ?
4857                  (struct ip *)m->m_next->m_data :
4858                  (struct ip *)(m->m_data + e_hlen);
4859        /* ip_hl is number of 32-bit words */
4860        ip_hlen = (ip4->ip_hl << 2);
4861        ip = (caddr_t)ip4;
4862        break;
4863    case ETHERTYPE_IPV6:
4864        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4865        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4866                  (struct ip6_hdr *)m->m_next->m_data :
4867                  (struct ip6_hdr *)(m->m_data + e_hlen);
4868        /* XXX cannot support offload with IPv6 extensions */
4869        ip_hlen = sizeof(struct ip6_hdr);
4870        ip = (caddr_t)ip6;
4871        break;
4872    default:
4873        /* We can't offload in this case... */
4874        /* XXX error stat ??? */
4875        return (0);
4876    }
4877
4878    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4879    l4_off = (e_hlen + ip_hlen);
4880
4881    *parsing_data |=
4882        (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
4883         ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);
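    /*
     * For example, for an untagged IPv4/TCP frame with no IP options,
     * e_hlen is 14 and ip_hlen is 20, so l4_off is 34 bytes, which is
     * encoded as 17 16-bit words in the L4 header start offset field.
     */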
4884
4885    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4886                                  CSUM_TSO |
4887                                  CSUM_TCP_IPV6)) {
4888        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4889        th = (struct tcphdr *)(ip + ip_hlen);
4890        /* th_off is number of 32-bit words */
4891        *parsing_data |= ((th->th_off <<
4892                           ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
4893                          ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
4894        return (l4_off + (th->th_off << 2)); /* entire header length */
4895    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4896                                         CSUM_UDP_IPV6)) {
4897        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4898        return (l4_off + sizeof(struct udphdr)); /* entire header length */
4899    } else {
4900        /* XXX error stat ??? */
4901        return (0);
4902    }
4903}
4904
4905static uint8_t
4906bxe_set_pbd_csum(struct bxe_fastpath        *fp,
4907                 struct mbuf                *m,
4908                 struct eth_tx_parse_bd_e1x *pbd)
4909{
4910    struct ether_vlan_header *eh = NULL;
4911    struct ip *ip4 = NULL;
4912    struct ip6_hdr *ip6 = NULL;
4913    caddr_t ip = NULL;
4914    struct tcphdr *th = NULL;
4915    struct udphdr *uh = NULL;
4916    int e_hlen, ip_hlen;
4917    uint16_t proto;
4918    uint8_t hlen;
4919    uint16_t tmp_csum;
4920    uint32_t *tmp_uh;
4921
4922    /* get the Ethernet header */
4923    eh = mtod(m, struct ether_vlan_header *);
4924
4925    /* handle VLAN encapsulation if present */
4926    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4927        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
4928        proto  = ntohs(eh->evl_proto);
4929    } else {
4930        e_hlen = ETHER_HDR_LEN;
4931        proto  = ntohs(eh->evl_encap_proto);
4932    }
4933
4934    switch (proto) {
4935    case ETHERTYPE_IP:
4936        /* get the IP header, if mbuf len < 20 then header in next mbuf */
4937        ip4 = (m->m_len < sizeof(struct ip)) ?
4938                  (struct ip *)m->m_next->m_data :
4939                  (struct ip *)(m->m_data + e_hlen);
        /* ip_hl is in 32-bit words; ip_hlen here is kept in 16-bit words */
4941        ip_hlen = (ip4->ip_hl << 1);
4942        ip = (caddr_t)ip4;
4943        break;
4944    case ETHERTYPE_IPV6:
4945        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
4946        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4947                  (struct ip6_hdr *)m->m_next->m_data :
4948                  (struct ip6_hdr *)(m->m_data + e_hlen);
4949        /* XXX cannot support offload with IPv6 extensions */
4950        ip_hlen = (sizeof(struct ip6_hdr) >> 1);
4951        ip = (caddr_t)ip6;
4952        break;
4953    default:
4954        /* We can't offload in this case... */
4955        /* XXX error stat ??? */
4956        return (0);
4957    }
4958
4959    hlen = (e_hlen >> 1);
4960
4961    /* note that rest of global_data is indirectly zeroed here */
4962    if (m->m_flags & M_VLANTAG) {
4963        pbd->global_data =
4964            htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
4965    } else {
4966        pbd->global_data = htole16(hlen);
4967    }
4968
4969    pbd->ip_hlen_w = ip_hlen;
4970
4971    hlen += pbd->ip_hlen_w;
4972
4973    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
4974
4975    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4976                                  CSUM_TSO |
4977                                  CSUM_TCP_IPV6)) {
4978        th = (struct tcphdr *)(ip + (ip_hlen << 1));
        /* th_off is in 32-bit words; convert to 16-bit words */
4980        hlen += (uint16_t)(th->th_off << 1);
4981    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4982                                         CSUM_UDP_IPV6)) {
4983        uh = (struct udphdr *)(ip + (ip_hlen << 1));
4984        hlen += (sizeof(struct udphdr) / 2);
4985    } else {
4986        /* valid case as only CSUM_IP was set */
4987        return (0);
4988    }
4989
4990    pbd->total_hlen_w = htole16(hlen);
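    /*
     * Example: for an untagged IPv4/TCP frame with no IP or TCP options,
     * hlen is 7 (Ethernet) + 10 (IP) + 10 (TCP) = 27 16-bit words, so
     * total_hlen_w is 27 and the function returns 54 bytes.
     */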
4991
4992    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4993                                  CSUM_TSO |
4994                                  CSUM_TCP_IPV6)) {
4995        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4996        pbd->tcp_pseudo_csum = ntohs(th->th_sum);
4997    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4998                                         CSUM_UDP_IPV6)) {
4999        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5000
        /*
         * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
         * checksums and knows nothing about the UDP header or where its
         * checksum field is located; it only understands TCP. Therefore we
         * "lie" to the hardware for outgoing UDP packets with checksum
         * offload. The checksum field offset is 16 bytes into a TCP header
         * but only 6 bytes into a UDP header, so we pass the hardware a
         * pointer that is 10 bytes before the start of the UDP header. This
         * allows the hardware to write the checksum in the correct spot.
         * But the hardware will then compute a checksum which includes the
         * last 10 bytes of the IP header. To correct this we tweak the
         * stack-computed pseudo checksum by folding in the inverse checksum
         * of those final 10 bytes of the IP header, so the hardware ends up
         * producing the correct checksum.
         */
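        /*
         * Offset arithmetic: th_sum lives at byte offset 16 in struct tcphdr
         * while uh_sum lives at byte offset 6 in struct udphdr, hence the
         * 16 - 6 = 10 byte backward adjustment below.
         */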
5016
5017        /* set pointer 10 bytes before UDP header */
5018        tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5019
5020        /* calculate a pseudo header checksum over the first 10 bytes */
5021        tmp_csum = in_pseudo(*tmp_uh,
5022                             *(tmp_uh + 1),
5023                             *(uint16_t *)(tmp_uh + 2));
5024
5025        pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5026    }
5027
5028    return (hlen * 2); /* entire header length, number of bytes */
5029}
5030
5031static void
5032bxe_set_pbd_lso_e2(struct mbuf *m,
5033                   uint32_t    *parsing_data)
5034{
5035    *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5036                       ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
5037                      ETH_TX_PARSE_BD_E2_LSO_MSS);
5038
5039    /* XXX test for IPv6 with extension header... */
5040}
5041
5042static void
5043bxe_set_pbd_lso(struct mbuf                *m,
5044                struct eth_tx_parse_bd_e1x *pbd)
5045{
5046    struct ether_vlan_header *eh = NULL;
5047    struct ip *ip = NULL;
5048    struct tcphdr *th = NULL;
5049    int e_hlen;
5050
5051    /* get the Ethernet header */
5052    eh = mtod(m, struct ether_vlan_header *);
5053
5054    /* handle VLAN encapsulation if present */
5055    e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5056                 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5057
5058    /* get the IP and TCP header, with LSO entire header in first mbuf */
5059    /* XXX assuming IPv4 */
5060    ip = (struct ip *)(m->m_data + e_hlen);
5061    th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5062
5063    pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5064    pbd->tcp_send_seq = ntohl(th->th_seq);
5065    pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5066
5067#if 1
5068        /* XXX IPv4 */
5069        pbd->ip_id = ntohs(ip->ip_id);
5070        pbd->tcp_pseudo_csum =
5071            ntohs(in_pseudo(ip->ip_src.s_addr,
5072                            ip->ip_dst.s_addr,
5073                            htons(IPPROTO_TCP)));
5074#else
5075        /* XXX IPv6 */
5076        pbd->tcp_pseudo_csum =
5077            ntohs(in_pseudo(&ip6->ip6_src,
5078                            &ip6->ip6_dst,
5079                            htons(IPPROTO_TCP)));
5080#endif
5081
5082    pbd->global_data |=
5083        htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5084}
5085
5086/*
 * Encapsulate an mbuf cluster into the tx BD chain and make the memory
 * visible to the controller.
5089 *
5090 * If an mbuf is submitted to this routine and cannot be given to the
5091 * controller (e.g. it has too many fragments) then the function may free
5092 * the mbuf and return to the caller.
5093 *
5094 * Returns:
5095 *   0 = Success, !0 = Failure
5096 *   Note the side effect that an mbuf may be freed if it causes a problem.
5097 */
5098static int
5099bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5100{
5101    bus_dma_segment_t segs[32];
5102    struct mbuf *m0;
5103    struct bxe_sw_tx_bd *tx_buf;
5104    struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5105    struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5106    /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5107    struct eth_tx_bd *tx_data_bd;
5108    struct eth_tx_bd *tx_total_pkt_size_bd;
5109    struct eth_tx_start_bd *tx_start_bd;
5110    uint16_t bd_prod, pkt_prod, total_pkt_size;
5111    uint8_t mac_type;
5112    int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5113    struct bxe_softc *sc;
5114    uint16_t tx_bd_avail;
5115    struct ether_vlan_header *eh;
5116    uint32_t pbd_e2_parsing_data = 0;
5117    uint8_t hlen = 0;
5118    int tmp_bd;
5119    int i;
5120
5121    sc = fp->sc;
5122
5123    M_ASSERTPKTHDR(*m_head);
5124
5125    m0 = *m_head;
5126    rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5127    tx_start_bd = NULL;
5128    tx_data_bd = NULL;
5129    tx_total_pkt_size_bd = NULL;
5130
5131    /* get the H/W pointer for packets and BDs */
5132    pkt_prod = fp->tx_pkt_prod;
5133    bd_prod = fp->tx_bd_prod;
5134
5135    mac_type = UNICAST_ADDRESS;
5136
5137    /* map the mbuf into the next open DMAable memory */
5138    tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5139    error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5140                                    tx_buf->m_map, m0,
5141                                    segs, &nsegs, BUS_DMA_NOWAIT);
5142
5143    /* mapping errors */
    if (__predict_false(error != 0)) {
5145        fp->eth_q_stats.tx_dma_mapping_failure++;
5146        if (error == ENOMEM) {
5147            /* resource issue, try again later */
5148            rc = ENOMEM;
5149        } else if (error == EFBIG) {
5150            /* possibly recoverable with defragmentation */
5151            fp->eth_q_stats.mbuf_defrag_attempts++;
5152            m0 = m_defrag(*m_head, M_NOWAIT);
5153            if (m0 == NULL) {
5154                fp->eth_q_stats.mbuf_defrag_failures++;
5155                rc = ENOBUFS;
5156            } else {
5157                /* defrag successful, try mapping again */
5158                *m_head = m0;
5159                error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5160                                                tx_buf->m_map, m0,
5161                                                segs, &nsegs, BUS_DMA_NOWAIT);
5162                if (error) {
5163                    fp->eth_q_stats.tx_dma_mapping_failure++;
5164                    rc = error;
5165                }
5166            }
5167        } else {
5168            /* unknown, unrecoverable mapping error */
5169            BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5170            bxe_dump_mbuf(sc, m0, FALSE);
5171            rc = error;
5172        }
5173
5174        goto bxe_tx_encap_continue;
5175    }
5176
5177    tx_bd_avail = bxe_tx_avail(sc, fp);
5178
5179    /* make sure there is enough room in the send queue */
5180    if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5181        /* Recoverable, try again later. */
5182        fp->eth_q_stats.tx_hw_queue_full++;
5183        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5184        rc = ENOMEM;
5185        goto bxe_tx_encap_continue;
5186    }
5187
5188    /* capture the current H/W TX chain high watermark */
5189    if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5190                        (TX_BD_USABLE - tx_bd_avail))) {
5191        fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5192    }
5193
5194    /* make sure it fits in the packet window */
5195    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5196        /*
         * The mbuf may be too big for the controller to handle. If the frame
5198         * is a TSO frame we'll need to do an additional check.
5199         */
5200        if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5201            if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5202                goto bxe_tx_encap_continue; /* OK to send */
5203            } else {
5204                fp->eth_q_stats.tx_window_violation_tso++;
5205            }
5206        } else {
5207            fp->eth_q_stats.tx_window_violation_std++;
5208        }
5209
5210        /* lets try to defragment this mbuf and remap it */
5211        fp->eth_q_stats.mbuf_defrag_attempts++;
5212        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5213
5214        m0 = m_defrag(*m_head, M_NOWAIT);
5215        if (m0 == NULL) {
5216            fp->eth_q_stats.mbuf_defrag_failures++;
5217            /* Ugh, just drop the frame... :( */
5218            rc = ENOBUFS;
5219        } else {
5220            /* defrag successful, try mapping again */
5221            *m_head = m0;
5222            error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5223                                            tx_buf->m_map, m0,
5224                                            segs, &nsegs, BUS_DMA_NOWAIT);
5225            if (error) {
5226                fp->eth_q_stats.tx_dma_mapping_failure++;
5227                /* No sense in trying to defrag/copy chain, drop it. :( */
5228                rc = error;
5229            } else {
                /* if the chain is still too long then drop it */
                if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5232                    /*
5233                     * in case TSO is enabled nsegs should be checked against
5234                     * BXE_TSO_MAX_SEGMENTS
5235                     */
5236                    if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) {
5237                        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5238                        fp->eth_q_stats.nsegs_path1_errors++;
5239                        rc = ENODEV;
5240                    }
5241                } else {
5242                    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5243                        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5244                        fp->eth_q_stats.nsegs_path2_errors++;
5245                        rc = ENODEV;
5246                    }
5247                }
5248            }
5249        }
5250    }
5251
5252bxe_tx_encap_continue:
5253
5254    /* Check for errors */
5255    if (rc) {
5256        if (rc == ENOMEM) {
5257            /* recoverable try again later  */
5258        } else {
5259            fp->eth_q_stats.tx_soft_errors++;
5260            fp->eth_q_stats.mbuf_alloc_tx--;
5261            m_freem(*m_head);
5262            *m_head = NULL;
5263        }
5264
5265        return (rc);
5266    }
5267
5268    /* set flag according to packet type (UNICAST_ADDRESS is default) */
5269    if (m0->m_flags & M_BCAST) {
5270        mac_type = BROADCAST_ADDRESS;
5271    } else if (m0->m_flags & M_MCAST) {
5272        mac_type = MULTICAST_ADDRESS;
5273    }
5274
5275    /* store the mbuf into the mbuf ring */
5276    tx_buf->m        = m0;
5277    tx_buf->first_bd = fp->tx_bd_prod;
5278    tx_buf->flags    = 0;
5279
5280    /* prepare the first transmit (start) BD for the mbuf */
5281    tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5282
5283    BLOGD(sc, DBG_TX,
5284          "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
5285          pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5286
5287    tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5288    tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5289    tx_start_bd->nbytes  = htole16(segs[0].ds_len);
5290    total_pkt_size += tx_start_bd->nbytes;
5291    tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5292
5293    tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5294
5295    /* all frames have at least Start BD + Parsing BD */
5296    nbds = nsegs + 1;
5297    tx_start_bd->nbd = htole16(nbds);
5298
5299    if (m0->m_flags & M_VLANTAG) {
5300        tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5301        tx_start_bd->bd_flags.as_bitfield |=
5302            (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
5303    } else {
5304        /* vf tx, start bd must hold the ethertype for fw to enforce it */
5305        if (IS_VF(sc)) {
5306            /* map ethernet header to find type and header length */
5307            eh = mtod(m0, struct ether_vlan_header *);
5308            tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5309        } else {
5310            /* used by FW for packet accounting */
5311            tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5312        }
5313    }
5314
    /*
     * Reserve the next BD in the chain for the parsing BD. The parsing BD
     * is always added even though it is only used for TSO and checksum
     * offloads.
     */
5319    bd_prod = TX_BD_NEXT(bd_prod);
5320
5321    if (m0->m_pkthdr.csum_flags) {
5322        if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5323            fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5324            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5325        }
5326
5327        if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5328            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5329                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5330        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5331            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6   |
5332                                                  ETH_TX_BD_FLAGS_IS_UDP |
5333                                                  ETH_TX_BD_FLAGS_L4_CSUM);
5334        } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5335                   (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5336            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5337        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5338            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5339                                                  ETH_TX_BD_FLAGS_IS_UDP);
5340        }
5341    }
5342
5343    if (!CHIP_IS_E1x(sc)) {
5344        pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5345        memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5346
5347        if (m0->m_pkthdr.csum_flags) {
5348            hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5349        }
5350
5351        SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5352                 mac_type);
5353    } else {
5354        uint16_t global_data = 0;
5355
5356        pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5357        memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5358
5359        if (m0->m_pkthdr.csum_flags) {
5360            hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5361        }
5362
5363        SET_FLAG(global_data,
5364                 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5365        pbd_e1x->global_data |= htole16(global_data);
5366    }
5367
5368    /* setup the parsing BD with TSO specific info */
5369    if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5370        fp->eth_q_stats.tx_ofld_frames_lso++;
5371        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5372
5373        if (__predict_false(tx_start_bd->nbytes > hlen)) {
5374            fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5375
5376            /* split the first BD into header/data making the fw job easy */
5377            nbds++;
5378            tx_start_bd->nbd = htole16(nbds);
5379            tx_start_bd->nbytes = htole16(hlen);
5380
5381            bd_prod = TX_BD_NEXT(bd_prod);
5382
5383            /* new transmit BD after the tx_parse_bd */
5384            tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5385            tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5386            tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5387            tx_data_bd->nbytes  = htole16(segs[0].ds_len - hlen);
5388            if (tx_total_pkt_size_bd == NULL) {
5389                tx_total_pkt_size_bd = tx_data_bd;
5390            }
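            /*
             * Example: with hlen = 54 and a 1514 byte first segment, the
             * start BD now describes just the 54 header bytes and this data
             * BD describes the remaining 1460 bytes at ds_addr + 54.
             */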
5391
5392            BLOGD(sc, DBG_TX,
5393                  "TSO split header size is %d (%x:%x) nbds %d\n",
5394                  le16toh(tx_start_bd->nbytes),
5395                  le32toh(tx_start_bd->addr_hi),
5396                  le32toh(tx_start_bd->addr_lo),
5397                  nbds);
5398        }
5399
5400        if (!CHIP_IS_E1x(sc)) {
5401            bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5402        } else {
5403            bxe_set_pbd_lso(m0, pbd_e1x);
5404        }
5405    }
5406
5407    if (pbd_e2_parsing_data) {
5408        pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5409    }
5410
5411    /* prepare remaining BDs, start tx bd contains first seg/frag */
5412    for (i = 1; i < nsegs ; i++) {
5413        bd_prod = TX_BD_NEXT(bd_prod);
5414        tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5415        tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5416        tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5417        tx_data_bd->nbytes  = htole16(segs[i].ds_len);
5418        if (tx_total_pkt_size_bd == NULL) {
5419            tx_total_pkt_size_bd = tx_data_bd;
5420        }
5421        total_pkt_size += tx_data_bd->nbytes;
5422    }
5423
5424    BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5425
5426    if (tx_total_pkt_size_bd != NULL) {
5427        tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5428    }
5429
5430    if (__predict_false(sc->debug & DBG_TX)) {
5431        tmp_bd = tx_buf->first_bd;
5432        for (i = 0; i < nbds; i++)
5433        {
5434            if (i == 0) {
5435                BLOGD(sc, DBG_TX,
5436                      "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5437                      "bd_flags=0x%x hdr_nbds=%d\n",
5438                      tx_start_bd,
5439                      tmp_bd,
5440                      le16toh(tx_start_bd->nbd),
5441                      le16toh(tx_start_bd->vlan_or_ethertype),
5442                      tx_start_bd->bd_flags.as_bitfield,
5443                      (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5444            } else if (i == 1) {
5445                if (pbd_e1x) {
5446                    BLOGD(sc, DBG_TX,
5447                          "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5448                          "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5449                          "tcp_seq=%u total_hlen_w=%u\n",
5450                          pbd_e1x,
5451                          tmp_bd,
5452                          pbd_e1x->global_data,
5453                          pbd_e1x->ip_hlen_w,
5454                          pbd_e1x->ip_id,
5455                          pbd_e1x->lso_mss,
5456                          pbd_e1x->tcp_flags,
5457                          pbd_e1x->tcp_pseudo_csum,
5458                          pbd_e1x->tcp_send_seq,
5459                          le16toh(pbd_e1x->total_hlen_w));
5460                } else { /* if (pbd_e2) */
5461                    BLOGD(sc, DBG_TX,
5462                          "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5463                          "src=%02x:%02x:%02x parsing_data=0x%x\n",
5464                          pbd_e2,
5465                          tmp_bd,
5466                          pbd_e2->data.mac_addr.dst_hi,
5467                          pbd_e2->data.mac_addr.dst_mid,
5468                          pbd_e2->data.mac_addr.dst_lo,
5469                          pbd_e2->data.mac_addr.src_hi,
5470                          pbd_e2->data.mac_addr.src_mid,
5471                          pbd_e2->data.mac_addr.src_lo,
5472                          pbd_e2->parsing_data);
5473                }
5474            }
5475
            if (i != 1) { /* skip the parse bd as it doesn't hold data */
5477                tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5478                BLOGD(sc, DBG_TX,
5479                      "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5480                      tx_data_bd,
5481                      tmp_bd,
5482                      le16toh(tx_data_bd->nbytes),
5483                      le32toh(tx_data_bd->addr_hi),
5484                      le32toh(tx_data_bd->addr_lo));
5485            }
5486
5487            tmp_bd = TX_BD_NEXT(tmp_bd);
5488        }
5489    }
5490
5491    BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5492
5493    /* update TX BD producer index value for next TX */
5494    bd_prod = TX_BD_NEXT(bd_prod);
5495
5496    /*
5497     * If the chain of tx_bd's describing this frame is adjacent to or spans
5498     * an eth_tx_next_bd element then we need to increment the nbds value.
5499     */
5500    if (TX_BD_IDX(bd_prod) < nbds) {
5501        nbds++;
5502    }
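    /*
     * The last descriptor of each BD page holds an eth_tx_next_bd pointer.
     * If the producer index within the page is now smaller than nbds, the
     * frame's BDs wrapped past (or ended adjacent to) such an element, and
     * that slot must be accounted for in nbds as well.
     */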
5503
5504    /* don't allow reordering of writes for nbd and packets */
5505    mb();
5506
5507    fp->tx_db.data.prod += nbds;
5508
5509    /* producer points to the next free tx_bd at this point */
5510    fp->tx_pkt_prod++;
5511    fp->tx_bd_prod = bd_prod;
5512
5513    DOORBELL(sc, fp->index, fp->tx_db.raw);
5514
5515    fp->eth_q_stats.tx_pkts++;
5516
5517    /* Prevent speculative reads from getting ahead of the status block. */
5518    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5519                      0, 0, BUS_SPACE_BARRIER_READ);
5520
5521    /* Prevent speculative reads from getting ahead of the doorbell. */
5522    bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5523                      0, 0, BUS_SPACE_BARRIER_READ);
5524
5525    return (0);
5526}
5527
5528static void
5529bxe_tx_start_locked(struct bxe_softc *sc,
5530                    if_t ifp,
5531                    struct bxe_fastpath *fp)
5532{
5533    struct mbuf *m = NULL;
5534    int tx_count = 0;
5535    uint16_t tx_bd_avail;
5536
5537    BXE_FP_TX_LOCK_ASSERT(fp);
5538
5539    /* keep adding entries while there are frames to send */
5540    while (!if_sendq_empty(ifp)) {
5541
5542        /*
5543         * check for any frames to send
5544         * dequeue can still be NULL even if queue is not empty
5545         */
5546        m = if_dequeue(ifp);
5547        if (__predict_false(m == NULL)) {
5548            break;
5549        }
5550
5551        /* the mbuf now belongs to us */
5552        fp->eth_q_stats.mbuf_alloc_tx++;
5553
5554        /*
5555         * Put the frame into the transmit ring. If we don't have room,
5556         * place the mbuf back at the head of the TX queue, set the
5557         * OACTIVE flag, and wait for the NIC to drain the chain.
5558         */
5559        if (__predict_false(bxe_tx_encap(fp, &m))) {
5560            fp->eth_q_stats.tx_encap_failures++;
5561            if (m != NULL) {
5562                /* mark the TX queue as full and return the frame */
5563                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
                if_sendq_prepend(ifp, m);
5565                fp->eth_q_stats.mbuf_alloc_tx--;
5566                fp->eth_q_stats.tx_queue_xoff++;
5567            }
5568
5569            /* stop looking for more work */
5570            break;
5571        }
5572
5573        /* the frame was enqueued successfully */
5574        tx_count++;
5575
5576        /* send a copy of the frame to any BPF listeners. */
5577        ether_bpf_mtap_if(ifp, m);
5578
5579        tx_bd_avail = bxe_tx_avail(sc, fp);
5580
5581        /* handle any completions if we're running low */
5582        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5583            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5584            bxe_txeof(sc, fp);
5585            if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5586                break;
5587            }
5588        }
5589    }
5590
5591    /* all TX packets were dequeued and/or the tx ring is full */
5592    if (tx_count > 0) {
5593        /* reset the TX watchdog timeout timer */
5594        fp->watchdog_timer = BXE_TX_TIMEOUT;
5595    }
5596}
5597
5598/* Legacy (non-RSS) dispatch routine */
5599static void
5600bxe_tx_start(if_t ifp)
5601{
5602    struct bxe_softc *sc;
5603    struct bxe_fastpath *fp;
5604
5605    sc = if_getsoftc(ifp);
5606
5607    if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5608        BLOGW(sc, "Interface not running, ignoring transmit request\n");
5609        return;
5610    }
5611
5612    if (!sc->link_vars.link_up) {
5613        BLOGW(sc, "Interface link is down, ignoring transmit request\n");
5614        return;
5615    }
5616
5617    fp = &sc->fp[0];
5618
5619    if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
5620        fp->eth_q_stats.tx_queue_full_return++;
5621        return;
5622    }
5623
5624    BXE_FP_TX_LOCK(fp);
5625    bxe_tx_start_locked(sc, ifp, fp);
5626    BXE_FP_TX_UNLOCK(fp);
5627}
5628
5629static int
5630bxe_tx_mq_start_locked(struct bxe_softc    *sc,
5631                       if_t                ifp,
5632                       struct bxe_fastpath *fp,
5633                       struct mbuf         *m)
5634{
5635    struct buf_ring *tx_br = fp->tx_br;
5636    struct mbuf *next;
5637    int depth, rc, tx_count;
5638    uint16_t tx_bd_avail;
5639
5640    rc = tx_count = 0;
5641
5642    BXE_FP_TX_LOCK_ASSERT(fp);
5643
5644    if (sc->state != BXE_STATE_OPEN)  {
5645        fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
        return (ENETDOWN);
5647    }
5648
5649    if (!tx_br) {
5650        BLOGE(sc, "Multiqueue TX and no buf_ring!\n");
5651        return (EINVAL);
5652    }
5653
5654    if (m != NULL) {
5655        rc = drbr_enqueue(ifp, tx_br, m);
5656        if (rc != 0) {
5657            fp->eth_q_stats.tx_soft_errors++;
5658            goto bxe_tx_mq_start_locked_exit;
5659        }
5660    }
5661
5662    if (!sc->link_vars.link_up || !(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5663        fp->eth_q_stats.tx_request_link_down_failures++;
5664        goto bxe_tx_mq_start_locked_exit;
5665    }
5666
5667    /* fetch the depth of the driver queue */
5668    depth = drbr_inuse(ifp, tx_br);
5669    if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5670        fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5671    }
5672
5673    /* keep adding entries while there are frames to send */
5674    while ((next = drbr_peek(ifp, tx_br)) != NULL) {
5675        /* handle any completions if we're running low */
5676        tx_bd_avail = bxe_tx_avail(sc, fp);
5677        if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) {
5678            /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */
5679            bxe_txeof(sc, fp);
5680            tx_bd_avail = bxe_tx_avail(sc, fp);
5681            if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) {
5682                fp->eth_q_stats.bd_avail_too_less_failures++;
5683                m_freem(next);
5684                drbr_advance(ifp, tx_br);
5685                rc = ENOBUFS;
5686                break;
5687            }
5688        }
5689
5690        /* the mbuf now belongs to us */
5691        fp->eth_q_stats.mbuf_alloc_tx++;
5692
5693        /*
5694         * Put the frame into the transmit ring. If we don't have room,
5695         * place the mbuf back at the head of the TX queue, set the
5696         * OACTIVE flag, and wait for the NIC to drain the chain.
5697         */
5698        rc = bxe_tx_encap(fp, &next);
5699        if (__predict_false(rc != 0)) {
5700            fp->eth_q_stats.tx_encap_failures++;
5701            if (next != NULL) {
5702                /* mark the TX queue as full and save the frame */
5703                if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5704                drbr_putback(ifp, tx_br, next);
5705                fp->eth_q_stats.mbuf_alloc_tx--;
5706                fp->eth_q_stats.tx_frames_deferred++;
5707            } else
5708                drbr_advance(ifp, tx_br);
5709
5710            /* stop looking for more work */
5711            break;
5712        }
5713
5714        /* the transmit frame was enqueued successfully */
5715        tx_count++;
5716
5717        /* send a copy of the frame to any BPF listeners */
5718        ether_bpf_mtap_if(ifp, next);
5719
5720        drbr_advance(ifp, tx_br);
5721    }
5722
5723    /* all TX packets were dequeued and/or the tx ring is full */
5724    if (tx_count > 0) {
5725        /* reset the TX watchdog timeout timer */
5726        fp->watchdog_timer = BXE_TX_TIMEOUT;
5727    }
5728
5729bxe_tx_mq_start_locked_exit:
5730    /* If we didn't drain the drbr, enqueue a task in the future to do it. */
5731    if (!drbr_empty(ifp, tx_br)) {
5732        fp->eth_q_stats.tx_mq_not_empty++;
5733        taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
5734    }
5735
5736    return (rc);
5737}
5738
5739static void
5740bxe_tx_mq_start_deferred(void *arg,
5741                         int pending)
5742{
5743    struct bxe_fastpath *fp = (struct bxe_fastpath *)arg;
5744    struct bxe_softc *sc = fp->sc;
5745    if_t ifp = sc->ifp;
5746
5747    BXE_FP_TX_LOCK(fp);
5748    bxe_tx_mq_start_locked(sc, ifp, fp, NULL);
5749    BXE_FP_TX_UNLOCK(fp);
5750}
5751
5752/* Multiqueue (TSS) dispatch routine. */
5753static int
5754bxe_tx_mq_start(if_t ifp,
5755                struct mbuf  *m)
5756{
5757    struct bxe_softc *sc = if_getsoftc(ifp);
5758    struct bxe_fastpath *fp;
5759    int fp_index, rc;
5760
5761    fp_index = 0; /* default is the first queue */
5762
5763    /* check if flowid is set */
5764
5765    if (BXE_VALID_FLOWID(m))
5766        fp_index = (m->m_pkthdr.flowid % sc->num_queues);
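    /*
     * e.g. with 4 queues enabled, a packet whose flowid is 7 is dispatched
     * to fp[3].
     */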
5767
5768    fp = &sc->fp[fp_index];
5769
5770    if (sc->state != BXE_STATE_OPEN)  {
5771        fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
        return (ENETDOWN);
5773    }
5774
5775    if (BXE_FP_TX_TRYLOCK(fp)) {
5776        rc = bxe_tx_mq_start_locked(sc, ifp, fp, m);
5777        BXE_FP_TX_UNLOCK(fp);
5778    } else {
5779        rc = drbr_enqueue(ifp, fp->tx_br, m);
5780        taskqueue_enqueue(fp->tq, &fp->tx_task);
5781    }
5782
5783    return (rc);
5784}
5785
5786static void
5787bxe_mq_flush(if_t ifp)
5788{
5789    struct bxe_softc *sc = if_getsoftc(ifp);
5790    struct bxe_fastpath *fp;
5791    struct mbuf *m;
5792    int i;
5793
5794    for (i = 0; i < sc->num_queues; i++) {
5795        fp = &sc->fp[i];
5796
5797        if (fp->state != BXE_FP_STATE_IRQ) {
5798            BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n",
5799                  fp->index, fp->state);
5800            continue;
5801        }
5802
5803        if (fp->tx_br != NULL) {
5804            BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5805            BXE_FP_TX_LOCK(fp);
5806            while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5807                m_freem(m);
5808            }
5809            BXE_FP_TX_UNLOCK(fp);
5810        }
5811    }
5812
5813    if_qflush(ifp);
5814}
5815
5816static uint16_t
5817bxe_cid_ilt_lines(struct bxe_softc *sc)
5818{
5819    if (IS_SRIOV(sc)) {
5820        return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS);
5821    }
5822    return (L2_ILT_LINES(sc));
5823}
5824
5825static void
5826bxe_ilt_set_info(struct bxe_softc *sc)
5827{
5828    struct ilt_client_info *ilt_client;
5829    struct ecore_ilt *ilt = sc->ilt;
5830    uint16_t line = 0;
5831
5832    ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5833    BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5834
5835    /* CDU */
5836    ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5837    ilt_client->client_num = ILT_CLIENT_CDU;
5838    ilt_client->page_size = CDU_ILT_PAGE_SZ;
5839    ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5840    ilt_client->start = line;
5841    line += bxe_cid_ilt_lines(sc);
5842
5843    if (CNIC_SUPPORT(sc)) {
5844        line += CNIC_ILT_LINES;
5845    }
5846
5847    ilt_client->end = (line - 1);
5848
5849    BLOGD(sc, DBG_LOAD,
5850          "ilt client[CDU]: start %d, end %d, "
5851          "psz 0x%x, flags 0x%x, hw psz %d\n",
5852          ilt_client->start, ilt_client->end,
5853          ilt_client->page_size,
5854          ilt_client->flags,
5855          ilog2(ilt_client->page_size >> 12));
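    /*
     * The "hw psz" logged above is the ILT page size expressed as a power
     * of two of 4KB units, e.g. a 32KB page logs as 3.
     */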
5856
5857    /* QM */
5858    if (QM_INIT(sc->qm_cid_count)) {
5859        ilt_client = &ilt->clients[ILT_CLIENT_QM];
5860        ilt_client->client_num = ILT_CLIENT_QM;
5861        ilt_client->page_size = QM_ILT_PAGE_SZ;
5862        ilt_client->flags = 0;
5863        ilt_client->start = line;
5864
5865        /* 4 bytes for each cid */
5866        line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5867                             QM_ILT_PAGE_SZ);
5868
5869        ilt_client->end = (line - 1);
5870
5871        BLOGD(sc, DBG_LOAD,
5872              "ilt client[QM]: start %d, end %d, "
5873              "psz 0x%x, flags 0x%x, hw psz %d\n",
5874              ilt_client->start, ilt_client->end,
5875              ilt_client->page_size, ilt_client->flags,
5876              ilog2(ilt_client->page_size >> 12));
5877    }
5878
5879    if (CNIC_SUPPORT(sc)) {
5880        /* SRC */
5881        ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5882        ilt_client->client_num = ILT_CLIENT_SRC;
5883        ilt_client->page_size = SRC_ILT_PAGE_SZ;
5884        ilt_client->flags = 0;
5885        ilt_client->start = line;
5886        line += SRC_ILT_LINES;
5887        ilt_client->end = (line - 1);
5888
5889        BLOGD(sc, DBG_LOAD,
5890              "ilt client[SRC]: start %d, end %d, "
5891              "psz 0x%x, flags 0x%x, hw psz %d\n",
5892              ilt_client->start, ilt_client->end,
5893              ilt_client->page_size, ilt_client->flags,
5894              ilog2(ilt_client->page_size >> 12));
5895
5896        /* TM */
5897        ilt_client = &ilt->clients[ILT_CLIENT_TM];
5898        ilt_client->client_num = ILT_CLIENT_TM;
5899        ilt_client->page_size = TM_ILT_PAGE_SZ;
5900        ilt_client->flags = 0;
5901        ilt_client->start = line;
5902        line += TM_ILT_LINES;
5903        ilt_client->end = (line - 1);
5904
5905        BLOGD(sc, DBG_LOAD,
5906              "ilt client[TM]: start %d, end %d, "
5907              "psz 0x%x, flags 0x%x, hw psz %d\n",
5908              ilt_client->start, ilt_client->end,
5909              ilt_client->page_size, ilt_client->flags,
5910              ilog2(ilt_client->page_size >> 12));
5911    }
5912
5913    KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
5914}
5915
5916static void
5917bxe_set_fp_rx_buf_size(struct bxe_softc *sc)
5918{
5919    int i;
5920    uint32_t rx_buf_size;
5921
5922    rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
5923
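    /*
     * Choose the smallest mbuf cluster size that holds a full frame where
     * possible; frames larger than a single page-sized jumbo cluster are
     * carried in multiple buffers, so a smaller per-buffer allocation is
     * used in those cases.
     */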
5924    for (i = 0; i < sc->num_queues; i++) {
        if (rx_buf_size <= MCLBYTES) {
            sc->fp[i].rx_buf_size = rx_buf_size;
            sc->fp[i].mbuf_alloc_size = MCLBYTES;
        } else if (rx_buf_size <= MJUMPAGESIZE) {
            sc->fp[i].rx_buf_size = rx_buf_size;
            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
        } else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)) {
            sc->fp[i].rx_buf_size = MCLBYTES;
            sc->fp[i].mbuf_alloc_size = MCLBYTES;
        } else if (rx_buf_size <= (2 * MJUMPAGESIZE)) {
            sc->fp[i].rx_buf_size = MJUMPAGESIZE;
            sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
        } else {
            sc->fp[i].rx_buf_size = MCLBYTES;
            sc->fp[i].mbuf_alloc_size = MCLBYTES;
        }
5941    }
5942}
5943
5944static int
5945bxe_alloc_ilt_mem(struct bxe_softc *sc)
5946{
5947    int rc = 0;
5948
5949    if ((sc->ilt =
5950         (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt),
5951                                    M_BXE_ILT,
5952                                    (M_NOWAIT | M_ZERO))) == NULL) {
5953        rc = 1;
5954    }
5955
5956    return (rc);
5957}
5958
5959static int
5960bxe_alloc_ilt_lines_mem(struct bxe_softc *sc)
5961{
5962    int rc = 0;
5963
5964    if ((sc->ilt->lines =
5965         (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES),
5966                                    M_BXE_ILT,
5967                                    (M_NOWAIT | M_ZERO))) == NULL) {
5968        rc = 1;
5969    }
5970
5971    return (rc);
5972}
5973
5974static void
5975bxe_free_ilt_mem(struct bxe_softc *sc)
5976{
5977    if (sc->ilt != NULL) {
5978        free(sc->ilt, M_BXE_ILT);
5979        sc->ilt = NULL;
5980    }
5981}
5982
5983static void
5984bxe_free_ilt_lines_mem(struct bxe_softc *sc)
5985{
5986    if (sc->ilt->lines != NULL) {
5987        free(sc->ilt->lines, M_BXE_ILT);
5988        sc->ilt->lines = NULL;
5989    }
5990}
5991
5992static void
5993bxe_free_mem(struct bxe_softc *sc)
5994{
5995    int i;
5996
5997    for (i = 0; i < L2_ILT_LINES(sc); i++) {
5998        bxe_dma_free(sc, &sc->context[i].vcxt_dma);
5999        sc->context[i].vcxt = NULL;
6000        sc->context[i].size = 0;
6001    }
6002
6003    ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);
6004
6005    bxe_free_ilt_lines_mem(sc);
6006
6007}
6008
6009static int
6010bxe_alloc_mem(struct bxe_softc *sc)
6011{
6012
6013    int context_size;
6014    int allocated;
6015    int i;
6016
6017    /*
6018     * Allocate memory for CDU context:
6019     * This memory is allocated separately and not in the generic ILT
     * functions because CDU differs in a few aspects:
6021     * 1. There can be multiple entities allocating memory for context -
6022     * regular L2, CNIC, and SRIOV drivers. Each separately controls
6023     * its own ILT lines.
6024     * 2. Since CDU page-size is not a single 4KB page (which is the case
6025     * for the other ILT clients), to be efficient we want to support
6026     * allocation of sub-page-size in the last entry.
6027     * 3. Context pointers are used by the driver to pass to FW / update
6028     * the context (for the other ILT clients the pointers are used just to
6029     * free the memory during unload).
6030     */
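    /*
     * For example (hypothetical sizes): if the total context size were 70KB
     * with a 32KB CDU ILT page, the loop below would allocate two full 32KB
     * chunks and a final 6KB chunk.
     */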
6031    context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc));
6032    for (i = 0, allocated = 0; allocated < context_size; i++) {
6033        sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6034                                  (context_size - allocated));
6035
6036        if (bxe_dma_alloc(sc, sc->context[i].size,
6037                          &sc->context[i].vcxt_dma,
6038                          "cdu context") != 0) {
6039            bxe_free_mem(sc);
6040            return (-1);
6041        }
6042
6043        sc->context[i].vcxt =
6044            (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6045
6046        allocated += sc->context[i].size;
6047    }
6048
6049    bxe_alloc_ilt_lines_mem(sc);
6050
6051    BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n",
6052          sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6053    {
6054        for (i = 0; i < 4; i++) {
6055            BLOGD(sc, DBG_LOAD,
6056                  "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n",
6057                  i,
6058                  sc->ilt->clients[i].page_size,
6059                  sc->ilt->clients[i].start,
6060                  sc->ilt->clients[i].end,
6061                  sc->ilt->clients[i].client_num,
6062                  sc->ilt->clients[i].flags);
6063        }
6064    }
6065    if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
6066        BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n");
6067        bxe_free_mem(sc);
6068        return (-1);
6069    }
6070
6071    return (0);
6072}
6073
6074static void
6075bxe_free_rx_bd_chain(struct bxe_fastpath *fp)
6076{
6077    int i;
6078
6079    if (fp->rx_mbuf_tag == NULL) {
6080        return;
6081    }
6082
6083    /* free all mbufs and unload all maps */
6084    for (i = 0; i < RX_BD_TOTAL; i++) {
6085        if (fp->rx_mbuf_chain[i].m_map != NULL) {
6086            bus_dmamap_sync(fp->rx_mbuf_tag,
6087                            fp->rx_mbuf_chain[i].m_map,
6088                            BUS_DMASYNC_POSTREAD);
6089            bus_dmamap_unload(fp->rx_mbuf_tag,
6090                              fp->rx_mbuf_chain[i].m_map);
6091        }
6092
6093        if (fp->rx_mbuf_chain[i].m != NULL) {
6094            m_freem(fp->rx_mbuf_chain[i].m);
6095            fp->rx_mbuf_chain[i].m = NULL;
6096            fp->eth_q_stats.mbuf_alloc_rx--;
6097        }
6098    }
6099}
6100
6101static void
6102bxe_free_tpa_pool(struct bxe_fastpath *fp)
6103{
6104    struct bxe_softc *sc;
6105    int i, max_agg_queues;
6106
6107    sc = fp->sc;
6108
6109    if (fp->rx_mbuf_tag == NULL) {
6110        return;
6111    }
6112
6113    max_agg_queues = MAX_AGG_QS(sc);
6114
6115    /* release all mbufs and unload all DMA maps in the TPA pool */
6116    for (i = 0; i < max_agg_queues; i++) {
6117        if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6118            bus_dmamap_sync(fp->rx_mbuf_tag,
6119                            fp->rx_tpa_info[i].bd.m_map,
6120                            BUS_DMASYNC_POSTREAD);
6121            bus_dmamap_unload(fp->rx_mbuf_tag,
6122                              fp->rx_tpa_info[i].bd.m_map);
6123        }
6124
6125        if (fp->rx_tpa_info[i].bd.m != NULL) {
6126            m_freem(fp->rx_tpa_info[i].bd.m);
6127            fp->rx_tpa_info[i].bd.m = NULL;
6128            fp->eth_q_stats.mbuf_alloc_tpa--;
6129        }
6130    }
6131}
6132
6133static void
6134bxe_free_sge_chain(struct bxe_fastpath *fp)
6135{
6136    int i;
6137
6138    if (fp->rx_sge_mbuf_tag == NULL) {
6139        return;
6140    }
6141
    /* free all mbufs and unload all maps */
6143    for (i = 0; i < RX_SGE_TOTAL; i++) {
6144        if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6145            bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6146                            fp->rx_sge_mbuf_chain[i].m_map,
6147                            BUS_DMASYNC_POSTREAD);
6148            bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6149                              fp->rx_sge_mbuf_chain[i].m_map);
6150        }
6151
6152        if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6153            m_freem(fp->rx_sge_mbuf_chain[i].m);
6154            fp->rx_sge_mbuf_chain[i].m = NULL;
6155            fp->eth_q_stats.mbuf_alloc_sge--;
6156        }
6157    }
6158}
6159
6160static void
6161bxe_free_fp_buffers(struct bxe_softc *sc)
6162{
6163    struct bxe_fastpath *fp;
6164    int i;
6165
6166    for (i = 0; i < sc->num_queues; i++) {
6167        fp = &sc->fp[i];
6168
6169        if (fp->tx_br != NULL) {
6170            /* just in case bxe_mq_flush() wasn't called */
6171            if (mtx_initialized(&fp->tx_mtx)) {
6172                struct mbuf *m;
6173
6174                BXE_FP_TX_LOCK(fp);
6175                while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6176                    m_freem(m);
6177                BXE_FP_TX_UNLOCK(fp);
6178            }
6179        }
6180
6181        /* free all RX buffers */
6182        bxe_free_rx_bd_chain(fp);
6183        bxe_free_tpa_pool(fp);
6184        bxe_free_sge_chain(fp);
6185
6186        if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6187            BLOGE(sc, "failed to claim all rx mbufs (%d left)\n",
6188                  fp->eth_q_stats.mbuf_alloc_rx);
6189        }
6190
6191        if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6192            BLOGE(sc, "failed to claim all sge mbufs (%d left)\n",
6193                  fp->eth_q_stats.mbuf_alloc_sge);
6194        }
6195
6196        if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
            BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n",
6198                  fp->eth_q_stats.mbuf_alloc_tpa);
6199        }
6200
6201        if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6202            BLOGE(sc, "failed to release tx mbufs (%d left)\n",
6203                  fp->eth_q_stats.mbuf_alloc_tx);
6204        }
6205
6206        /* XXX verify all mbufs were reclaimed */
6207    }
6208}
6209
6210static int
6211bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
6212                     uint16_t            prev_index,
6213                     uint16_t            index)
6214{
6215    struct bxe_sw_rx_bd *rx_buf;
6216    struct eth_rx_bd *rx_bd;
6217    bus_dma_segment_t segs[1];
6218    bus_dmamap_t map;
6219    struct mbuf *m;
6220    int nsegs, rc;
6221
6222    rc = 0;
6223
6224    /* allocate the new RX BD mbuf */
6225    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6226    if (__predict_false(m == NULL)) {
6227        fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6228        return (ENOBUFS);
6229    }
6230
6231    fp->eth_q_stats.mbuf_alloc_rx++;
6232
6233    /* initialize the mbuf buffer length */
6234    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6235
6236    /* map the mbuf into non-paged pool */
6237    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6238                                 fp->rx_mbuf_spare_map,
6239                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6240    if (__predict_false(rc != 0)) {
6241        fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6242        m_freem(m);
6243        fp->eth_q_stats.mbuf_alloc_rx--;
6244        return (rc);
6245    }
6246
6247    /* all mbufs must map to a single segment */
6248    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
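    /*
     * The new mbuf was deliberately loaded into the spare DMA map above so
     * that a mapping failure leaves the currently posted mbuf untouched;
     * only on success are the spare map and the ring entry's map swapped.
     */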
6249
6250    /* release any existing RX BD mbuf mappings */
6251
6252    if (prev_index != index) {
6253        rx_buf = &fp->rx_mbuf_chain[prev_index];
6254
6255        if (rx_buf->m_map != NULL) {
6256            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6257                            BUS_DMASYNC_POSTREAD);
6258            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6259        }
6260
6261        /*
6262         * We only get here from bxe_rxeof() when the maximum number
6263         * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
6264         * holds the mbuf in the prev_index so it's OK to NULL it out
6265         * here without concern of a memory leak.
6266         */
6267        fp->rx_mbuf_chain[prev_index].m = NULL;
6268    }
6269
6270    rx_buf = &fp->rx_mbuf_chain[index];
6271
6272    if (rx_buf->m_map != NULL) {
6273        bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6274                        BUS_DMASYNC_POSTREAD);
6275        bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6276    }
6277
6278    /* save the mbuf and mapping info for a future packet */
6279    map = (prev_index != index) ?
6280              fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6281    rx_buf->m_map = fp->rx_mbuf_spare_map;
6282    fp->rx_mbuf_spare_map = map;
6283    bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6284                    BUS_DMASYNC_PREREAD);
6285    rx_buf->m = m;
6286
6287    rx_bd = &fp->rx_chain[index];
6288    rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6289    rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6290
6291    return (rc);
6292}
6293
6294static int
6295bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
6296                      int                 queue)
6297{
6298    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6299    bus_dma_segment_t segs[1];
6300    bus_dmamap_t map;
6301    struct mbuf *m;
6302    int nsegs;
6303    int rc = 0;
6304
6305    /* allocate the new TPA mbuf */
6306    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6307    if (__predict_false(m == NULL)) {
6308        fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6309        return (ENOBUFS);
6310    }
6311
6312    fp->eth_q_stats.mbuf_alloc_tpa++;
6313
6314    /* initialize the mbuf buffer length */
6315    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6316
6317    /* map the mbuf into non-paged pool */
6318    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6319                                 fp->rx_tpa_info_mbuf_spare_map,
6320                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6321    if (__predict_false(rc != 0)) {
6322        fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6323        m_free(m);
6324        fp->eth_q_stats.mbuf_alloc_tpa--;
6325        return (rc);
6326    }
6327
6328    /* all mbufs must map to a single segment */
6329    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6330
6331    /* release any existing TPA mbuf mapping */
6332    if (tpa_info->bd.m_map != NULL) {
6333        bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6334                        BUS_DMASYNC_POSTREAD);
6335        bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6336    }
6337
6338    /* save the mbuf and mapping info for the TPA mbuf */
6339    map = tpa_info->bd.m_map;
6340    tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6341    fp->rx_tpa_info_mbuf_spare_map = map;
6342    bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6343                    BUS_DMASYNC_PREREAD);
6344    tpa_info->bd.m = m;
6345    tpa_info->seg = segs[0];
6346
6347    return (rc);
6348}
6349
6350/*
6351 * Allocate an mbuf and assign it to the receive scatter gather chain. The
6352 * caller must take care to save a copy of the existing mbuf in the SG mbuf
6353 * chain.
6354 */
6355static int
6356bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
6357                      uint16_t            index)
6358{
6359    struct bxe_sw_rx_bd *sge_buf;
6360    struct eth_rx_sge *sge;
6361    bus_dma_segment_t segs[1];
6362    bus_dmamap_t map;
6363    struct mbuf *m;
6364    int nsegs;
6365    int rc = 0;
6366
6367    /* allocate a new SGE mbuf */
6368    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
6369    if (__predict_false(m == NULL)) {
6370        fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6371        return (ENOMEM);
6372    }
6373
6374    fp->eth_q_stats.mbuf_alloc_sge++;
6375
6376    /* initialize the mbuf buffer length */
6377    m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6378
6379    /* map the SGE mbuf into non-paged pool */
6380    rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6381                                 fp->rx_sge_mbuf_spare_map,
6382                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
6383    if (__predict_false(rc != 0)) {
6384        fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6385        m_freem(m);
6386        fp->eth_q_stats.mbuf_alloc_sge--;
6387        return (rc);
6388    }
6389
6390    /* all mbufs must map to a single segment */
6391    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));
6392
6393    sge_buf = &fp->rx_sge_mbuf_chain[index];
6394
6395    /* release any existing SGE mbuf mapping */
6396    if (sge_buf->m_map != NULL) {
6397        bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6398                        BUS_DMASYNC_POSTREAD);
6399        bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6400    }
6401
6402    /* save the mbuf and mapping info for a future packet */
6403    map = sge_buf->m_map;
6404    sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6405    fp->rx_sge_mbuf_spare_map = map;
6406    bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6407                    BUS_DMASYNC_PREREAD);
6408    sge_buf->m = m;
6409
6410    sge = &fp->rx_sge_chain[index];
6411    sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6412    sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6413
6414    return (rc);
6415}
6416
6417static __noinline int
6418bxe_alloc_fp_buffers(struct bxe_softc *sc)
6419{
6420    struct bxe_fastpath *fp;
6421    int i, j, rc = 0;
6422    int ring_prod, cqe_ring_prod;
6423    int max_agg_queues;
6424
6425    for (i = 0; i < sc->num_queues; i++) {
6426        fp = &sc->fp[i];
6427
6428        ring_prod = cqe_ring_prod = 0;
6429        fp->rx_bd_cons = 0;
6430        fp->rx_cq_cons = 0;
6431
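        /*
         * The BD and CQE producer indices advance in lockstep below; the
         * RX_BD_NEXT()/RCQ_NEXT() macros are expected to skip any entries
         * reserved at ring page boundaries for next-page pointers.
         */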
6432        /* allocate buffers for the RX BDs in RX BD chain */
6433        for (j = 0; j < sc->max_rx_bufs; j++) {
6434            rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
6435            if (rc != 0) {
6436                BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
6437                      i, rc);
6438                goto bxe_alloc_fp_buffers_error;
6439            }
6440
6441            ring_prod     = RX_BD_NEXT(ring_prod);
6442            cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
6443        }
6444
6445        fp->rx_bd_prod = ring_prod;
6446        fp->rx_cq_prod = cqe_ring_prod;
6447        fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6448
6449        max_agg_queues = MAX_AGG_QS(sc);
6450
6451        fp->tpa_enable = TRUE;
6452
6453        /* fill the TPA pool */
6454        for (j = 0; j < max_agg_queues; j++) {
6455            rc = bxe_alloc_rx_tpa_mbuf(fp, j);
6456            if (rc != 0) {
6457                BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
6458                          i, j);
6459                fp->tpa_enable = FALSE;
6460                goto bxe_alloc_fp_buffers_error;
6461            }
6462
6463            fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6464        }
6465
6466        if (fp->tpa_enable) {
6467            /* fill the RX SGE chain */
6468            ring_prod = 0;
6469            for (j = 0; j < RX_SGE_USABLE; j++) {
6470                rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
6471                if (rc != 0) {
6472                    BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
6473                              i, ring_prod);
6474                    fp->tpa_enable = FALSE;
6475                    ring_prod = 0;
6476                    goto bxe_alloc_fp_buffers_error;
6477                }
6478
6479                ring_prod = RX_SGE_NEXT(ring_prod);
6480            }
6481
6482            fp->rx_sge_prod = ring_prod;
6483        }
6484    }
6485
6486    return (0);
6487
6488bxe_alloc_fp_buffers_error:
6489
6490    /* unwind what was already allocated */
6491    bxe_free_rx_bd_chain(fp);
6492    bxe_free_tpa_pool(fp);
6493    bxe_free_sge_chain(fp);
6494
6495    return (ENOBUFS);
6496}
6497
6498static void
6499bxe_free_fw_stats_mem(struct bxe_softc *sc)
6500{
6501    bxe_dma_free(sc, &sc->fw_stats_dma);
6502
6503    sc->fw_stats_num = 0;
6504
6505    sc->fw_stats_req_size = 0;
6506    sc->fw_stats_req = NULL;
6507    sc->fw_stats_req_mapping = 0;
6508
6509    sc->fw_stats_data_size = 0;
6510    sc->fw_stats_data = NULL;
6511    sc->fw_stats_data_mapping = 0;
6512}
6513
6514static int
6515bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
6516{
6517    uint8_t num_queue_stats;
6518    int num_groups;
6519
6520    /* number of queues for statistics is number of eth queues */
6521    num_queue_stats = BXE_NUM_ETH_QUEUES(sc);
6522
6523    /*
6524     * Total number of FW statistics requests =
6525     *   1 for port stats + 1 for PF stats + num of queues
6526     */
6527    sc->fw_stats_num = (2 + num_queue_stats);
6528
6529    /*
6530     * Request is built from stats_query_header and an array of
6531     * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
6532     * rules. The real number of requests is configured in the
6533     * stats_query_header.
6534     */
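    /*
     * Note: the expression below is a ceiling division, equivalent to
     * howmany(sc->fw_stats_num, STATS_QUERY_CMD_COUNT). For example,
     * assuming STATS_QUERY_CMD_COUNT were 16, the 6 requests of a
     * 4-queue function would fit in a single command group.
     */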
6535    num_groups =
6536        ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6537         ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
6538
6539    BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
6540          sc->fw_stats_num, num_groups);
6541
6542    sc->fw_stats_req_size =
6543        (sizeof(struct stats_query_header) +
6544         (num_groups * sizeof(struct stats_query_cmd_group)));
6545
6546    /*
6547     * Data for statistics requests + stats_counter.
6548     * stats_counter holds per-STORM counters that are incremented when
6549     * STORM has finished with the current request. Memory for FCoE
6550     * offloaded statistics is counted anyway, even if those statistics will not be sent.
6551     * VF stats are not accounted for here as the data of VF stats is stored
6552     * in memory allocated by the VF, not here.
6553     */
6554    sc->fw_stats_data_size =
6555        (sizeof(struct stats_counter) +
6556         sizeof(struct per_port_stats) +
6557         sizeof(struct per_pf_stats) +
6558         /* sizeof(struct fcoe_statistics_params) + */
6559         (sizeof(struct per_queue_stats) * num_queue_stats));
6560
6561    if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6562                      &sc->fw_stats_dma, "fw stats") != 0) {
6563        bxe_free_fw_stats_mem(sc);
6564        return (-1);
6565    }
6566
6567    /* set up the shortcuts */
6568
6569    sc->fw_stats_req =
6570        (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6571    sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6572
6573    sc->fw_stats_data =
6574        (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6575                                     sc->fw_stats_req_size);
6576    sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6577                                 sc->fw_stats_req_size);
6578
6579    BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
6580          (uintmax_t)sc->fw_stats_req_mapping);
6581
6582    BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
6583          (uintmax_t)sc->fw_stats_data_mapping);
6584
6585    return (0);
6586}
6587
6588/*
6589 * Bits map:
6590 * 0-7  - Engine0 load counter.
6591 * 8-15 - Engine1 load counter.
6592 * 16   - Engine0 RESET_IN_PROGRESS bit.
6593 * 17   - Engine1 RESET_IN_PROGRESS bit.
6594 * 18   - Engine0 ONE_IS_LOADED. Set when there is at least one active
6595 *        function on the engine
6596 * 19   - Engine1 ONE_IS_LOADED.
6597 * 20   - Chip reset flow bit. When set, a non-leader must wait for the
6598 *        leaders of both engines to complete (check both RESET_IN_PROGRESS
6599 *        bits, not just the one belonging to its engine).
6600 */
6601#define BXE_RECOVERY_GLOB_REG     MISC_REG_GENERIC_POR_1
6602#define BXE_PATH0_LOAD_CNT_MASK   0x000000ff
6603#define BXE_PATH0_LOAD_CNT_SHIFT  0
6604#define BXE_PATH1_LOAD_CNT_MASK   0x0000ff00
6605#define BXE_PATH1_LOAD_CNT_SHIFT  8
6606#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
6607#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
6608#define BXE_GLOBAL_RESET_BIT      0x00040000
6609
6610/* set the GLOBAL_RESET bit, should be run under rtnl lock */
6611static void
6612bxe_set_reset_global(struct bxe_softc *sc)
6613{
6614    uint32_t val;
6615    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6616    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6617    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
6618    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6619}
6620
6621/* clear the GLOBAL_RESET bit, should be run under rtnl lock */
6622static void
6623bxe_clear_reset_global(struct bxe_softc *sc)
6624{
6625    uint32_t val;
6626    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6627    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6628    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
6629    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6630}
6631
6632/* checks the GLOBAL_RESET bit, should be run under rtnl lock */
6633static uint8_t
6634bxe_reset_is_global(struct bxe_softc *sc)
6635{
6636    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6637    BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val);
6638    return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE;
6639}
6640
6641/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
6642static void
6643bxe_set_reset_done(struct bxe_softc *sc)
6644{
6645    uint32_t val;
6646    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6647                                 BXE_PATH0_RST_IN_PROG_BIT;
6648
6649    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6650
6651    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6652    /* Clear the bit */
6653    val &= ~bit;
6654    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6655
6656    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6657}
6658
6659/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */
6660static void
6661bxe_set_reset_in_progress(struct bxe_softc *sc)
6662{
6663    uint32_t val;
6664    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
6665                                 BXE_PATH0_RST_IN_PROG_BIT;
6666
6667    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6668
6669    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6670    /* Set the bit */
6671    val |= bit;
6672    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6673
6674    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6675}
6676
6677/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */
6678static uint8_t
6679bxe_reset_is_done(struct bxe_softc *sc,
6680                  int              engine)
6681{
6682    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6683    uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT :
6684                            BXE_PATH0_RST_IN_PROG_BIT;
6685
6686    /* return false if bit is set */
6687    return (val & bit) ? FALSE : TRUE;
6688}
6689
6690/* get the load status for an engine, should be run under rtnl lock */
6691static uint8_t
6692bxe_get_load_status(struct bxe_softc *sc,
6693                    int              engine)
6694{
6695    uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
6696                             BXE_PATH0_LOAD_CNT_MASK;
6697    uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
6698                              BXE_PATH0_LOAD_CNT_SHIFT;
6699    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6700
6701    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6702
6703    val = ((val & mask) >> shift);
6704
6705    BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);
6706
6707    return (val != 0);
6708}
6709
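/*
 * Note: although described above as "load counters", bits 0-15 are used
 * below as per-PF bitmasks: each PF sets or clears the bit matching its
 * absolute function number within its engine's 8-bit field. As a purely
 * illustrative example, a value of 0x05 in bits 0-7 would mean absolute
 * functions 0 and 2 are loaded on engine 0.
 */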
6710/* set pf load mark */
6711/* XXX needs to be under rtnl lock */
6712static void
6713bxe_set_pf_load(struct bxe_softc *sc)
6714{
6715    uint32_t val;
6716    uint32_t val1;
6717    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6718                                  BXE_PATH0_LOAD_CNT_MASK;
6719    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6720                                   BXE_PATH0_LOAD_CNT_SHIFT;
6721
6722    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6723
6724    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6725    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);
6726
6727    /* get the current counter value */
6728    val1 = ((val & mask) >> shift);
6729
6730    /* set bit of this PF */
6731    val1 |= (1 << SC_ABS_FUNC(sc));
6732
6733    /* clear the old value */
6734    val &= ~mask;
6735
6736    /* set the new one */
6737    val |= ((val1 << shift) & mask);
6738
6739    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6740
6741    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6742}
6743
6744/* clear pf load mark */
6745/* XXX needs to be under rtnl lock */
6746static uint8_t
6747bxe_clear_pf_load(struct bxe_softc *sc)
6748{
6749    uint32_t val1, val;
6750    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
6751                                  BXE_PATH0_LOAD_CNT_MASK;
6752    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
6753                                   BXE_PATH0_LOAD_CNT_SHIFT;
6754
6755    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6756    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
6757    BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);
6758
6759    /* get the current counter value */
6760    val1 = (val & mask) >> shift;
6761
6762    /* clear bit of that PF */
6763    val1 &= ~(1 << SC_ABS_FUNC(sc));
6764
6765    /* clear the old value */
6766    val &= ~mask;
6767
6768    /* set the new one */
6769    val |= ((val1 << shift) & mask);
6770
6771    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
6772    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
6773    return (val1 != 0);
6774}
6775
6776/* send load request to the MCP and analyze the response */
6777static int
6778bxe_nic_load_request(struct bxe_softc *sc,
6779                     uint32_t         *load_code)
6780{
6781    /* init fw_seq */
6782    sc->fw_seq =
6783        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
6784         DRV_MSG_SEQ_NUMBER_MASK);
6785
6786    BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6787
6788    /* get the current FW pulse sequence */
6789    sc->fw_drv_pulse_wr_seq =
6790        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
6791         DRV_PULSE_SEQ_MASK);
6792
6793    BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
6794          sc->fw_drv_pulse_wr_seq);
6795
6796    /* load request */
6797    (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
6798                                  DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
6799
6800    /* if the MCP fails to respond we must abort */
6801    if (!(*load_code)) {
6802        BLOGE(sc, "MCP response failure!\n");
6803        return (-1);
6804    }
6805
6806    /* if MCP refused then must abort */
6807    if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6808        BLOGE(sc, "MCP refused load request\n");
6809        return (-1);
6810    }
6811
6812    return (0);
6813}
6814
6815/*
6816 * Check whether another PF has already loaded FW to chip. In virtualized
6817 * environments a PF from another VM may have already initialized the device,
6818 * including loading the FW.
6819 */
6820static int
6821bxe_nic_load_analyze_req(struct bxe_softc *sc,
6822                         uint32_t         load_code)
6823{
6824    uint32_t my_fw, loaded_fw;
6825
6826    /* is another pf loaded on this engine? */
6827    if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
6828        (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
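        /*
         * Version dword layout: major in byte 0, minor in byte 1, revision
         * in byte 2, engineering in byte 3; this is compared against the
         * value the already-loaded FW exposes through XSEM_REG_PRAM below.
         */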
6829        /* build my FW version dword */
6830        my_fw = (BCM_5710_FW_MAJOR_VERSION +
6831                 (BCM_5710_FW_MINOR_VERSION << 8 ) +
6832                 (BCM_5710_FW_REVISION_VERSION << 16) +
6833                 (BCM_5710_FW_ENGINEERING_VERSION << 24));
6834
6835        /* read loaded FW from chip */
6836        loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
6837        BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
6838              loaded_fw, my_fw);
6839
6840        /* abort nic load if version mismatch */
6841        if (my_fw != loaded_fw) {
6842            BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
6843                  loaded_fw, my_fw);
6844            return (-1);
6845        }
6846    }
6847
6848    return (0);
6849}
6850
6851/* mark PMF if applicable */
6852static void
6853bxe_nic_load_pmf(struct bxe_softc *sc,
6854                 uint32_t         load_code)
6855{
6856    uint32_t ncsi_oem_data_addr;
6857
6858    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6859        (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
6860        (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
6861        /*
6862         * Barrier for ordering between the write to sc->port.pmf here and
6863         * the read from the periodic task.
6864         */
6865        sc->port.pmf = 1;
6866        mb();
6867    } else {
6868        sc->port.pmf = 0;
6869    }
6870
6871    BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
6872
6873    /* XXX needed? */
6874    if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
6875        if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
6876            ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
6877            if (ncsi_oem_data_addr) {
6878                REG_WR(sc,
6879                       (ncsi_oem_data_addr +
6880                        offsetof(struct glob_ncsi_oem_data, driver_version)),
6881                       0);
6882            }
6883        }
6884    }
6885}
6886
6887static void
6888bxe_read_mf_cfg(struct bxe_softc *sc)
6889{
6890    int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1);
6891    int abs_func;
6892    int vn;
6893
6894    if (BXE_NOMCP(sc)) {
6895        return; /* what should the default value be in this case? */
6896    }
6897
6898    /*
6899     * The formula for computing the absolute function number is...
6900     * For 2 port configuration (4 functions per port):
6901     *   abs_func = 2 * vn + SC_PORT + SC_PATH
6902     * For 4 port configuration (2 functions per port):
6903     *   abs_func = 4 * vn + 2 * SC_PORT + SC_PATH
6904     */
6905    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
6906        abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc));
6907        if (abs_func >= E1H_FUNC_MAX) {
6908            break;
6909        }
6910        sc->devinfo.mf_info.mf_config[vn] =
6911            MFCFG_RD(sc, func_mf_config[abs_func].config);
6912    }
6913
6914    if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
6915        FUNC_MF_CFG_FUNC_DISABLED) {
6916        BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n");
6917        sc->flags |= BXE_MF_FUNC_DIS;
6918    } else {
6919        BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n");
6920        sc->flags &= ~BXE_MF_FUNC_DIS;
6921    }
6922}
6923
6924/* acquire split MCP access lock register */
6925static int bxe_acquire_alr(struct bxe_softc *sc)
6926{
6927    uint32_t j, val;
6928
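    /*
     * Request the lock by writing bit 31 and poll until the read-back
     * shows the bit set (i.e. the lock was granted). With 1000 iterations
     * of a 5 ms delay this waits up to roughly 5 seconds before failing.
     */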
6929    for (j = 0; j < 1000; j++) {
6930        val = (1UL << 31);
6931        REG_WR(sc, GRCBASE_MCP + 0x9c, val);
6932        val = REG_RD(sc, GRCBASE_MCP + 0x9c);
6933        if (val & (1L << 31))
6934            break;
6935
6936        DELAY(5000);
6937    }
6938
6939    if (!(val & (1L << 31))) {
6940        BLOGE(sc, "Cannot acquire MCP access lock register\n");
6941        return (-1);
6942    }
6943
6944    return (0);
6945}
6946
6947/* release split MCP access lock register */
6948static void bxe_release_alr(struct bxe_softc *sc)
6949{
6950    REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
6951}
6952
6953static void
6954bxe_fan_failure(struct bxe_softc *sc)
6955{
6956    int port = SC_PORT(sc);
6957    uint32_t ext_phy_config;
6958
6959    /* mark the failure */
6960    ext_phy_config =
6961        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
6962
6963    ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6964    ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
6965    SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config,
6966             ext_phy_config);
6967
6968    /* log the failure */
6969    BLOGW(sc, "Fan Failure has caused the driver to shutdown "
6970              "the card to prevent permanent damage. "
6971              "Please contact OEM Support for assistance\n");
6972
6973    /* XXX */
6974#if 1
6975    bxe_panic(sc, ("Schedule task to handle fan failure\n"));
6976#else
6977    /*
6978     * Schedule device reset (unload)
6979     * This is due to some boards consuming sufficient power when driver is
6980     * up to overheat if fan fails.
6981     */
6982    bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
6983    schedule_delayed_work(&sc->sp_rtnl_task, 0);
6984#endif
6985}
6986
6987/* this function is called upon a link interrupt */
6988static void
6989bxe_link_attn(struct bxe_softc *sc)
6990{
6991    uint32_t pause_enabled = 0;
6992    struct host_port_stats *pstats;
6993    int cmng_fns;
6994    struct bxe_fastpath *fp;
6995    int i;
6996
6997    /* Make sure that we are synced with the current statistics */
6998    bxe_stats_handle(sc, STATS_EVENT_STOP);
6999    BLOGD(sc, DBG_LOAD, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags);
7000    elink_link_update(&sc->link_params, &sc->link_vars);
7001
7002    if (sc->link_vars.link_up) {
7003
7004        /* dropless flow control */
7005        if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
7006            pause_enabled = 0;
7007
7008            if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7009                pause_enabled = 1;
7010            }
7011
7012            REG_WR(sc,
7013                   (BAR_USTRORM_INTMEM +
7014                    USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))),
7015                   pause_enabled);
7016        }
7017
7018        if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7019            pstats = BXE_SP(sc, port_stats);
7020            /* reset old mac stats */
7021            memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7022        }
7023
7024        if (sc->state == BXE_STATE_OPEN) {
7025            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
7026	    /* Restart tx when the link comes back. */
7027	    FOR_EACH_ETH_QUEUE(sc, i) {
7028		fp = &sc->fp[i];
7029		taskqueue_enqueue(fp->tq, &fp->tx_task);
7030	    }
7031        }
7032
7033    }
7034
7035    if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7036        cmng_fns = bxe_get_cmng_fns_mode(sc);
7037
7038        if (cmng_fns != CMNG_FNS_NONE) {
7039            bxe_cmng_fns_init(sc, FALSE, cmng_fns);
7040            storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7041        } else {
7042            /* rate shaping and fairness are disabled */
7043            BLOGD(sc, DBG_LOAD, "single function mode without fairness\n");
7044        }
7045    }
7046
7047    bxe_link_report_locked(sc);
7048
7049    if (IS_MF(sc)) {
7050        ; // XXX bxe_link_sync_notify(sc);
7051    }
7052}
7053
7054static void
7055bxe_attn_int_asserted(struct bxe_softc *sc,
7056                      uint32_t         asserted)
7057{
7058    int port = SC_PORT(sc);
7059    uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7060                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
7061    uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
7062                                        NIG_REG_MASK_INTERRUPT_PORT0;
7063    uint32_t aeu_mask;
7064    uint32_t nig_mask = 0;
7065    uint32_t reg_addr;
7066    uint32_t igu_acked;
7067    uint32_t cnt;
7068
7069    if (sc->attn_state & asserted) {
7070        BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
7071    }
7072
7073    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7074
7075    aeu_mask = REG_RD(sc, aeu_addr);
7076
7077    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
7078          aeu_mask, asserted);
7079
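    /*
     * Mask the newly asserted attention lines (only the low 10 AEU bits
     * are maskable here) so they do not keep firing while being handled;
     * the deassertion path is responsible for re-enabling them.
     */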
7080    aeu_mask &= ~(asserted & 0x3ff);
7081
7082    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
7083
7084    REG_WR(sc, aeu_addr, aeu_mask);
7085
7086    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
7087
7088    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7089    sc->attn_state |= asserted;
7090    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7091
7092    if (asserted & ATTN_HARD_WIRED_MASK) {
7093        if (asserted & ATTN_NIG_FOR_FUNC) {
7094
7095	    bxe_acquire_phy_lock(sc);
7096            /* save nig interrupt mask */
7097            nig_mask = REG_RD(sc, nig_int_mask_addr);
7098
7099            /* If nig_mask is not set, no need to call the update function */
7100            if (nig_mask) {
7101                REG_WR(sc, nig_int_mask_addr, 0);
7102
7103                bxe_link_attn(sc);
7104            }
7105
7106            /* handle unicore attn? */
7107        }
7108
7109        if (asserted & ATTN_SW_TIMER_4_FUNC) {
7110            BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
7111        }
7112
7113        if (asserted & GPIO_2_FUNC) {
7114            BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
7115        }
7116
7117        if (asserted & GPIO_3_FUNC) {
7118            BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
7119        }
7120
7121        if (asserted & GPIO_4_FUNC) {
7122            BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
7123        }
7124
7125        if (port == 0) {
7126            if (asserted & ATTN_GENERAL_ATTN_1) {
7127                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
7128                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
7129            }
7130            if (asserted & ATTN_GENERAL_ATTN_2) {
7131                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
7132                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
7133            }
7134            if (asserted & ATTN_GENERAL_ATTN_3) {
7135                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
7136                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
7137            }
7138        } else {
7139            if (asserted & ATTN_GENERAL_ATTN_4) {
7140                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
7141                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
7142            }
7143            if (asserted & ATTN_GENERAL_ATTN_5) {
7144                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
7145                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
7146            }
7147            if (asserted & ATTN_GENERAL_ATTN_6) {
7148                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
7149                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
7150            }
7151        }
7152    } /* hardwired */
7153
7154    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7155        reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
7156    } else {
7157        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
7158    }
7159
7160    BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
7161          asserted,
7162          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7163    REG_WR(sc, reg_addr, asserted);
7164
7165    /* now set back the mask */
7166    if (asserted & ATTN_NIG_FOR_FUNC) {
7167        /*
7168         * Verify that IGU ack through BAR was written before restoring
7169         * NIG mask. This loop should exit after 2-3 iterations max.
7170         */
7171        if (sc->devinfo.int_block != INT_BLOCK_HC) {
7172            cnt = 0;
7173
7174            do {
7175                igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
7176            } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
7177                     (++cnt < MAX_IGU_ATTN_ACK_TO));
7178
7179            if (!igu_acked) {
7180                BLOGE(sc, "Failed to verify IGU ack on time\n");
7181            }
7182
7183            mb();
7184        }
7185
7186        REG_WR(sc, nig_int_mask_addr, nig_mask);
7187
7188	bxe_release_phy_lock(sc);
7189    }
7190}
7191
7192static void
7193bxe_print_next_block(struct bxe_softc *sc,
7194                     int              idx,
7195                     const char       *blk)
7196{
7197    BLOGI(sc, "%s%s", idx ? ", " : "", blk);
7198}
7199
7200static int
7201bxe_check_blocks_with_parity0(struct bxe_softc *sc,
7202                              uint32_t         sig,
7203                              int              par_num,
7204                              uint8_t          print)
7205{
7206    uint32_t cur_bit = 0;
7207    int i = 0;
7208
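    /*
     * Walk the signature one bit at a time: report the block that owns
     * each set bit, then clear it so the loop ends once sig reaches zero.
     * The parity1/2/3/4 helpers below follow the same pattern.
     */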
7209    for (i = 0; sig; i++) {
7210        cur_bit = ((uint32_t)0x1 << i);
7211        if (sig & cur_bit) {
7212            switch (cur_bit) {
7213            case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
7214                if (print)
7215                    bxe_print_next_block(sc, par_num++, "BRB");
7216                break;
7217            case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
7218                if (print)
7219                    bxe_print_next_block(sc, par_num++, "PARSER");
7220                break;
7221            case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
7222                if (print)
7223                    bxe_print_next_block(sc, par_num++, "TSDM");
7224                break;
7225            case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
7226                if (print)
7227                    bxe_print_next_block(sc, par_num++, "SEARCHER");
7228                break;
7229            case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
7230                if (print)
7231                    bxe_print_next_block(sc, par_num++, "TCM");
7232                break;
7233            case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
7234                if (print)
7235                    bxe_print_next_block(sc, par_num++, "TSEMI");
7236                break;
7237            case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
7238                if (print)
7239                    bxe_print_next_block(sc, par_num++, "XPB");
7240                break;
7241            }
7242
7243            /* Clear the bit */
7244            sig &= ~cur_bit;
7245        }
7246    }
7247
7248    return (par_num);
7249}
7250
7251static int
7252bxe_check_blocks_with_parity1(struct bxe_softc *sc,
7253                              uint32_t         sig,
7254                              int              par_num,
7255                              uint8_t          *global,
7256                              uint8_t          print)
7257{
7258    int i = 0;
7259    uint32_t cur_bit = 0;
7260    for (i = 0; sig; i++) {
7261        cur_bit = ((uint32_t)0x1 << i);
7262        if (sig & cur_bit) {
7263            switch (cur_bit) {
7264            case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
7265                if (print)
7266                    bxe_print_next_block(sc, par_num++, "PBF");
7267                break;
7268            case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
7269                if (print)
7270                    bxe_print_next_block(sc, par_num++, "QM");
7271                break;
7272            case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
7273                if (print)
7274                    bxe_print_next_block(sc, par_num++, "TM");
7275                break;
7276            case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
7277                if (print)
7278                    bxe_print_next_block(sc, par_num++, "XSDM");
7279                break;
7280            case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
7281                if (print)
7282                    bxe_print_next_block(sc, par_num++, "XCM");
7283                break;
7284            case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
7285                if (print)
7286                    bxe_print_next_block(sc, par_num++, "XSEMI");
7287                break;
7288            case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
7289                if (print)
7290                    bxe_print_next_block(sc, par_num++, "DOORBELLQ");
7291                break;
7292            case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
7293                if (print)
7294                    bxe_print_next_block(sc, par_num++, "NIG");
7295                break;
7296            case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
7297                if (print)
7298                    bxe_print_next_block(sc, par_num++, "VAUX PCI CORE");
7299                *global = TRUE;
7300                break;
7301            case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
7302                if (print)
7303                    bxe_print_next_block(sc, par_num++, "DEBUG");
7304                break;
7305            case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
7306                if (print)
7307                    bxe_print_next_block(sc, par_num++, "USDM");
7308                break;
7309            case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
7310                if (print)
7311                    bxe_print_next_block(sc, par_num++, "UCM");
7312                break;
7313            case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
7314                if (print)
7315                    bxe_print_next_block(sc, par_num++, "USEMI");
7316                break;
7317            case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
7318                if (print)
7319                    bxe_print_next_block(sc, par_num++, "UPB");
7320                break;
7321            case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
7322                if (print)
7323                    bxe_print_next_block(sc, par_num++, "CSDM");
7324                break;
7325            case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
7326                if (print)
7327                    bxe_print_next_block(sc, par_num++, "CCM");
7328                break;
7329            }
7330
7331            /* Clear the bit */
7332            sig &= ~cur_bit;
7333        }
7334    }
7335
7336    return (par_num);
7337}
7338
7339static int
7340bxe_check_blocks_with_parity2(struct bxe_softc *sc,
7341                              uint32_t         sig,
7342                              int              par_num,
7343                              uint8_t          print)
7344{
7345    uint32_t cur_bit = 0;
7346    int i = 0;
7347
7348    for (i = 0; sig; i++) {
7349        cur_bit = ((uint32_t)0x1 << i);
7350        if (sig & cur_bit) {
7351            switch (cur_bit) {
7352            case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
7353                if (print)
7354                    bxe_print_next_block(sc, par_num++, "CSEMI");
7355                break;
7356            case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
7357                if (print)
7358                    bxe_print_next_block(sc, par_num++, "PXP");
7359                break;
7360            case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
7361                if (print)
7362                    bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT");
7363                break;
7364            case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
7365                if (print)
7366                    bxe_print_next_block(sc, par_num++, "CFC");
7367                break;
7368            case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
7369                if (print)
7370                    bxe_print_next_block(sc, par_num++, "CDU");
7371                break;
7372            case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
7373                if (print)
7374                    bxe_print_next_block(sc, par_num++, "DMAE");
7375                break;
7376            case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
7377                if (print)
7378                    bxe_print_next_block(sc, par_num++, "IGU");
7379                break;
7380            case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
7381                if (print)
7382                    bxe_print_next_block(sc, par_num++, "MISC");
7383                break;
7384            }
7385
7386            /* Clear the bit */
7387            sig &= ~cur_bit;
7388        }
7389    }
7390
7391    return (par_num);
7392}
7393
7394static int
7395bxe_check_blocks_with_parity3(struct bxe_softc *sc,
7396                              uint32_t         sig,
7397                              int              par_num,
7398                              uint8_t          *global,
7399                              uint8_t          print)
7400{
7401    uint32_t cur_bit = 0;
7402    int i = 0;
7403
7404    for (i = 0; sig; i++) {
7405        cur_bit = ((uint32_t)0x1 << i);
7406        if (sig & cur_bit) {
7407            switch (cur_bit) {
7408            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
7409                if (print)
7410                    bxe_print_next_block(sc, par_num++, "MCP ROM");
7411                *global = TRUE;
7412                break;
7413            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
7414                if (print)
7415                    bxe_print_next_block(sc, par_num++,
7416                              "MCP UMP RX");
7417                *global = TRUE;
7418                break;
7419            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
7420                if (print)
7421                    bxe_print_next_block(sc, par_num++,
7422                              "MCP UMP TX");
7423                *global = TRUE;
7424                break;
7425            case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
7426                if (print)
7427                    bxe_print_next_block(sc, par_num++,
7428                              "MCP SCPAD");
7429                *global = TRUE;
7430                break;
7431            }
7432
7433            /* Clear the bit */
7434            sig &= ~cur_bit;
7435        }
7436    }
7437
7438    return (par_num);
7439}
7440
7441static int
7442bxe_check_blocks_with_parity4(struct bxe_softc *sc,
7443                              uint32_t         sig,
7444                              int              par_num,
7445                              uint8_t          print)
7446{
7447    uint32_t cur_bit = 0;
7448    int i = 0;
7449
7450    for (i = 0; sig; i++) {
7451        cur_bit = ((uint32_t)0x1 << i);
7452        if (sig & cur_bit) {
7453            switch (cur_bit) {
7454            case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
7455                if (print)
7456                    bxe_print_next_block(sc, par_num++, "PGLUE_B");
7457                break;
7458            case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
7459                if (print)
7460                    bxe_print_next_block(sc, par_num++, "ATC");
7461                break;
7462            }
7463
7464            /* Clear the bit */
7465            sig &= ~cur_bit;
7466        }
7467    }
7468
7469    return (par_num);
7470}
7471
7472static uint8_t
7473bxe_parity_attn(struct bxe_softc *sc,
7474                uint8_t          *global,
7475                uint8_t          print,
7476                uint32_t         *sig)
7477{
7478    int par_num = 0;
7479
7480    if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
7481        (sig[1] & HW_PRTY_ASSERT_SET_1) ||
7482        (sig[2] & HW_PRTY_ASSERT_SET_2) ||
7483        (sig[3] & HW_PRTY_ASSERT_SET_3) ||
7484        (sig[4] & HW_PRTY_ASSERT_SET_4)) {
7485        BLOGE(sc, "Parity error: HW block parity attention:\n"
7486                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
7487              (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0),
7488              (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1),
7489              (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2),
7490              (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3),
7491              (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4));
7492
7493        if (print)
7494            BLOGI(sc, "Parity errors detected in blocks: ");
7495
7496        par_num =
7497            bxe_check_blocks_with_parity0(sc, sig[0] &
7498                                          HW_PRTY_ASSERT_SET_0,
7499                                          par_num, print);
7500        par_num =
7501            bxe_check_blocks_with_parity1(sc, sig[1] &
7502                                          HW_PRTY_ASSERT_SET_1,
7503                                          par_num, global, print);
7504        par_num =
7505            bxe_check_blocks_with_parity2(sc, sig[2] &
7506                                          HW_PRTY_ASSERT_SET_2,
7507                                          par_num, print);
7508        par_num =
7509            bxe_check_blocks_with_parity3(sc, sig[3] &
7510                                          HW_PRTY_ASSERT_SET_3,
7511                                          par_num, global, print);
7512        par_num =
7513            bxe_check_blocks_with_parity4(sc, sig[4] &
7514                                          HW_PRTY_ASSERT_SET_4,
7515                                          par_num, print);
7516
7517        if (print)
7518            BLOGI(sc, "\n");
7519
7520        if (*global == TRUE) {
7521            BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL);
7522        }
7523
7524        return (TRUE);
7525    }
7526
7527    return (FALSE);
7528}
7529
7530static uint8_t
7531bxe_chk_parity_attn(struct bxe_softc *sc,
7532                    uint8_t          *global,
7533                    uint8_t          print)
7534{
7535    struct attn_route attn = { {0} };
7536    int port = SC_PORT(sc);
7537
7538    if(sc->state != BXE_STATE_OPEN)
7539        return FALSE;
7540
7541    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
7542    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
7543    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
7544    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
7545
7546    /*
7547     * Since MCP attentions can't be disabled inside the block, we need to
7548     * read AEU registers to see whether they're currently disabled
7549     */
7550    attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
7551                                      : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) &
7552                         MISC_AEU_ENABLE_MCP_PRTY_BITS) |
7553                        ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
7554
7555
7556    if (!CHIP_IS_E1x(sc))
7557        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
7558
7559    return (bxe_parity_attn(sc, global, print, attn.sig));
7560}
7561
7562static void
7563bxe_attn_int_deasserted4(struct bxe_softc *sc,
7564                         uint32_t         attn)
7565{
7566    uint32_t val;
7567    bool err_flg = false;
7568
7569    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
7570        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
7571        BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
7572        err_flg = true;
7573        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
7574            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
7575        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
7576            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
7577        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
7578            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
7579        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
7580            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
7581        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
7582            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
7583        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
7584            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
7585        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
7586            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
7587        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
7588            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
7589        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
7590            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
7591    }
7592
7593    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
7594        val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
7595        BLOGE(sc, "ATC hw attention 0x%08x\n", val);
7596	err_flg = true;
7597        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
7598            BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
7599        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
7600            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
7601        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
7602            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
7603        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
7604            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
7605        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
7606            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
7607        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
7608            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
7609    }
7610
7611    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7612                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
7613        BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
7614              (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
7615                                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
7616	err_flg = true;
7617    }
7618    if (err_flg) {
7619	BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
7620	taskqueue_enqueue_timeout(taskqueue_thread,
7621	    &sc->sp_err_timeout_task, hz/10);
7622    }
7623
7624}
7625
7626static void
7627bxe_e1h_disable(struct bxe_softc *sc)
7628{
7629    int port = SC_PORT(sc);
7630
7631    bxe_tx_disable(sc);
7632
7633    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7634}
7635
7636static void
7637bxe_e1h_enable(struct bxe_softc *sc)
7638{
7639    int port = SC_PORT(sc);
7640
7641    REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7642
7643    // XXX bxe_tx_enable(sc);
7644}
7645
7646/*
7647 * called due to MCP event (on pmf):
7648 *   reread new bandwidth configuration
7649 *   configure FW
7650 *   notify other functions about the change
7651 */
7652static void
7653bxe_config_mf_bw(struct bxe_softc *sc)
7654{
7655    if (sc->link_vars.link_up) {
7656        bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX);
7657        // XXX bxe_link_sync_notify(sc);
7658    }
7659
7660    storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7661}
7662
7663static void
7664bxe_set_mf_bw(struct bxe_softc *sc)
7665{
7666    bxe_config_mf_bw(sc);
7667    bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
7668}
7669
7670static void
7671bxe_handle_eee_event(struct bxe_softc *sc)
7672{
7673    BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7674    bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
7675}
7676
7677#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
7678
7679static void
7680bxe_drv_info_ether_stat(struct bxe_softc *sc)
7681{
7682    struct eth_stats_info *ether_stat =
7683        &sc->sp->drv_info_to_mcp.ether_stat;
7684
7685    strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7686            ETH_STAT_INFO_VERSION_LEN);
7687
7688    /* XXX (+ MAC_PAD) taken from other driver... verify this is right */
7689    sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7690                                          DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
7691                                          ether_stat->mac_local + MAC_PAD,
7692                                          MAC_PAD, ETH_ALEN);
7693
7694    ether_stat->mtu_size = sc->mtu;
7695
7696    ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
7697    if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
7698        ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
7699    }
7700
7701    // XXX ether_stat->feature_flags |= ???;
7702
7703    ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
7704
7705    ether_stat->txq_size = sc->tx_ring_size;
7706    ether_stat->rxq_size = sc->rx_ring_size;
7707}
7708
7709static void
7710bxe_handle_drv_info_req(struct bxe_softc *sc)
7711{
7712    enum drv_info_opcode op_code;
7713    uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control);
7714
7715    /* if drv_info version supported by MFW doesn't match - send NACK */
7716    if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
7717        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7718        return;
7719    }
7720
7721    op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
7722               DRV_INFO_CONTROL_OP_CODE_SHIFT);
7723
7724    memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
7725
7726    switch (op_code) {
7727    case ETH_STATS_OPCODE:
7728        bxe_drv_info_ether_stat(sc);
7729        break;
7730    case FCOE_STATS_OPCODE:
7731    case ISCSI_STATS_OPCODE:
7732    default:
7733        /* if op code isn't supported - send NACK */
7734        bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0);
7735        return;
7736    }
7737
7738    /*
7739     * If we got drv_info attn from MFW then these fields are defined in
7740     * shmem2 for sure
7741     */
7742    SHMEM2_WR(sc, drv_info_host_addr_lo,
7743              U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7744    SHMEM2_WR(sc, drv_info_host_addr_hi,
7745              U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp)));
7746
7747    bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0);
7748}
7749
7750static void
7751bxe_dcc_event(struct bxe_softc *sc,
7752              uint32_t         dcc_event)
7753{
7754    BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event);
7755
7756    if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
7757        /*
7758         * This is the only place besides the function initialization
7759         * where the sc->flags can change so it is done without any
7760         * locks
7761         */
7762        if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
7763            BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n");
7764            sc->flags |= BXE_MF_FUNC_DIS;
7765            bxe_e1h_disable(sc);
7766        } else {
7767            BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n");
7768            sc->flags &= ~BXE_MF_FUNC_DIS;
7769            bxe_e1h_enable(sc);
7770        }
7771        dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
7772    }
7773
7774    if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
7775        bxe_config_mf_bw(sc);
7776        dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
7777    }
7778
7779    /* Report results to MCP */
7780    if (dcc_event)
7781        bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0);
7782    else
7783        bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0);
7784}
7785
7786static void
7787bxe_pmf_update(struct bxe_softc *sc)
7788{
7789    int port = SC_PORT(sc);
7790    uint32_t val;
7791
7792    sc->port.pmf = 1;
7793    BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
7794
7795    /*
7796     * We need the mb() to ensure the ordering between the writing to
7797     * sc->port.pmf here and reading it from the bxe_periodic_task().
7798     */
7799    mb();
7800
7801    /* queue a periodic task */
7802    // XXX schedule task...
7803
7804    // XXX bxe_dcbx_pmf_update(sc);
7805
7806    /* enable nig attention */
7807    val = (0xff0f | (1 << (SC_VN(sc) + 4)));
7808    if (sc->devinfo.int_block == INT_BLOCK_HC) {
7809        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val);
7810        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val);
7811    } else if (!CHIP_IS_E1x(sc)) {
7812        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
7813        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
7814    }
7815
7816    bxe_stats_handle(sc, STATS_EVENT_PMF);
7817}
7818
7819static int
7820bxe_mc_assert(struct bxe_softc *sc)
7821{
7822    char last_idx;
7823    int i, rc = 0;
7824    uint32_t row0, row1, row2, row3;
7825
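    /*
     * Each STORM keeps an assert list in its internal memory: a last-index
     * byte plus STORM_ASSERT_ARRAY_SIZE entries of four dwords each, read
     * below at offsets 0/4/8/12. An entry whose first dword still holds
     * COMMON_ASM_INVALID_ASSERT_OPCODE marks the end of the valid entries.
     */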
7826    /* XSTORM */
7827    last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
7828    if (last_idx)
7829        BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7830
7831    /* print the asserts */
7832    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7833
7834        row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i));
7835        row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4);
7836        row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8);
7837        row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12);
7838
7839        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7840            BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7841                  i, row3, row2, row1, row0);
7842            rc++;
7843        } else {
7844            break;
7845        }
7846    }
7847
7848    /* TSTORM */
7849    last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
7850    if (last_idx) {
7851        BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7852    }
7853
7854    /* print the asserts */
7855    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7856
7857        row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i));
7858        row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4);
7859        row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8);
7860        row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12);
7861
7862        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7863            BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7864                  i, row3, row2, row1, row0);
7865            rc++;
7866        } else {
7867            break;
7868        }
7869    }
7870
7871    /* CSTORM */
7872    last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
7873    if (last_idx) {
7874        BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7875    }
7876
7877    /* print the asserts */
7878    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7879
7880        row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i));
7881        row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4);
7882        row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8);
7883        row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12);
7884
7885        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7886            BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7887                  i, row3, row2, row1, row0);
7888            rc++;
7889        } else {
7890            break;
7891        }
7892    }
7893
7894    /* USTORM */
7895    last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
7896    if (last_idx) {
7897        BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
7898    }
7899
7900    /* print the asserts */
7901    for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
7902
7903        row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i));
7904        row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4);
7905        row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8);
7906        row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12);
7907
7908        if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
7909            BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
7910                  i, row3, row2, row1, row0);
7911            rc++;
7912        } else {
7913            break;
7914        }
7915    }
7916
7917    return (rc);
7918}
7919
7920static void
7921bxe_attn_int_deasserted3(struct bxe_softc *sc,
7922                         uint32_t         attn)
7923{
7924    int func = SC_FUNC(sc);
7925    uint32_t val;
7926
7927    if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
7928
7929        if (attn & BXE_PMF_LINK_ASSERT(sc)) {
7930
7931            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7932            bxe_read_mf_cfg(sc);
7933            sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
7934                MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
7935            val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);
7936
7937            if (val & DRV_STATUS_DCC_EVENT_MASK)
7938                bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));
7939
7940            if (val & DRV_STATUS_SET_MF_BW)
7941                bxe_set_mf_bw(sc);
7942
7943            if (val & DRV_STATUS_DRV_INFO_REQ)
7944                bxe_handle_drv_info_req(sc);
7945
7946            if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
7947                bxe_pmf_update(sc);
7948
7949            if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
7950                bxe_handle_eee_event(sc);
7951
7952            if (sc->link_vars.periodic_flags &
7953                ELINK_PERIODIC_FLAGS_LINK_EVENT) {
7954                /* sync with link */
7955		bxe_acquire_phy_lock(sc);
7956                sc->link_vars.periodic_flags &=
7957                    ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
7958		bxe_release_phy_lock(sc);
7959                if (IS_MF(sc))
7960                    ; // XXX bxe_link_sync_notify(sc);
7961                bxe_link_report(sc);
7962            }
7963
7964            /*
7965             * Always call it here: bxe_link_report() will prevent
7966             * duplicate link indications.
7967             */
7968            bxe_link_status_update(sc);
7969
7970        } else if (attn & BXE_MC_ASSERT_BITS) {
7971
7972            BLOGE(sc, "MC assert!\n");
7973            bxe_mc_assert(sc);
7974            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
7975            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
7976            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
7977            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
7978            bxe_int_disable(sc);
7979            BXE_SET_ERROR_BIT(sc, BXE_ERR_MC_ASSERT);
7980            taskqueue_enqueue_timeout(taskqueue_thread,
7981                &sc->sp_err_timeout_task, hz/10);
7982
7983        } else if (attn & BXE_MCP_ASSERT) {
7984
7985            BLOGE(sc, "MCP assert!\n");
7986            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
7987            BXE_SET_ERROR_BIT(sc, BXE_ERR_MCP_ASSERT);
7988            taskqueue_enqueue_timeout(taskqueue_thread,
7989                &sc->sp_err_timeout_task, hz/10);
            bxe_int_disable(sc); /* avoid repetitive assert alerts */

7993        } else {
7994            BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
7995        }
7996    }
7997
7998    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
7999        BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
8000        if (attn & BXE_GRC_TIMEOUT) {
8001            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
8002            BLOGE(sc, "GRC time-out 0x%08x\n", val);
8003        }
8004        if (attn & BXE_GRC_RSV) {
8005            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
8006            BLOGE(sc, "GRC reserved 0x%08x\n", val);
8007        }
8008        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
8009    }
8010}
8011
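/*
 * Handle the group 2 attention signals that were deasserted: CFC, PXP,
 * and PXP2 hardware attentions plus HW block attention set 2. Fatal
 * conditions set an error bit and schedule the error handling task.
 */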
8012static void
8013bxe_attn_int_deasserted2(struct bxe_softc *sc,
8014                         uint32_t         attn)
8015{
8016    int port = SC_PORT(sc);
8017    int reg_offset;
8018    uint32_t val0, mask0, val1, mask1;
8019    uint32_t val;
8020    bool err_flg = false;
8021
8022    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
8023        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
8024        BLOGE(sc, "CFC hw attention 0x%08x\n", val);
8025        /* CFC error attention */
8026        if (val & 0x2) {
8027            BLOGE(sc, "FATAL error from CFC\n");
            err_flg = true;
8029        }
8030    }
8031
8032    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
8033        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
8034        BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8035        /* RQ_USDMDP_FIFO_OVERFLOW */
8036        if (val & 0x18000) {
8037            BLOGE(sc, "FATAL error from PXP\n");
            err_flg = true;
8039        }
8040
8041        if (!CHIP_IS_E1x(sc)) {
8042            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
8043            BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
            err_flg = true;
8045        }
8046    }
8047
8048#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
8049#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT
8050
8051    if (attn & AEU_PXP2_HW_INT_BIT) {
        /*
         * CQ47854 workaround: do not panic on
         * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR.
         */
8055        if (!CHIP_IS_E1x(sc)) {
8056            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
8057            val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
8058            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
8059            val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
            /*
             * If PXP2_EOP_ERROR_BIT is the only bit set in STS0 and no
             * bits are set in STS1, clear it.
             *
             * We may lose additional attentions between reading STS0
             * and STS_CLR0; in that case the user will not be notified
             * about them.
             */
8068            if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
8069                !(val1 & mask1))
8070                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
8071
8072            /* print the register, since no one can restore it */
8073            BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);
8074
            /*
             * If PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is set
             * then notify.
             */
8079            if (val0 & PXP2_EOP_ERROR_BIT) {
8080                BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");
                err_flg = true;
8082
8083                /*
8084                 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
8085                 * set then clear attention from PXP2 block without panic
8086                 */
8087                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
8088                    ((val1 & mask1) == 0))
8089                    attn &= ~AEU_PXP2_HW_INT_BIT;
8090            }
8091        }
8092    }
8093
8094    if (attn & HW_INTERRUT_ASSERT_SET_2) {
8095        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
8096                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
8097
8098        val = REG_RD(sc, reg_offset);
8099        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
8100        REG_WR(sc, reg_offset, val);
8101
8102        BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
8103              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
        err_flg = true;
8105        bxe_panic(sc, ("HW block attention set2\n"));
8106    }
    if (err_flg) {
        BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL);
        taskqueue_enqueue_timeout(taskqueue_thread,
           &sc->sp_err_timeout_task, hz/10);
    }
}
8114
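/*
 * Handle the group 1 attention signals that were deasserted: doorbell
 * queue (DORQ) attentions and HW block attention set 1. Fatal conditions
 * set an error bit and schedule the error handling task.
 */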
8115static void
8116bxe_attn_int_deasserted1(struct bxe_softc *sc,
8117                         uint32_t         attn)
8118{
8119    int port = SC_PORT(sc);
8120    int reg_offset;
8121    uint32_t val;
8122    bool err_flg = false;
8123
8124    if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
8125        val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
8126        BLOGE(sc, "DB hw attention 0x%08x\n", val);
8127        /* DORQ discard attention */
8128        if (val & 0x2) {
8129            BLOGE(sc, "FATAL error from DORQ\n");
            err_flg = true;
8131        }
8132    }
8133
8134    if (attn & HW_INTERRUT_ASSERT_SET_1) {
8135        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
8136                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
8137
8138        val = REG_RD(sc, reg_offset);
8139        val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
8140        REG_WR(sc, reg_offset, val);
8141
8142        BLOGE(sc, "FATAL HW block attention set1 0x%08x\n",
8143              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1));
8144        err_flg = true;
8145        bxe_panic(sc, ("HW block attention set1\n"));
8146    }
    if (err_flg) {
        BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
        taskqueue_enqueue_timeout(taskqueue_thread,
           &sc->sp_err_timeout_task, hz/10);
    }
}
8154
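/*
 * Handle the group 0 attention signals that were deasserted: SPIO5
 * (fan failure), PHY module detection, and HW block attention set 0.
 */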
8155static void
8156bxe_attn_int_deasserted0(struct bxe_softc *sc,
8157                         uint32_t         attn)
8158{
8159    int port = SC_PORT(sc);
8160    int reg_offset;
8161    uint32_t val;
8162
8163    reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
8164                          MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
8165
8166    if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
8167        val = REG_RD(sc, reg_offset);
8168        val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
8169        REG_WR(sc, reg_offset, val);
8170
8171        BLOGW(sc, "SPIO5 hw attention\n");
8172
8173        /* Fan failure attention */
8174        elink_hw_reset_phy(&sc->link_params);
8175        bxe_fan_failure(sc);
8176    }
8177
8178    if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
        bxe_acquire_phy_lock(sc);
        elink_handle_module_detect_int(&sc->link_params);
        bxe_release_phy_lock(sc);
8182    }
8183
8184    if (attn & HW_INTERRUT_ASSERT_SET_0) {
8185        val = REG_RD(sc, reg_offset);
8186        val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
8187        REG_WR(sc, reg_offset, val);
8188
8190        BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC);
8191        taskqueue_enqueue_timeout(taskqueue_thread,
8192           &sc->sp_err_timeout_task, hz/10);
8193
        bxe_panic(sc, ("FATAL HW block attention set0 0x%08x\n",
                       (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_0)));
8196    }
8197}
8198
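/*
 * Handle deasserted attention bits. Reads the after-invert attention
 * registers under the ALR lock, dispatches each dynamic attention group
 * to its per-group handler, clears the attention bits in the HC/IGU,
 * and re-enables the deasserted bits in the AEU mask.
 */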
8199static void
8200bxe_attn_int_deasserted(struct bxe_softc *sc,
8201                        uint32_t         deasserted)
8202{
8203    struct attn_route attn;
8204    struct attn_route *group_mask;
8205    int port = SC_PORT(sc);
8206    int index;
8207    uint32_t reg_addr;
8208    uint32_t val;
8209    uint32_t aeu_mask;
8210    uint8_t global = FALSE;
8211
8212    /*
8213     * Need to take HW lock because MCP or other port might also
8214     * try to handle this event.
8215     */
8216    bxe_acquire_alr(sc);
8217
8218    if (bxe_chk_parity_attn(sc, &global, TRUE)) {
8219        /* XXX
8220         * In case of parity errors don't handle attentions so that
8221         * other function would "see" parity errors.
8222         */
8223        // XXX schedule a recovery task...
8224        /* disable HW interrupts */
8225        bxe_int_disable(sc);
8226        BXE_SET_ERROR_BIT(sc, BXE_ERR_PARITY);
8227        taskqueue_enqueue_timeout(taskqueue_thread,
8228           &sc->sp_err_timeout_task, hz/10);
8229        bxe_release_alr(sc);
8230        return;
8231    }
8232
8233    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
8234    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
8235    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
8236    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
8237    if (!CHIP_IS_E1x(sc)) {
8238        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
8239    } else {
8240        attn.sig[4] = 0;
8241    }
8242
8243    BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
8244          attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
8245
8246    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
8247        if (deasserted & (1 << index)) {
8248            group_mask = &sc->attn_group[index];
8249
8250            BLOGD(sc, DBG_INTR,
8251                  "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
8252                  group_mask->sig[0], group_mask->sig[1],
8253                  group_mask->sig[2], group_mask->sig[3],
8254                  group_mask->sig[4]);
8255
8256            bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8257            bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8258            bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8259            bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8260            bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8261        }
8262    }
8263
8264    bxe_release_alr(sc);
8265
8266    if (sc->devinfo.int_block == INT_BLOCK_HC) {
8267        reg_addr = (HC_REG_COMMAND_REG + port*32 +
8268                    COMMAND_REG_ATTN_BITS_CLR);
8269    } else {
8270        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
8271    }
8272
8273    val = ~deasserted;
8274    BLOGD(sc, DBG_INTR,
8275          "about to mask 0x%08x at %s addr 0x%08x\n", val,
8276          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8277    REG_WR(sc, reg_addr, val);
8278
8279    if (~sc->attn_state & deasserted) {
8280        BLOGE(sc, "IGU error\n");
8281    }
8282
8283    reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8284                      MISC_REG_AEU_MASK_ATTN_FUNC_0;
8285
8286    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8287
8288    aeu_mask = REG_RD(sc, reg_addr);
8289
8290    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
8291          aeu_mask, deasserted);
8292    aeu_mask |= (deasserted & 0x3ff);
8293    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);
8294
8295    REG_WR(sc, reg_addr, aeu_mask);
8296    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
8297
8298    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8299    sc->attn_state &= ~deasserted;
8300    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8301}
8302
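/*
 * Attention interrupt handler. Compares the attention bits reported in
 * the default status block against the acked bits and the driver's
 * attention state to determine which bits were newly asserted or
 * deasserted, then processes each set.
 */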
8303static void
8304bxe_attn_int(struct bxe_softc *sc)
8305{
8306    /* read local copy of bits */
8307    uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8308    uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8309    uint32_t attn_state = sc->attn_state;
8310
8311    /* look for changed bits */
8312    uint32_t asserted   =  attn_bits & ~attn_ack & ~attn_state;
8313    uint32_t deasserted = ~attn_bits &  attn_ack &  attn_state;
8314
8315    BLOGD(sc, DBG_INTR,
8316          "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n",
8317          attn_bits, attn_ack, asserted, deasserted);
8318
8319    if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
8320        BLOGE(sc, "BAD attention state\n");
8321    }
8322
8323    /* handle bits that were raised */
8324    if (asserted) {
8325        bxe_attn_int_asserted(sc, asserted);
8326    }
8327
8328    if (deasserted) {
8329        bxe_attn_int_deasserted(sc, deasserted);
8330    }
8331}
8332
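/*
 * Re-read the default status block indices and return a bitmask
 * (BXE_DEF_SB_ATT_IDX and/or BXE_DEF_SB_IDX) indicating which of the
 * attention and slowpath indices changed since they were last seen.
 */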
8333static uint16_t
8334bxe_update_dsb_idx(struct bxe_softc *sc)
8335{
8336    struct host_sp_status_block *def_sb = sc->def_sb;
8337    uint16_t rc = 0;
8338
8339    mb(); /* status block is written to by the chip */
8340
8341    if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8342        sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8343        rc |= BXE_DEF_SB_ATT_IDX;
8344    }
8345
8346    if (sc->def_idx != def_sb->sp_sb.running_index) {
8347        sc->def_idx = def_sb->sp_sb.running_index;
8348        rc |= BXE_DEF_SB_IDX;
8349    }
8350
8351    mb();
8352
8353    return (rc);
8354}
8355
8356static inline struct ecore_queue_sp_obj *
8357bxe_cid_to_q_obj(struct bxe_softc *sc,
8358                 uint32_t         cid)
8359{
8360    BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid);
8361    return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8362}
8363
8364static void
8365bxe_handle_mcast_eqe(struct bxe_softc *sc)
8366{
8367    struct ecore_mcast_ramrod_params rparam;
8368    int rc;
8369
8370    memset(&rparam, 0, sizeof(rparam));
8371
8372    rparam.mcast_obj = &sc->mcast_obj;
8373
8374    BXE_MCAST_LOCK(sc);
8375
8376    /* clear pending state for the last command */
8377    sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8378
8379    /* if there are pending mcast commands - send them */
8380    if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8381        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
8382        if (rc < 0) {
8383            BLOGD(sc, DBG_SP,
8384                "ERROR: Failed to send pending mcast commands (%d)\n", rc);
8385        }
8386    }
8387
8388    BXE_MCAST_UNLOCK(sc);
8389}
8390
8391static void
8392bxe_handle_classification_eqe(struct bxe_softc      *sc,
8393                              union event_ring_elem *elem)
8394{
8395    unsigned long ramrod_flags = 0;
8396    int rc = 0;
8397    uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8398    struct ecore_vlan_mac_obj *vlan_mac_obj;
8399
8400    /* always push next commands out, don't wait here */
8401    bit_set(&ramrod_flags, RAMROD_CONT);
8402
8403    switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8404    case ECORE_FILTER_MAC_PENDING:
8405        BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
8406        vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8407        break;
8408
8409    case ECORE_FILTER_MCAST_PENDING:
8410        BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
8411        /*
8412         * This is only relevant for 57710 where multicast MACs are
8413         * configured as unicast MACs using the same ramrod.
8414         */
8415        bxe_handle_mcast_eqe(sc);
8416        return;
8417
8418    default:
8419        BLOGE(sc, "Unsupported classification command: %d\n",
8420              elem->message.data.eth_event.echo);
8421        return;
8422    }
8423
8424    rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8425
8426    if (rc < 0) {
8427        BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
8428    } else if (rc > 0) {
8429        BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
8430    }
8431}
8432
8433static void
8434bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
8435                       union event_ring_elem *elem)
8436{
8437    bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8438
8439    /* send rx_mode command again if was requested */
8440    if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
8441                               &sc->sp_state)) {
8442        bxe_set_storm_rx_mode(sc);
8443    }
8444}
8445
8446static void
8447bxe_update_eq_prod(struct bxe_softc *sc,
8448                   uint16_t         prod)
8449{
8450    storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
8451    wmb(); /* keep prod updates ordered */
8452}
8453
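/*
 * Process the event queue (EQ): walk the elements between the driver's
 * consumer and the hardware consumer, complete the corresponding ramrod
 * commands (CFC delete, function/queue state changes, classification,
 * multicast, rx mode, RSS), return the consumed credits to eq_spq_left,
 * and advance the EQ producer.
 */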
8454static void
8455bxe_eq_int(struct bxe_softc *sc)
8456{
8457    uint16_t hw_cons, sw_cons, sw_prod;
8458    union event_ring_elem *elem;
8459    uint8_t echo;
8460    uint32_t cid;
8461    uint8_t opcode;
8462    int spqe_cnt = 0;
8463    struct ecore_queue_sp_obj *q_obj;
8464    struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8465    struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8466
8467    hw_cons = le16toh(*sc->eq_cons_sb);
8468
    /*
     * The hw_cons range is 1-255, 257 while the sw_cons range is 0-254, 256.
     * When we reach the next-page element we must adjust the index so the
     * loop condition below is met. The next-page element occupies the space
     * of one regular element, hence the increment by 1.
     */
8475    if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
8476        hw_cons++;
8477    }
8478
    /*
     * This function never runs in parallel with itself for a specific
     * sc, so no read memory barrier is needed here.
     */
8483    sw_cons = sc->eq_cons;
8484    sw_prod = sc->eq_prod;
8485
8486    BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
8487          hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
8488
8489    for (;
8490         sw_cons != hw_cons;
8491         sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
8492
8493        elem = &sc->eq[EQ_DESC(sw_cons)];
8494
8495        /* elem CID originates from FW, actually LE */
8496        cid = SW_CID(elem->message.data.cfc_del_event.cid);
8497        opcode = elem->message.opcode;
8498
8499        /* handle eq element */
8500        switch (opcode) {
8501
8502        case EVENT_RING_OPCODE_STAT_QUERY:
8503            BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
8504                  sc->stats_comp++);
8505            /* nothing to do with stats comp */
8506            goto next_spqe;
8507
8508        case EVENT_RING_OPCODE_CFC_DEL:
8509            /* handle according to cid range */
8510            /* we may want to verify here that the sc state is HALTING */
8511            BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
8512            q_obj = bxe_cid_to_q_obj(sc, cid);
8513            if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8514                break;
8515            }
8516            goto next_spqe;
8517
8518        case EVENT_RING_OPCODE_STOP_TRAFFIC:
8519            BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
8520            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8521                break;
8522            }
8523            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
8524            goto next_spqe;
8525
8526        case EVENT_RING_OPCODE_START_TRAFFIC:
8527            BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
8528            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8529                break;
8530            }
8531            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
8532            goto next_spqe;
8533
8534        case EVENT_RING_OPCODE_FUNCTION_UPDATE:
8535            echo = elem->message.data.function_update_event.echo;
8536            if (echo == SWITCH_UPDATE) {
8537                BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
8538                if (f_obj->complete_cmd(sc, f_obj,
8539                                        ECORE_F_CMD_SWITCH_UPDATE)) {
8540                    break;
8541                }
            } else {
8544                BLOGD(sc, DBG_SP,
8545                      "AFEX: ramrod completed FUNCTION_UPDATE\n");
8546            }
8547            goto next_spqe;
8548
8549        case EVENT_RING_OPCODE_FORWARD_SETUP:
8550            q_obj = &bxe_fwd_sp_obj(sc, q_obj);
8551            if (q_obj->complete_cmd(sc, q_obj,
8552                                    ECORE_Q_CMD_SETUP_TX_ONLY)) {
8553                break;
8554            }
8555            goto next_spqe;
8556
8557        case EVENT_RING_OPCODE_FUNCTION_START:
8558            BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
8559            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8560                break;
8561            }
8562            goto next_spqe;
8563
8564        case EVENT_RING_OPCODE_FUNCTION_STOP:
8565            BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
8566            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8567                break;
8568            }
8569            goto next_spqe;
8570        }
8571
8572        switch (opcode | sc->state) {
8573        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
8574        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
8575            cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8576            BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
8577            rss_raw->clear_pending(rss_raw);
8578            break;
8579
8580        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
8581        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
8582        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
8583        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
8584        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
8585        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8586            BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
8587            bxe_handle_classification_eqe(sc, elem);
8588            break;
8589
8590        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
8591        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
8592        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8593            BLOGD(sc, DBG_SP, "got mcast ramrod\n");
8594            bxe_handle_mcast_eqe(sc);
8595            break;
8596
8597        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
8598        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
8599        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
8600            BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
8601            bxe_handle_rx_mode_eqe(sc, elem);
8602            break;
8603
8604        default:
            /* unknown event: log an error and continue */
8606            BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8607                  elem->message.opcode, sc->state);
8608        }
8609
8610next_spqe:
8611        spqe_cnt++;
8612    } /* for */
8613
8614    mb();
8615    atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8616
8617    sc->eq_cons = sw_cons;
8618    sc->eq_prod = sw_prod;
8619
    /* make sure the memory writes above are issued before the producer update */
8621    wmb();
8622
8623    /* update producer */
8624    bxe_update_eq_prod(sc, sc->eq_prod);
8625}
8626
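/*
 * Slowpath taskqueue handler. Determines which default status block
 * indices changed, processes HW attentions and EQ completions as needed,
 * and acknowledges the default status block to re-enable interrupts.
 */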
8627static void
8628bxe_handle_sp_tq(void *context,
8629                 int  pending)
8630{
8631    struct bxe_softc *sc = (struct bxe_softc *)context;
8632    uint16_t status;
8633
8634    BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8635
8636    /* what work needs to be performed? */
8637    status = bxe_update_dsb_idx(sc);
8638
8639    BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);
8640
8641    /* HW attentions */
8642    if (status & BXE_DEF_SB_ATT_IDX) {
8643        BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8644        bxe_attn_int(sc);
8645        status &= ~BXE_DEF_SB_ATT_IDX;
8646    }
8647
8648    /* SP events: STAT_QUERY and others */
8649    if (status & BXE_DEF_SB_IDX) {
8650        /* handle EQ completions */
8651        BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8652        bxe_eq_int(sc);
8653        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8654                   le16toh(sc->def_idx), IGU_INT_NOP, 1);
8655        status &= ~BXE_DEF_SB_IDX;
8656    }
8657
    /* if status is non-zero then something went wrong */
8659    if (__predict_false(status)) {
8660        BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
8661    }
8662
    /* acknowledge the attention index and re-enable attention interrupts */
8664    bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8665               le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
8666
8667    /*
8668     * Must be called after the EQ processing (since eq leads to sriov
8669     * ramrod completion flows).
8670     * This flow may have been scheduled by the arrival of a ramrod
8671     * completion, or by the sriov code rescheduling itself.
8672     */
8673    // XXX bxe_iov_sp_task(sc);
8674
8675}
8676
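/*
 * Fastpath taskqueue handler. Updates the fastpath status block index,
 * completes pending TX and RX work, and either reschedules itself if more
 * RX work remains or acknowledges the status block to re-enable the
 * fastpath interrupt.
 */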
8677static void
8678bxe_handle_fp_tq(void *context,
8679                 int  pending)
8680{
8681    struct bxe_fastpath *fp = (struct bxe_fastpath *)context;
8682    struct bxe_softc *sc = fp->sc;
8683    /* uint8_t more_tx = FALSE; */
8684    uint8_t more_rx = FALSE;
8685
8686    BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
8687
8688    /* XXX
8689     * IFF_DRV_RUNNING state can't be checked here since we process
8690     * slowpath events on a client queue during setup. Instead
8691     * we need to add a "process/continue" flag here that the driver
8692     * can use to tell the task here not to do anything.
8693     */
8694#if 0
8695    if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
8696        return;
8697    }
8698#endif
8699
8700    /* update the fastpath index */
8701    bxe_update_fp_sb_idx(fp);
8702
8703    /* XXX add loop here if ever support multiple tx CoS */
8704    /* fp->txdata[cos] */
8705    if (bxe_has_tx_work(fp)) {
8706        BXE_FP_TX_LOCK(fp);
8707        /* more_tx = */ bxe_txeof(sc, fp);
8708        BXE_FP_TX_UNLOCK(fp);
8709    }
8710
8711    if (bxe_has_rx_work(fp)) {
8712        more_rx = bxe_rxeof(sc, fp);
8713    }
8714
8715    if (more_rx /*|| more_tx*/) {
8716        /* still more work to do */
8717        taskqueue_enqueue(fp->tq, &fp->tq_task);
8718        return;
8719    }
8720
8721    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8722               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8723}
8724
8725static void
8726bxe_task_fp(struct bxe_fastpath *fp)
8727{
8728    struct bxe_softc *sc = fp->sc;
8729    /* uint8_t more_tx = FALSE; */
8730    uint8_t more_rx = FALSE;
8731
8732    BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
8733
8734    /* update the fastpath index */
8735    bxe_update_fp_sb_idx(fp);
8736
8737    /* XXX add loop here if ever support multiple tx CoS */
8738    /* fp->txdata[cos] */
8739    if (bxe_has_tx_work(fp)) {
8740        BXE_FP_TX_LOCK(fp);
8741        /* more_tx = */ bxe_txeof(sc, fp);
8742        BXE_FP_TX_UNLOCK(fp);
8743    }
8744
8745    if (bxe_has_rx_work(fp)) {
8746        more_rx = bxe_rxeof(sc, fp);
8747    }
8748
8749    if (more_rx /*|| more_tx*/) {
        /* still more work to do; bail out of this ISR and process later */
8751        taskqueue_enqueue(fp->tq, &fp->tq_task);
8752        return;
8753    }
8754
    /*
     * Here we write the fastpath index taken before doing any tx or rx work.
     * It is quite possible that other hw events occurred up to this point
     * and were processed accordingly above. Since we are going to write an
     * older fastpath index, an interrupt may arrive in which we do no work.
     */
8762    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8763               le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8764}
8765
8766/*
8767 * Legacy interrupt entry point.
8768 *
8769 * Verifies that the controller generated the interrupt and
8770 * then calls a separate routine to handle the various
8771 * interrupt causes: link, RX, and TX.
8772 */
8773static void
8774bxe_intr_legacy(void *xsc)
8775{
8776    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8777    struct bxe_fastpath *fp;
8778    uint16_t status, mask;
8779    int i;
8780
8781    BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
8782
8783    /*
8784     * 0 for ustorm, 1 for cstorm
8785     * the bits returned from ack_int() are 0-15
8786     * bit 0 = attention status block
8787     * bit 1 = fast path status block
8788     * a mask of 0x2 or more = tx/rx event
8789     * a mask of 1 = slow path event
8790     */
8791
8792    status = bxe_ack_int(sc);
8793
8794    /* the interrupt is not for us */
8795    if (__predict_false(status == 0)) {
8796        BLOGD(sc, DBG_INTR, "Not our interrupt!\n");
8797        return;
8798    }
8799
8800    BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status);
8801
8802    FOR_EACH_ETH_QUEUE(sc, i) {
8803        fp = &sc->fp[i];
8804        mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
8805        if (status & mask) {
8806            /* acknowledge and disable further fastpath interrupts */
8807            bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8808            bxe_task_fp(fp);
8809            status &= ~mask;
8810        }
8811    }
8812
8813    if (__predict_false(status & 0x1)) {
8814        /* acknowledge and disable further slowpath interrupts */
8815        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8816
8817        /* schedule slowpath handler */
8818        taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8819
8820        status &= ~0x1;
8821    }
8822
8823    if (__predict_false(status)) {
8824        BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status);
8825    }
8826}
8827
8828/* slowpath interrupt entry point */
8829static void
8830bxe_intr_sp(void *xsc)
8831{
8832    struct bxe_softc *sc = (struct bxe_softc *)xsc;
8833
8834    BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
8835
8836    /* acknowledge and disable further slowpath interrupts */
8837    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8838
8839    /* schedule slowpath handler */
8840    taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8841}
8842
8843/* fastpath interrupt entry point */
8844static void
8845bxe_intr_fp(void *xfp)
8846{
8847    struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp;
8848    struct bxe_softc *sc = fp->sc;
8849
8850    BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
8851
8852    BLOGD(sc, DBG_INTR,
8853          "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
8854          curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
8855
8856    /* acknowledge and disable further fastpath interrupts */
8857    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8858
8859    bxe_task_fp(fp);
8860}
8861
8862/* Release all interrupts allocated by the driver. */
8863static void
8864bxe_interrupt_free(struct bxe_softc *sc)
8865{
8866    int i;
8867
8868    switch (sc->interrupt_mode) {
8869    case INTR_MODE_INTX:
8870        BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n");
8871        if (sc->intr[0].resource != NULL) {
8872            bus_release_resource(sc->dev,
8873                                 SYS_RES_IRQ,
8874                                 sc->intr[0].rid,
8875                                 sc->intr[0].resource);
8876        }
8877        break;
8878    case INTR_MODE_MSI:
8879        for (i = 0; i < sc->intr_count; i++) {
8880            BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i);
8881            if (sc->intr[i].resource && sc->intr[i].rid) {
8882                bus_release_resource(sc->dev,
8883                                     SYS_RES_IRQ,
8884                                     sc->intr[i].rid,
8885                                     sc->intr[i].resource);
8886            }
8887        }
8888        pci_release_msi(sc->dev);
8889        break;
8890    case INTR_MODE_MSIX:
8891        for (i = 0; i < sc->intr_count; i++) {
8892            BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
8893            if (sc->intr[i].resource && sc->intr[i].rid) {
8894                bus_release_resource(sc->dev,
8895                                     SYS_RES_IRQ,
8896                                     sc->intr[i].rid,
8897                                     sc->intr[i].resource);
8898            }
8899        }
8900        pci_release_msi(sc->dev);
8901        break;
8902    default:
8903        /* nothing to do as initial allocation failed */
8904        break;
8905    }
8906}
8907
8908/*
8909 * This function determines and allocates the appropriate
 * interrupt based on system capabilities and user request.
8911 *
8912 * The user may force a particular interrupt mode, specify
8913 * the number of receive queues, specify the method for
 * distributing received frames to receive queues, or use
8915 * the default settings which will automatically select the
8916 * best supported combination.  In addition, the OS may or
8917 * may not support certain combinations of these settings.
8918 * This routine attempts to reconcile the settings requested
 * by the user with the capabilities available from the system
8920 * to select the optimal combination of features.
8921 *
8922 * Returns:
8923 *   0 = Success, !0 = Failure.
8924 */
8925static int
8926bxe_interrupt_alloc(struct bxe_softc *sc)
8927{
8928    int msix_count = 0;
8929    int msi_count = 0;
8930    int num_requested = 0;
8931    int num_allocated = 0;
8932    int rid, i, j;
8933    int rc;
8934
8935    /* get the number of available MSI/MSI-X interrupts from the OS */
8936    if (sc->interrupt_mode > 0) {
8937        if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
8938            msix_count = pci_msix_count(sc->dev);
8939        }
8940
8941        if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
8942            msi_count = pci_msi_count(sc->dev);
8943        }
8944
8945        BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
8946              msi_count, msix_count);
8947    }
8948
8949    do { /* try allocating MSI-X interrupt resources (at least 2) */
8950        if (sc->interrupt_mode != INTR_MODE_MSIX) {
8951            break;
8952        }
8953
8954        if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
8955            (msix_count < 2)) {
8956            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8957            break;
8958        }
8959
8960        /* ask for the necessary number of MSI-X vectors */
8961        num_requested = min((sc->num_queues + 1), msix_count);
8962
8963        BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
8964
8965        num_allocated = num_requested;
8966        if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
8967            BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
8968            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8969            break;
8970        }
8971
8972        if (num_allocated < 2) { /* possible? */
8973            BLOGE(sc, "MSI-X allocation less than 2!\n");
8974            sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8975            pci_release_msi(sc->dev);
8976            break;
8977        }
8978
8979        BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
8980              num_requested, num_allocated);
8981
8982        /* best effort so use the number of vectors allocated to us */
8983        sc->intr_count = num_allocated;
8984        sc->num_queues = num_allocated - 1;
8985
8986        rid = 1; /* initial resource identifier */
8987
8988        /* allocate the MSI-X vectors */
8989        for (i = 0; i < num_allocated; i++) {
8990            sc->intr[i].rid = (rid + i);
8991
8992            if ((sc->intr[i].resource =
8993                 bus_alloc_resource_any(sc->dev,
8994                                        SYS_RES_IRQ,
8995                                        &sc->intr[i].rid,
8996                                        RF_ACTIVE)) == NULL) {
8997                BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
8998                      i, (rid + i));
8999
9000                for (j = (i - 1); j >= 0; j--) {
9001                    bus_release_resource(sc->dev,
9002                                         SYS_RES_IRQ,
9003                                         sc->intr[j].rid,
9004                                         sc->intr[j].resource);
9005                }
9006
9007                sc->intr_count = 0;
9008                sc->num_queues = 0;
9009                sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9010                pci_release_msi(sc->dev);
9011                break;
9012            }
9013
9014            BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
9015        }
9016    } while (0);
9017
9018    do { /* try allocating MSI vector resources (at least 2) */
9019        if (sc->interrupt_mode != INTR_MODE_MSI) {
9020            break;
9021        }
9022
9023        if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9024            (msi_count < 1)) {
9025            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9026            break;
9027        }
9028
9029        /* ask for a single MSI vector */
9030        num_requested = 1;
9031
9032        BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested);
9033
9034        num_allocated = num_requested;
9035        if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9036            BLOGE(sc, "MSI alloc failed (%d)!\n", rc);
9037            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9038            break;
9039        }
9040
9041        if (num_allocated != 1) { /* possible? */
9042            BLOGE(sc, "MSI allocation is not 1!\n");
9043            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9044            pci_release_msi(sc->dev);
9045            break;
9046        }
9047
9048        BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n",
9049              num_requested, num_allocated);
9050
9051        /* best effort so use the number of vectors allocated to us */
9052        sc->intr_count = num_allocated;
9053        sc->num_queues = num_allocated;
9054
9055        rid = 1; /* initial resource identifier */
9056
9057        sc->intr[0].rid = rid;
9058
9059        if ((sc->intr[0].resource =
9060             bus_alloc_resource_any(sc->dev,
9061                                    SYS_RES_IRQ,
9062                                    &sc->intr[0].rid,
9063                                    RF_ACTIVE)) == NULL) {
9064            BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid);
9065            sc->intr_count = 0;
9066            sc->num_queues = 0;
9067            sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9068            pci_release_msi(sc->dev);
9069            break;
9070        }
9071
9072        BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid);
9073    } while (0);
9074
9075    do { /* try allocating INTx vector resources */
9076        if (sc->interrupt_mode != INTR_MODE_INTX) {
9077            break;
9078        }
9079
9080        BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n");
9081
9082        /* only one vector for INTx */
9083        sc->intr_count = 1;
9084        sc->num_queues = 1;
9085
9086        rid = 0; /* initial resource identifier */
9087
9088        sc->intr[0].rid = rid;
9089
9090        if ((sc->intr[0].resource =
9091             bus_alloc_resource_any(sc->dev,
9092                                    SYS_RES_IRQ,
9093                                    &sc->intr[0].rid,
9094                                    (RF_ACTIVE | RF_SHAREABLE))) == NULL) {
9095            BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid);
9096            sc->intr_count = 0;
9097            sc->num_queues = 0;
9098            sc->interrupt_mode = -1; /* Failed! */
9099            break;
9100        }
9101
9102        BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid);
9103    } while (0);
9104
9105    if (sc->interrupt_mode == -1) {
9106        BLOGE(sc, "Interrupt Allocation: FAILED!!!\n");
9107        rc = 1;
9108    } else {
9109        BLOGD(sc, DBG_LOAD,
9110              "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n",
9111              sc->interrupt_mode, sc->num_queues);
9112        rc = 0;
9113    }
9114
9115    return (rc);
9116}
9117
9118static void
9119bxe_interrupt_detach(struct bxe_softc *sc)
9120{
9121    struct bxe_fastpath *fp;
9122    int i;
9123
9124    /* release interrupt resources */
9125    for (i = 0; i < sc->intr_count; i++) {
9126        if (sc->intr[i].resource && sc->intr[i].tag) {
9127            BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i);
9128            bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9129        }
9130    }
9131
    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        if (fp->tq) {
            taskqueue_drain(fp->tq, &fp->tq_task);
            taskqueue_drain(fp->tq, &fp->tx_task);
            while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
                NULL))
                taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
        }
    }

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];
        if (fp->tq != NULL) {
            taskqueue_free(fp->tq);
            fp->tq = NULL;
        }
    }
9150
9151    if (sc->sp_tq) {
9152        taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9153        taskqueue_free(sc->sp_tq);
9154        sc->sp_tq = NULL;
9155    }
9156}
9157
9158/*
 * Enables interrupts and attaches to the ISR.
9160 *
9161 * When using multiple MSI/MSI-X vectors the first vector
9162 * is used for slowpath operations while all remaining
9163 * vectors are used for fastpath operations.  If only a
9164 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9165 * ISR must look for both slowpath and fastpath completions.
9166 */
9167static int
9168bxe_interrupt_attach(struct bxe_softc *sc)
9169{
9170    struct bxe_fastpath *fp;
9171    int rc = 0;
9172    int i;
9173
9174    snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9175             "bxe%d_sp_tq", sc->unit);
9176    TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9177    sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
9178                                 taskqueue_thread_enqueue,
9179                                 &sc->sp_tq);
9180    taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9181                            "%s", sc->sp_tq_name);
9182
9184    for (i = 0; i < sc->num_queues; i++) {
9185        fp = &sc->fp[i];
9186        snprintf(fp->tq_name, sizeof(fp->tq_name),
9187                 "bxe%d_fp%d_tq", sc->unit, i);
9188        NET_TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9189        TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
9190        fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
9191                                  taskqueue_thread_enqueue,
9192                                  &fp->tq);
9193        TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
9194                          bxe_tx_mq_start_deferred, fp);
9195        taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9196                                "%s", fp->tq_name);
9197    }
9198
9199    /* setup interrupt handlers */
9200    if (sc->interrupt_mode == INTR_MODE_MSIX) {
9201        BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9202
9203        /*
9204         * Setup the interrupt handler. Note that we pass the driver instance
9205         * to the interrupt handler for the slowpath.
9206         */
9207        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9208                                 (INTR_TYPE_NET | INTR_MPSAFE),
9209                                 NULL, bxe_intr_sp, sc,
9210                                 &sc->intr[0].tag)) != 0) {
9211            BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9212            goto bxe_interrupt_attach_exit;
9213        }
9214
9215        bus_describe_intr(sc->dev, sc->intr[0].resource,
9216                          sc->intr[0].tag, "sp");
9217
9218        /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9219
9220        /* initialize the fastpath vectors (note the first was used for sp) */
9221        for (i = 0; i < sc->num_queues; i++) {
9222            fp = &sc->fp[i];
9223            BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9224
9225            /*
9226             * Setup the interrupt handler. Note that we pass the
9227             * fastpath context to the interrupt handler in this
9228             * case.
9229             */
9230            if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9231                                     (INTR_TYPE_NET | INTR_MPSAFE),
9232                                     NULL, bxe_intr_fp, fp,
9233                                     &sc->intr[i + 1].tag)) != 0) {
9234                BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9235                      (i + 1), rc);
9236                goto bxe_interrupt_attach_exit;
9237            }
9238
9239            bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9240                              sc->intr[i + 1].tag, "fp%02d", i);
9241
9242            /* bind the fastpath instance to a cpu */
9243            if (sc->num_queues > 1) {
9244                bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9245            }
9246
9247            fp->state = BXE_FP_STATE_IRQ;
9248        }
9249    } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9250        BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n");
9251
9252        /*
9253         * Setup the interrupt handler. Note that we pass the
9254         * driver instance to the interrupt handler which
9255         * will handle both the slowpath and fastpath.
9256         */
9257        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9258                                 (INTR_TYPE_NET | INTR_MPSAFE),
9259                                 NULL, bxe_intr_legacy, sc,
9260                                 &sc->intr[0].tag)) != 0) {
9261            BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc);
9262            goto bxe_interrupt_attach_exit;
9263        }
9264
9265    } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9266        BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n");
9267
9268        /*
9269         * Setup the interrupt handler. Note that we pass the
9270         * driver instance to the interrupt handler which
9271         * will handle both the slowpath and fastpath.
9272         */
9273        if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9274                                 (INTR_TYPE_NET | INTR_MPSAFE),
9275                                 NULL, bxe_intr_legacy, sc,
9276                                 &sc->intr[0].tag)) != 0) {
9277            BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc);
9278            goto bxe_interrupt_attach_exit;
9279        }
9280    }
9281
9282bxe_interrupt_attach_exit:
9283
9284    return (rc);
9285}
9286
9287static int  bxe_init_hw_common_chip(struct bxe_softc *sc);
9288static int  bxe_init_hw_common(struct bxe_softc *sc);
9289static int  bxe_init_hw_port(struct bxe_softc *sc);
9290static int  bxe_init_hw_func(struct bxe_softc *sc);
9291static void bxe_reset_common(struct bxe_softc *sc);
9292static void bxe_reset_port(struct bxe_softc *sc);
9293static void bxe_reset_func(struct bxe_softc *sc);
9294static int  bxe_gunzip_init(struct bxe_softc *sc);
9295static void bxe_gunzip_end(struct bxe_softc *sc);
9296static int  bxe_init_firmware(struct bxe_softc *sc);
9297static void bxe_release_firmware(struct bxe_softc *sc);
9298
9299static struct
9300ecore_func_sp_drv_ops bxe_func_sp_drv = {
9301    .init_hw_cmn_chip = bxe_init_hw_common_chip,
9302    .init_hw_cmn      = bxe_init_hw_common,
9303    .init_hw_port     = bxe_init_hw_port,
9304    .init_hw_func     = bxe_init_hw_func,
9305
9306    .reset_hw_cmn     = bxe_reset_common,
9307    .reset_hw_port    = bxe_reset_port,
9308    .reset_hw_func    = bxe_reset_func,
9309
9310    .gunzip_init      = bxe_gunzip_init,
9311    .gunzip_end       = bxe_gunzip_end,
9312
9313    .init_fw          = bxe_init_firmware,
9314    .release_fw       = bxe_release_firmware,
9315};
9316
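/*
 * Initialize the ecore function state-machine object, wiring in the
 * driver's HW init/reset, gunzip, and firmware load/release callbacks.
 */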
9317static void
9318bxe_init_func_obj(struct bxe_softc *sc)
9319{
9320    sc->dmae_ready = 0;
9321
9322    ecore_init_func_obj(sc,
9323                        &sc->func_obj,
9324                        BXE_SP(sc, func_rdata),
9325                        BXE_SP_MAPPING(sc, func_rdata),
9326                        BXE_SP(sc, func_afex_rdata),
9327                        BXE_SP_MAPPING(sc, func_afex_rdata),
9328                        &bxe_func_sp_drv);
9329}
9330
9331static int
9332bxe_init_hw(struct bxe_softc *sc,
9333            uint32_t         load_code)
9334{
9335    struct ecore_func_state_params func_params = { NULL };
9336    int rc;
9337
9338    /* prepare the parameters for function state transitions */
9339    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
9340
9341    func_params.f_obj = &sc->func_obj;
9342    func_params.cmd = ECORE_F_CMD_HW_INIT;
9343
9344    func_params.params.hw_init.load_phase = load_code;
9345
9346    /*
9347     * Via a plethora of function pointers, we will eventually reach
9348     * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func().
9349     */
9350    rc = ecore_func_state_change(sc, &func_params);
9351
9352    return (rc);
9353}
9354
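/*
 * Fill a device memory region with a constant value, using 32-bit writes
 * when the address and length are dword-aligned and byte writes otherwise.
 */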
9355static void
9356bxe_fill(struct bxe_softc *sc,
9357         uint32_t         addr,
9358         int              fill,
9359         uint32_t         len)
9360{
9361    uint32_t i;
9362
9363    if (!(len % 4) && !(addr % 4)) {
9364        for (i = 0; i < len; i += 4) {
9365            REG_WR(sc, (addr + i), fill);
9366        }
9367    } else {
9368        for (i = 0; i < len; i++) {
9369            REG_WR8(sc, (addr + i), fill);
9370        }
9371    }
9372}
9373
/* writes FP SB data to FW - data_size in dwords */
9375static void
9376bxe_wr_fp_sb_data(struct bxe_softc *sc,
9377                  int              fw_sb_id,
9378                  uint32_t         *sb_data_p,
9379                  uint32_t         data_size)
9380{
9381    int index;
9382
9383    for (index = 0; index < data_size; index++) {
9384        REG_WR(sc,
9385               (BAR_CSTRORM_INTMEM +
9386                CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
9387                (sizeof(uint32_t) * index)),
9388               *(sb_data_p + index));
9389    }
9390}
9391
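/*
 * Disable a fastpath status block in firmware: mark its data as
 * SB_DISABLED and zero out the status block and sync block areas in
 * CSTORM internal memory.
 */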
9392static void
9393bxe_zero_fp_sb(struct bxe_softc *sc,
9394               int              fw_sb_id)
9395{
9396    struct hc_status_block_data_e2 sb_data_e2;
9397    struct hc_status_block_data_e1x sb_data_e1x;
9398    uint32_t *sb_data_p;
9399    uint32_t data_size = 0;
9400
9401    if (!CHIP_IS_E1x(sc)) {
9402        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9403        sb_data_e2.common.state = SB_DISABLED;
9404        sb_data_e2.common.p_func.vf_valid = FALSE;
9405        sb_data_p = (uint32_t *)&sb_data_e2;
9406        data_size = (sizeof(struct hc_status_block_data_e2) /
9407                     sizeof(uint32_t));
9408    } else {
9409        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9410        sb_data_e1x.common.state = SB_DISABLED;
9411        sb_data_e1x.common.p_func.vf_valid = FALSE;
9412        sb_data_p = (uint32_t *)&sb_data_e1x;
9413        data_size = (sizeof(struct hc_status_block_data_e1x) /
9414                     sizeof(uint32_t));
9415    }
9416
9417    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9418
9419    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)),
9420             0, CSTORM_STATUS_BLOCK_SIZE);
9421    bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)),
9422             0, CSTORM_SYNC_BLOCK_SIZE);
9423}
9424
9425static void
9426bxe_wr_sp_sb_data(struct bxe_softc               *sc,
9427                  struct hc_sp_status_block_data *sp_sb_data)
9428{
9429    int i;
9430
9431    for (i = 0;
9432         i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t));
9433         i++) {
9434        REG_WR(sc,
9435               (BAR_CSTRORM_INTMEM +
9436                CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) +
9437                (i * sizeof(uint32_t))),
9438               *((uint32_t *)sp_sb_data + i));
9439    }
9440}
9441
9442static void
9443bxe_zero_sp_sb(struct bxe_softc *sc)
9444{
9445    struct hc_sp_status_block_data sp_sb_data;
9446
9447    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9448
9449    sp_sb_data.state           = SB_DISABLED;
9450    sp_sb_data.p_func.vf_valid = FALSE;
9451
9452    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9453
9454    bxe_fill(sc,
9455             (BAR_CSTRORM_INTMEM +
9456              CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))),
9457              0, CSTORM_SP_STATUS_BLOCK_SIZE);
9458    bxe_fill(sc,
9459             (BAR_CSTRORM_INTMEM +
9460              CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))),
9461              0, CSTORM_SP_SYNC_BLOCK_SIZE);
9462}
9463
9464static void
9465bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
9466                             int                       igu_sb_id,
9467                             int                       igu_seg_id)
9468{
9469    hc_sm->igu_sb_id      = igu_sb_id;
9470    hc_sm->igu_seg_id     = igu_seg_id;
9471    hc_sm->timer_value    = 0xFF;
9472    hc_sm->time_to_expire = 0xFFFFFFFF;
9473}
9474
9475static void
9476bxe_map_sb_state_machines(struct hc_index_data *index_data)
9477{
9478    /* zero out state machine indices */
9479
9480    /* rx indices */
9481    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
9482
9483    /* tx indices */
9484    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags      &= ~HC_INDEX_DATA_SM_ID;
9485    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
9486    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
9487    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
9488
9489    /* map indices */
9490
9491    /* rx indices */
9492    index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
9493        (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9494
9495    /* tx indices */
9496    index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
9497        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9498    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
9499        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9500    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
9501        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9502    index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
9503        (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT);
9504}
9505
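/*
 * Initialize a host status block: build the chip-specific (E1x or E2)
 * status block data, point it at the host DMA address, map the RX/TX
 * state machines to the given IGU SB, and write the result to CSTORM
 * internal memory.
 */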
9506static void
9507bxe_init_sb(struct bxe_softc *sc,
9508            bus_addr_t       busaddr,
9509            int              vfid,
9510            uint8_t          vf_valid,
9511            int              fw_sb_id,
9512            int              igu_sb_id)
9513{
9514    struct hc_status_block_data_e2  sb_data_e2;
9515    struct hc_status_block_data_e1x sb_data_e1x;
9516    struct hc_status_block_sm       *hc_sm_p;
9517    uint32_t *sb_data_p;
9518    int igu_seg_id;
9519    int data_size;
9520
9521    if (CHIP_INT_MODE_IS_BC(sc)) {
9522        igu_seg_id = HC_SEG_ACCESS_NORM;
9523    } else {
9524        igu_seg_id = IGU_SEG_ACCESS_NORM;
9525    }
9526
9527    bxe_zero_fp_sb(sc, fw_sb_id);
9528
9529    if (!CHIP_IS_E1x(sc)) {
9530        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
9531        sb_data_e2.common.state = SB_ENABLED;
9532        sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
9533        sb_data_e2.common.p_func.vf_id = vfid;
9534        sb_data_e2.common.p_func.vf_valid = vf_valid;
9535        sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
9536        sb_data_e2.common.same_igu_sb_1b = TRUE;
9537        sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
9538        sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
9539        hc_sm_p = sb_data_e2.common.state_machine;
9540        sb_data_p = (uint32_t *)&sb_data_e2;
9541        data_size = (sizeof(struct hc_status_block_data_e2) /
9542                     sizeof(uint32_t));
9543        bxe_map_sb_state_machines(sb_data_e2.index_data);
9544    } else {
9545        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
9546        sb_data_e1x.common.state = SB_ENABLED;
9547        sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
9548        sb_data_e1x.common.p_func.vf_id = 0xff;
9549        sb_data_e1x.common.p_func.vf_valid = FALSE;
9550        sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
9551        sb_data_e1x.common.same_igu_sb_1b = TRUE;
9552        sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
9553        sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
9554        hc_sm_p = sb_data_e1x.common.state_machine;
9555        sb_data_p = (uint32_t *)&sb_data_e1x;
9556        data_size = (sizeof(struct hc_status_block_data_e1x) /
9557                     sizeof(uint32_t));
9558        bxe_map_sb_state_machines(sb_data_e1x.index_data);
9559    }
9560
9561    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
9562    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);
9563
9564    BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);
9565
    /* write indices to HW - PCI guarantees endianness of regpairs */
9567    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
9568}
9569
9570static inline uint8_t
9571bxe_fp_qzone_id(struct bxe_fastpath *fp)
9572{
9573    if (CHIP_IS_E1x(fp->sc)) {
9574        return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9575    } else {
9576        return (fp->cl_id);
9577    }
9578}
9579
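/*
 * Compute the USTORM internal memory offset where the RX producers for this
 * fastpath are written (keyed by queue zone id on E2 and newer, by port and
 * client id on E1x).
 */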
9580static inline uint32_t
9581bxe_rx_ustorm_prods_offset(struct bxe_softc    *sc,
9582                           struct bxe_fastpath *fp)
9583{
9584    uint32_t offset = BAR_USTRORM_INTMEM;
9585
9586    if (!CHIP_IS_E1x(sc)) {
9587        offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9588    } else {
9589        offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9590    }
9591
9592    return (offset);
9593}
9594
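/*
 * Initialize one ethernet fastpath: assign the IGU/FW SB ids and client id,
 * set up the status block index shortcuts, init the status block in HW, and
 * (for a PF) configure the queue state and MAC classification objects.
 */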
9595static void
9596bxe_init_eth_fp(struct bxe_softc *sc,
9597                int              idx)
9598{
9599    struct bxe_fastpath *fp = &sc->fp[idx];
9600    uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
9601    unsigned long q_type = 0;
9602    int cos;
9603
9604    fp->sc    = sc;
9605    fp->index = idx;
9606
9607    fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9608    fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9609
9610    fp->cl_id = (CHIP_IS_E1x(sc)) ?
9611                    (SC_L_ID(sc) + idx) :
                    /* want client ID same as IGU SB ID for non-E1x */
9613                    fp->igu_sb_id;
9614    fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9615
9616    /* setup sb indices */
9617    if (!CHIP_IS_E1x(sc)) {
9618        fp->sb_index_values  = fp->status_block.e2_sb->sb.index_values;
9619        fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9620    } else {
9621        fp->sb_index_values  = fp->status_block.e1x_sb->sb.index_values;
9622        fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9623    }
9624
9625    /* init shortcut */
9626    fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9627
9628    fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9629
9630    /*
     * XXX If multiple CoS are ever supported then each fastpath structure
9632     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
9633     */
9634    for (cos = 0; cos < sc->max_cos; cos++) {
9635        cids[cos] = idx;
9636    }
9637    fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9638
9639    /* nothing more for a VF to do */
9640    if (IS_VF(sc)) {
9641        return;
9642    }
9643
9644    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9645                fp->fw_sb_id, fp->igu_sb_id);
9646
9647    bxe_update_fp_sb_idx(fp);
9648
9649    /* Configure Queue State object */
9650    bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
9651    bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);
9652
9653    ecore_init_queue_obj(sc,
9654                         &sc->sp_objs[idx].q_obj,
9655                         fp->cl_id,
9656                         cids,
9657                         sc->max_cos,
9658                         SC_FUNC(sc),
9659                         BXE_SP(sc, q_rdata),
9660                         BXE_SP_MAPPING(sc, q_rdata),
9661                         q_type);
9662
9663    /* configure classification DBs */
9664    ecore_init_mac_obj(sc,
9665                       &sc->sp_objs[idx].mac_obj,
9666                       fp->cl_id,
9667                       idx,
9668                       SC_FUNC(sc),
9669                       BXE_SP(sc, mac_rdata),
9670                       BXE_SP_MAPPING(sc, mac_rdata),
9671                       ECORE_FILTER_MAC_PENDING,
9672                       &sc->sp_state,
9673                       ECORE_OBJ_TYPE_RX_TX,
9674                       &sc->macs_pool);
9675
9676    BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
9677          idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9678}
9679
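/*
 * Publish the RX BD, CQE and SGE producers for a fastpath by writing them,
 * one 32-bit word at a time, to the fastpath's USTORM producers offset.
 */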
9680static inline void
9681bxe_update_rx_prod(struct bxe_softc    *sc,
9682                   struct bxe_fastpath *fp,
9683                   uint16_t            rx_bd_prod,
9684                   uint16_t            rx_cq_prod,
9685                   uint16_t            rx_sge_prod)
9686{
9687    struct ustorm_eth_rx_producers rx_prods = { 0 };
9688    uint32_t i;
9689
9690    /* update producers */
9691    rx_prods.bd_prod  = rx_bd_prod;
9692    rx_prods.cqe_prod = rx_cq_prod;
9693    rx_prods.sge_prod = rx_sge_prod;
9694
9695    /*
9696     * Make sure that the BD and SGE data is updated before updating the
9697     * producers since FW might read the BD/SGE right after the producer
9698     * is updated.
9699     * This is only applicable for weak-ordered memory model archs such
     * as IA-64. The following barrier is also mandatory since the FW
     * assumes BDs must have buffers.
9702     */
9703    wmb();
9704
9705    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
9706        REG_WR(sc,
9707               (fp->ustorm_rx_prods_offset + (i * 4)),
9708               ((uint32_t *)&rx_prods)[i]);
9709    }
9710
9711    wmb(); /* keep prod updates ordered */
9712
9713    BLOGD(sc, DBG_RX,
9714          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
9715          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
9716}
9717
9718static void
9719bxe_init_rx_rings(struct bxe_softc *sc)
9720{
9721    struct bxe_fastpath *fp;
9722    int i;
9723
9724    for (i = 0; i < sc->num_queues; i++) {
9725        fp = &sc->fp[i];
9726
9727        fp->rx_bd_cons = 0;
9728
9729        /*
9730         * Activate the BD ring...
9731         * Warning, this will generate an interrupt (to the TSTORM)
9732         * so this can only be done after the chip is initialized
9733         */
9734        bxe_update_rx_prod(sc, fp,
9735                           fp->rx_bd_prod,
9736                           fp->rx_cq_prod,
9737                           fp->rx_sge_prod);
9738
9739        if (i != 0) {
9740            continue;
9741        }
9742
9743        if (CHIP_IS_E1(sc)) {
9744            REG_WR(sc,
9745                   (BAR_USTRORM_INTMEM +
9746                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
9747                   U64_LO(fp->rcq_dma.paddr));
9748            REG_WR(sc,
9749                   (BAR_USTRORM_INTMEM +
9750                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
9751                   U64_HI(fp->rcq_dma.paddr));
9752        }
9753    }
9754}
9755
9756static void
9757bxe_init_tx_ring_one(struct bxe_fastpath *fp)
9758{
9759    SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
9760    fp->tx_db.data.zero_fill1 = 0;
9761    fp->tx_db.data.prod = 0;
9762
9763    fp->tx_pkt_prod = 0;
9764    fp->tx_pkt_cons = 0;
9765    fp->tx_bd_prod = 0;
9766    fp->tx_bd_cons = 0;
9767    fp->eth_q_stats.tx_pkts = 0;
9768}
9769
9770static inline void
9771bxe_init_tx_rings(struct bxe_softc *sc)
9772{
9773    int i;
9774
9775    for (i = 0; i < sc->num_queues; i++) {
9776        bxe_init_tx_ring_one(&sc->fp[i]);
9777    }
9778}
9779
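/*
 * Initialize the default (slowpath) status block: set up the attention
 * status block and attention groups, program the attention message address
 * in the HC or IGU, write the slowpath SB data, and then ack the default SB
 * with interrupts enabled.
 */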
9780static void
9781bxe_init_def_sb(struct bxe_softc *sc)
9782{
9783    struct host_sp_status_block *def_sb = sc->def_sb;
9784    bus_addr_t mapping = sc->def_sb_dma.paddr;
9785    int igu_sp_sb_index;
9786    int igu_seg_id;
9787    int port = SC_PORT(sc);
9788    int func = SC_FUNC(sc);
9789    int reg_offset, reg_offset_en5;
9790    uint64_t section;
9791    int index, sindex;
9792    struct hc_sp_status_block_data sp_sb_data;
9793
9794    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
9795
9796    if (CHIP_INT_MODE_IS_BC(sc)) {
9797        igu_sp_sb_index = DEF_SB_IGU_ID;
9798        igu_seg_id = HC_SEG_ACCESS_DEF;
9799    } else {
9800        igu_sp_sb_index = sc->igu_dsb_id;
9801        igu_seg_id = IGU_SEG_ACCESS_DEF;
9802    }
9803
9804    /* attentions */
9805    section = ((uint64_t)mapping +
9806               offsetof(struct host_sp_status_block, atten_status_block));
9807    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9808    sc->attn_state = 0;
9809
9810    reg_offset = (port) ?
9811                     MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
9812                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
9813    reg_offset_en5 = (port) ?
9814                         MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
9815                         MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;
9816
9817    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
9818        /* take care of sig[0]..sig[4] */
9819        for (sindex = 0; sindex < 4; sindex++) {
9820            sc->attn_group[index].sig[sindex] =
9821                REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
9822        }
9823
9824        if (!CHIP_IS_E1x(sc)) {
9825            /*
9826             * enable5 is separate from the rest of the registers,
9827             * and the address skip is 4 and not 16 between the
9828             * different groups
9829             */
9830            sc->attn_group[index].sig[4] =
9831                REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
9832        } else {
9833            sc->attn_group[index].sig[4] = 0;
9834        }
9835    }
9836
9837    if (sc->devinfo.int_block == INT_BLOCK_HC) {
9838        reg_offset = (port) ?
9839                         HC_REG_ATTN_MSG1_ADDR_L :
9840                         HC_REG_ATTN_MSG0_ADDR_L;
9841        REG_WR(sc, reg_offset, U64_LO(section));
9842        REG_WR(sc, (reg_offset + 4), U64_HI(section));
9843    } else if (!CHIP_IS_E1x(sc)) {
9844        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
9845        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
9846    }
9847
9848    section = ((uint64_t)mapping +
9849               offsetof(struct host_sp_status_block, sp_sb));
9850
9851    bxe_zero_sp_sb(sc);
9852
    /* PCI guarantees endianness of regpairs */
9854    sp_sb_data.state           = SB_ENABLED;
9855    sp_sb_data.host_sb_addr.lo = U64_LO(section);
9856    sp_sb_data.host_sb_addr.hi = U64_HI(section);
9857    sp_sb_data.igu_sb_id       = igu_sp_sb_index;
9858    sp_sb_data.igu_seg_id      = igu_seg_id;
9859    sp_sb_data.p_func.pf_id    = func;
9860    sp_sb_data.p_func.vnic_id  = SC_VN(sc);
9861    sp_sb_data.p_func.vf_id    = 0xff;
9862
9863    bxe_wr_sp_sb_data(sc, &sp_sb_data);
9864
9865    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9866}
9867
9868static void
9869bxe_init_sp_ring(struct bxe_softc *sc)
9870{
9871    atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9872    sc->spq_prod_idx = 0;
9873    sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9874    sc->spq_prod_bd = sc->spq;
9875    sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9876}
9877
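/*
 * Initialize the event queue ring: chain the next-page pointer of each EQ
 * page to the following page, reset the consumer/producer indices, and set
 * the available EQ/SPQ credit.
 */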
9878static void
9879bxe_init_eq_ring(struct bxe_softc *sc)
9880{
9881    union event_ring_elem *elem;
9882    int i;
9883
9884    for (i = 1; i <= NUM_EQ_PAGES; i++) {
9885        elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9886
9887        elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9888                                                 BCM_PAGE_SIZE *
9889                                                 (i % NUM_EQ_PAGES)));
9890        elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9891                                                 BCM_PAGE_SIZE *
9892                                                 (i % NUM_EQ_PAGES)));
9893    }
9894
9895    sc->eq_cons    = 0;
9896    sc->eq_prod    = NUM_EQ_DESC;
9897    sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9898
9899    atomic_store_rel_long(&sc->eq_spq_left,
9900                          (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
9901                               NUM_EQ_DESC) - 1));
9902}
9903
9904static void
9905bxe_init_internal_common(struct bxe_softc *sc)
9906{
9907    int i;
9908
9909    /*
9910     * Zero this manually as its initialization is currently missing
9911     * in the initTool.
9912     */
9913    for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
9914        REG_WR(sc,
9915               (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
9916               0);
9917    }
9918
9919    if (!CHIP_IS_E1x(sc)) {
9920        REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
9921                CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
9922    }
9923}
9924
9925static void
9926bxe_init_internal(struct bxe_softc *sc,
9927                  uint32_t         load_code)
9928{
9929    switch (load_code) {
9930    case FW_MSG_CODE_DRV_LOAD_COMMON:
9931    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
9932        bxe_init_internal_common(sc);
9933        /* no break */
9934
9935    case FW_MSG_CODE_DRV_LOAD_PORT:
9936        /* nothing to do */
9937        /* no break */
9938
9939    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
9940        /* internal memory per function is initialized inside bxe_pf_init */
9941        break;
9942
9943    default:
9944        BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
9945        break;
9946    }
9947}
9948
9949static void
9950storm_memset_func_cfg(struct bxe_softc                         *sc,
9951                      struct tstorm_eth_function_common_config *tcfg,
9952                      uint16_t                                  abs_fid)
9953{
9954    uint32_t addr;
9955    size_t size;
9956
9957    addr = (BAR_TSTRORM_INTMEM +
9958            TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid));
9959    size = sizeof(struct tstorm_eth_function_common_config);
9960    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg);
9961}
9962
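/*
 * Enable the function in the FW: on E1x write the (zeroed) common TSTORM
 * configuration, set the VF-to-PF mapping and the function-enable flag,
 * and, if requested, program the SPQ address and producer.
 */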
9963static void
9964bxe_func_init(struct bxe_softc            *sc,
9965              struct bxe_func_init_params *p)
9966{
9967    struct tstorm_eth_function_common_config tcfg = { 0 };
9968
9969    if (CHIP_IS_E1x(sc)) {
9970        storm_memset_func_cfg(sc, &tcfg, p->func_id);
9971    }
9972
9973    /* Enable the function in the FW */
9974    storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
9975    storm_memset_func_en(sc, p->func_id, 1);
9976
9977    /* spq */
9978    if (p->func_flgs & FUNC_FLG_SPQ) {
9979        storm_memset_spq_addr(sc, p->spq_map, p->func_id);
9980        REG_WR(sc,
9981               (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
9982               p->spq_prod);
9983    }
9984}
9985
/*
 * Calculate the per-VN min rates used for further normalization of the
 * min rates. Hidden VNs get a min rate of zero; a VN whose configured min
 * rate is zero is given the default (DEF_MIN_RATE). If ETS is enabled, or
 * if all of the configured min rates are zero, the fairness algorithm is
 * deactivated.
 */
9996static void
9997bxe_calc_vn_min(struct bxe_softc       *sc,
9998                struct cmng_init_input *input)
9999{
10000    uint32_t vn_cfg;
10001    uint32_t vn_min_rate;
10002    int all_zero = 1;
10003    int vn;
10004
10005    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10006        vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10007        vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
10008                        FUNC_MF_CFG_MIN_BW_SHIFT) * 100);
10009
10010        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10011            /* skip hidden VNs */
10012            vn_min_rate = 0;
10013        } else if (!vn_min_rate) {
            /* If the min rate is zero - set it to the default (DEF_MIN_RATE) */
10015            vn_min_rate = DEF_MIN_RATE;
10016        } else {
10017            all_zero = 0;
10018        }
10019
10020        input->vnic_min_rate[vn] = vn_min_rate;
10021    }
10022
10023    /* if ETS or all min rates are zeros - disable fairness */
10024    if (BXE_IS_ETS_ENABLED(sc)) {
10025        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10026        BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
10027    } else if (all_zero) {
10028        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10029        BLOGD(sc, DBG_LOAD,
              "Fairness disabled (all MIN values are zeroes)\n");
10031    } else {
10032        input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10033    }
10034}
10035
10036static inline uint16_t
10037bxe_extract_max_cfg(struct bxe_softc *sc,
10038                    uint32_t         mf_cfg)
10039{
10040    uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
10041                        FUNC_MF_CFG_MAX_BW_SHIFT);
10042
10043    if (!max_cfg) {
10044        BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10045        max_cfg = 100;
10046    }
10047
10048    return (max_cfg);
10049}
10050
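/*
 * Compute the per-VN max rate from the MF configuration (hidden VNs get
 * zero). In SI mode max_cfg is a percentage of the link speed; in SD modes
 * it is absolute in 100Mb units. For example, with max_cfg = 40: SD mode
 * gives 40 * 100 = 4000, and SI mode on a 10000 (10G) link gives
 * (10000 * 40) / 100 = 4000 as well.
 */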
10051static void
10052bxe_calc_vn_max(struct bxe_softc       *sc,
10053                int                    vn,
10054                struct cmng_init_input *input)
10055{
10056    uint16_t vn_max_rate;
10057    uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10058    uint32_t max_cfg;
10059
10060    if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
10061        vn_max_rate = 0;
10062    } else {
10063        max_cfg = bxe_extract_max_cfg(sc, vn_cfg);
10064
10065        if (IS_MF_SI(sc)) {
10066            /* max_cfg in percents of linkspeed */
10067            vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10068        } else { /* SD modes */
10069            /* max_cfg is absolute in 100Mb units */
10070            vn_max_rate = (max_cfg * 100);
10071        }
10072    }
10073
10074    BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
10075
10076    input->vnic_max_rate[vn] = vn_max_rate;
10077}
10078
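/*
 * Initialize the congestion management parameters: for CMNG_FNS_MINMAX,
 * optionally re-read the MF config, compute the per-VN min rates (and, for
 * the PMF, the max rates), and hand the result to ecore_init_cmng();
 * otherwise leave rate shaping and fairness disabled.
 */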
10079static void
10080bxe_cmng_fns_init(struct bxe_softc *sc,
10081                  uint8_t          read_cfg,
10082                  uint8_t          cmng_type)
10083{
10084    struct cmng_init_input input;
10085    int vn;
10086
10087    memset(&input, 0, sizeof(struct cmng_init_input));
10088
10089    input.port_rate = sc->link_vars.line_speed;
10090
10091    if (cmng_type == CMNG_FNS_MINMAX) {
10092        /* read mf conf from shmem */
10093        if (read_cfg) {
10094            bxe_read_mf_cfg(sc);
10095        }
10096
10097        /* get VN min rate and enable fairness if not 0 */
10098        bxe_calc_vn_min(sc, &input);
10099
10100        /* get VN max rate */
10101        if (sc->port.pmf) {
10102            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10103                bxe_calc_vn_max(sc, vn, &input);
10104            }
10105        }
10106
10107        /* always enable rate shaping and fairness */
10108        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
10109
10110        ecore_init_cmng(&input, &sc->cmng);
10111        return;
10112    }
10113
10114    /* rate shaping and fairness are disabled */
10115    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
10116}
10117
10118static int
10119bxe_get_cmng_fns_mode(struct bxe_softc *sc)
10120{
10121    if (CHIP_REV_IS_SLOW(sc)) {
10122        return (CMNG_FNS_NONE);
10123    }
10124
10125    if (IS_MF(sc)) {
10126        return (CMNG_FNS_MINMAX);
10127    }
10128
10129    return (CMNG_FNS_NONE);
10130}
10131
10132static void
10133storm_memset_cmng(struct bxe_softc *sc,
10134                  struct cmng_init *cmng,
10135                  uint8_t          port)
10136{
10137    int vn;
10138    int func;
10139    uint32_t addr;
10140    size_t size;
10141
10142    addr = (BAR_XSTRORM_INTMEM +
10143            XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
10144    size = sizeof(struct cmng_struct_per_port);
10145    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10146
10147    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
10148        func = func_by_vn(sc, vn);
10149
10150        addr = (BAR_XSTRORM_INTMEM +
10151                XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
10152        size = sizeof(struct rate_shaping_vars_per_vn);
10153        ecore_storm_memset_struct(sc, addr, size,
10154                                  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10155
10156        addr = (BAR_XSTRORM_INTMEM +
10157                XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
10158        size = sizeof(struct fairness_vars_per_vn);
10159        ecore_storm_memset_struct(sc, addr, size,
10160                                  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10161    }
10162}
10163
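/*
 * Per-function (PF) initialization: clear the IGU statistics on non-E1x
 * chips, enable the function in the FW with the statistics/leading/SPQ
 * flags, initialize the congestion management values using a 10Gbps initial
 * link rate, and program the event queue data.
 */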
10164static void
10165bxe_pf_init(struct bxe_softc *sc)
10166{
10167    struct bxe_func_init_params func_init = { 0 };
10168    struct event_ring_data eq_data = { { 0 } };
10169    uint16_t flags;
10170
10171    if (!CHIP_IS_E1x(sc)) {
10172        /* reset IGU PF statistics: MSIX + ATTN */
10173        /* PF */
10174        REG_WR(sc,
10175               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10176                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10177                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10178               0);
10179        /* ATTN */
10180        REG_WR(sc,
10181               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
10182                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
10183                (BXE_IGU_STAS_MSG_PF_CNT * 4) +
10184                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
10185               0);
10186    }
10187
10188    /* function setup flags */
10189    flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
10190
10191    /*
10192     * This flag is relevant for E1x only.
     * E2 doesn't have a TPA configuration at the function level.
10194     */
10195    flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10196
10197    func_init.func_flgs = flags;
10198    func_init.pf_id     = SC_FUNC(sc);
10199    func_init.func_id   = SC_FUNC(sc);
10200    func_init.spq_map   = sc->spq_dma.paddr;
10201    func_init.spq_prod  = sc->spq_prod_idx;
10202
10203    bxe_func_init(sc, &func_init);
10204
10205    memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10206
10207    /*
10208     * Congestion management values depend on the link rate.
10209     * There is no active link so initial link rate is set to 10Gbps.
10210     * When the link comes up the congestion management values are
10211     * re-calculated according to the actual link rate.
10212     */
10213    sc->link_vars.line_speed = SPEED_10000;
10214    bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));
10215
10216    /* Only the PMF sets the HW */
10217    if (sc->port.pmf) {
10218        storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10219    }
10220
    /* init Event Queue - PCI bus guarantees correct endianness */
10222    eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10223    eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10224    eq_data.producer     = sc->eq_prod;
10225    eq_data.index_id     = HC_SP_INDEX_EQ_CONS;
10226    eq_data.sb_id        = DEF_SB_ID;
10227    storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
10228}
10229
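/*
 * Enable interrupts through the HC block according to the active interrupt
 * mode (MSI-X, MSI or INTx) and program the leading/trailing edge registers.
 */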
10230static void
10231bxe_hc_int_enable(struct bxe_softc *sc)
10232{
10233    int port = SC_PORT(sc);
10234    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10235    uint32_t val = REG_RD(sc, addr);
10236    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10237    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10238                           (sc->intr_count == 1)) ? TRUE : FALSE;
10239    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10240
10241    if (msix) {
10242        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10243                 HC_CONFIG_0_REG_INT_LINE_EN_0);
10244        val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10245                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10246        if (single_msix) {
10247            val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
10248        }
10249    } else if (msi) {
10250        val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
10251        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10252                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10253                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10254    } else {
10255        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10256                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10257                HC_CONFIG_0_REG_INT_LINE_EN_0 |
10258                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10259
10260        if (!CHIP_IS_E1(sc)) {
10261            BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
10262                  val, port, addr);
10263
10264            REG_WR(sc, addr, val);
10265
10266            val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
10267        }
10268    }
10269
10270    if (CHIP_IS_E1(sc)) {
10271        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
10272    }
10273
10274    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
10275          val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10276
10277    REG_WR(sc, addr, val);
10278
10279    /* ensure that HC_CONFIG is written before leading/trailing edge config */
10280    mb();
10281
10282    if (!CHIP_IS_E1(sc)) {
10283        /* init leading/trailing edge */
10284        if (IS_MF(sc)) {
10285            val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10286            if (sc->port.pmf) {
10287                /* enable nig and gpio3 attention */
10288                val |= 0x1100;
10289            }
10290        } else {
10291            val = 0xffff;
10292        }
10293
10294        REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
10295        REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
10296    }
10297
10298    /* make sure that interrupts are indeed enabled from here on */
10299    mb();
10300}
10301
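/*
 * Enable interrupts through the IGU block according to the active interrupt
 * mode (MSI-X, MSI or INTx) and program the leading/trailing edge latches.
 */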
10302static void
10303bxe_igu_int_enable(struct bxe_softc *sc)
10304{
10305    uint32_t val;
10306    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10307    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10308                           (sc->intr_count == 1)) ? TRUE : FALSE;
10309    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10310
10311    val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10312
10313    if (msix) {
10314        val &= ~(IGU_PF_CONF_INT_LINE_EN |
10315                 IGU_PF_CONF_SINGLE_ISR_EN);
10316        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10317                IGU_PF_CONF_ATTN_BIT_EN);
10318        if (single_msix) {
10319            val |= IGU_PF_CONF_SINGLE_ISR_EN;
10320        }
10321    } else if (msi) {
10322        val &= ~IGU_PF_CONF_INT_LINE_EN;
10323        val |= (IGU_PF_CONF_MSI_MSIX_EN |
10324                IGU_PF_CONF_ATTN_BIT_EN |
10325                IGU_PF_CONF_SINGLE_ISR_EN);
10326    } else {
10327        val &= ~IGU_PF_CONF_MSI_MSIX_EN;
10328        val |= (IGU_PF_CONF_INT_LINE_EN |
10329                IGU_PF_CONF_ATTN_BIT_EN |
10330                IGU_PF_CONF_SINGLE_ISR_EN);
10331    }
10332
    /* clean previous status - need to configure the IGU prior to ack */
10334    if ((!msix) || single_msix) {
10335        REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10336        bxe_ack_int(sc);
10337    }
10338
10339    val |= IGU_PF_CONF_FUNC_EN;
10340
10341    BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
10342          val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10343
10344    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10345
10346    mb();
10347
10348    /* init leading/trailing edge */
10349    if (IS_MF(sc)) {
10350        val = (0xee0f | (1 << (SC_VN(sc) + 4)));
10351        if (sc->port.pmf) {
10352            /* enable nig and gpio3 attention */
10353            val |= 0x1100;
10354        }
10355    } else {
10356        val = 0xffff;
10357    }
10358
10359    REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
10360    REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);
10361
10362    /* make sure that interrupts are indeed enabled from here on */
10363    mb();
10364}
10365
10366static void
10367bxe_int_enable(struct bxe_softc *sc)
10368{
10369    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10370        bxe_hc_int_enable(sc);
10371    } else {
10372        bxe_igu_int_enable(sc);
10373    }
10374}
10375
10376static void
10377bxe_hc_int_disable(struct bxe_softc *sc)
10378{
10379    int port = SC_PORT(sc);
10380    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
10381    uint32_t val = REG_RD(sc, addr);
10382
10383    /*
     * In E1 we must use only PCI configuration space to disable the MSI/MSI-X
     * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC
     * block.
10387     */
10388    if (CHIP_IS_E1(sc)) {
10389        /*
         * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask
         * register to prevent the HC from sending interrupts after we exit
         * this function.
10392         */
10393        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);
10394
10395        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10396                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10397                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10398    } else {
10399        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
10400                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
10401                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
10402                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
10403    }
10404
10405    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);
10406
10407    /* flush all outstanding writes */
10408    mb();
10409
10410    REG_WR(sc, addr, val);
10411    if (REG_RD(sc, addr) != val) {
10412        BLOGE(sc, "proper val not read from HC IGU!\n");
10413    }
10414}
10415
10416static void
10417bxe_igu_int_disable(struct bxe_softc *sc)
10418{
10419    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
10420
10421    val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
10422             IGU_PF_CONF_INT_LINE_EN |
10423             IGU_PF_CONF_ATTN_BIT_EN);
10424
10425    BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);
10426
10427    /* flush all outstanding writes */
10428    mb();
10429
10430    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
10431    if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
10432        BLOGE(sc, "proper val not read from IGU!\n");
10433    }
10434}
10435
10436static void
10437bxe_int_disable(struct bxe_softc *sc)
10438{
10439    if (sc->devinfo.int_block == INT_BLOCK_HC) {
10440        bxe_hc_int_disable(sc);
10441    } else {
10442        bxe_igu_int_disable(sc);
10443    }
10444}
10445
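/*
 * Initialize the NIC after a successful firmware load: set up all ethernet
 * fastpaths and the RX/TX rings (a VF stops here), then the default status
 * block, slowpath and event queue rings, per-function state and statistics,
 * and finally enable interrupts.
 */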
10446static void
10447bxe_nic_init(struct bxe_softc *sc,
10448             int              load_code)
10449{
10450    int i;
10451
10452    for (i = 0; i < sc->num_queues; i++) {
10453        bxe_init_eth_fp(sc, i);
10454    }
10455
10456    rmb(); /* ensure status block indices were read */
10457
10458    bxe_init_rx_rings(sc);
10459    bxe_init_tx_rings(sc);
10460
10461    if (IS_VF(sc)) {
10462        return;
10463    }
10464
10465    /* initialize MOD_ABS interrupts */
10466    elink_init_mod_abs_int(sc, &sc->link_vars,
10467                           sc->devinfo.chip_id,
10468                           sc->devinfo.shmem_base,
10469                           sc->devinfo.shmem2_base,
10470                           SC_PORT(sc));
10471
10472    bxe_init_def_sb(sc);
10473    bxe_update_dsb_idx(sc);
10474    bxe_init_sp_ring(sc);
10475    bxe_init_eq_ring(sc);
10476    bxe_init_internal(sc, load_code);
10477    bxe_pf_init(sc);
10478    bxe_stats_init(sc);
10479
10480    /* flush all before enabling interrupts */
10481    mb();
10482
10483    bxe_int_enable(sc);
10484
10485    /* check for SPIO5 */
10486    bxe_attn_int_deasserted0(sc,
10487                             REG_RD(sc,
10488                                    (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
10489                                     SC_PORT(sc)*4)) &
10490                             AEU_INPUTS_ATTN_BITS_SPIO5);
10491}
10492
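/*
 * Initialize the ecore objects used by the driver: RX mode, multicast, MAC
 * and VLAN credit pools, and the RSS configuration object.
 */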
10493static inline void
10494bxe_init_objs(struct bxe_softc *sc)
10495{
10496    /* mcast rules must be added to tx if tx switching is enabled */
10497    ecore_obj_type o_type =
10498        (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10499                                         ECORE_OBJ_TYPE_RX;
10500
10501    /* RX_MODE controlling object */
10502    ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10503
10504    /* multicast configuration controlling object */
10505    ecore_init_mcast_obj(sc,
10506                         &sc->mcast_obj,
10507                         sc->fp[0].cl_id,
10508                         sc->fp[0].index,
10509                         SC_FUNC(sc),
10510                         SC_FUNC(sc),
10511                         BXE_SP(sc, mcast_rdata),
10512                         BXE_SP_MAPPING(sc, mcast_rdata),
10513                         ECORE_FILTER_MCAST_PENDING,
10514                         &sc->sp_state,
10515                         o_type);
10516
10517    /* Setup CAM credit pools */
10518    ecore_init_mac_credit_pool(sc,
10519                               &sc->macs_pool,
10520                               SC_FUNC(sc),
10521                               CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10522                                                 VNICS_PER_PATH(sc));
10523
10524    ecore_init_vlan_credit_pool(sc,
10525                                &sc->vlans_pool,
10526                                SC_ABS_FUNC(sc) >> 1,
10527                                CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
10528                                                  VNICS_PER_PATH(sc));
10529
10530    /* RSS configuration object */
10531    ecore_init_rss_config_obj(sc,
10532                              &sc->rss_conf_obj,
10533                              sc->fp[0].cl_id,
10534                              sc->fp[0].index,
10535                              SC_FUNC(sc),
10536                              SC_FUNC(sc),
10537                              BXE_SP(sc, rss_rdata),
10538                              BXE_SP_MAPPING(sc, rss_rdata),
10539                              ECORE_FILTER_RSS_CONF_PENDING,
10540                              &sc->sp_state, ECORE_OBJ_TYPE_RX);
10541}
10542
10543/*
10544 * Initialize the function. This must be called before sending CLIENT_SETUP
10545 * for the first client.
10546 */
10547static inline int
10548bxe_func_start(struct bxe_softc *sc)
10549{
10550    struct ecore_func_state_params func_params = { NULL };
10551    struct ecore_func_start_params *start_params = &func_params.params.start;
10552
10553    /* Prepare parameters for function state transitions */
10554    bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT);
10555
10556    func_params.f_obj = &sc->func_obj;
10557    func_params.cmd = ECORE_F_CMD_START;
10558
10559    /* Function parameters */
10560    start_params->mf_mode     = sc->devinfo.mf_info.mf_mode;
10561    start_params->sd_vlan_tag = OVLAN(sc);
10562
10563    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
10564        start_params->network_cos_mode = STATIC_COS;
10565    } else { /* CHIP_IS_E1X */
10566        start_params->network_cos_mode = FW_WRR;
10567    }
10568
10569    //start_params->gre_tunnel_mode = 0;
10570    //start_params->gre_tunnel_rss  = 0;
10571
10572    return (ecore_func_state_change(sc, &func_params));
10573}
10574
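/*
 * Move the device between PCI power states (D0 or D3hot) by updating the
 * power management control/status register, e.g.
 * bxe_set_power_state(sc, PCI_PM_D0) to bring the device to full power.
 */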
10575static int
10576bxe_set_power_state(struct bxe_softc *sc,
10577                    uint8_t          state)
10578{
10579    uint16_t pmcsr;
10580
    /* If there is no power capability, just succeed (with a warning) */
10582    if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
10583        BLOGW(sc, "No power capability\n");
10584        return (0);
10585    }
10586
10587    pmcsr = pci_read_config(sc->dev,
10588                            (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10589                            2);
10590
10591    switch (state) {
10592    case PCI_PM_D0:
10593        pci_write_config(sc->dev,
10594                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10595                         ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2);
10596
10597        if (pmcsr & PCIM_PSTAT_DMASK) {
10598            /* delay required during transition out of D3hot */
10599            DELAY(20000);
10600        }
10601
10602        break;
10603
10604    case PCI_PM_D3hot:
10605        /* XXX if there are other clients above don't shut down the power */
10606
10607        /* don't shut down the power for emulation and FPGA */
10608        if (CHIP_REV_IS_SLOW(sc)) {
10609            return (0);
10610        }
10611
10612        pmcsr &= ~PCIM_PSTAT_DMASK;
10613        pmcsr |= PCIM_PSTAT_D3;
10614
10615        if (sc->wol) {
10616            pmcsr |= PCIM_PSTAT_PMEENABLE;
10617        }
10618
10619        pci_write_config(sc->dev,
10620                         (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10621                         pmcsr, 4);
10622
10623        /*
10624         * No more memory access after this point until device is brought back
10625         * to D0 state.
10626         */
10627        break;
10628
10629    default:
10630        BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n",
10631            state, pmcsr);
10632        return (-1);
10633    }
10634
10635    return (0);
10636}
10637
10638
10639/* return true if succeeded to acquire the lock */
10640static uint8_t
10641bxe_trylock_hw_lock(struct bxe_softc *sc,
10642                    uint32_t         resource)
10643{
10644    uint32_t lock_status;
10645    uint32_t resource_bit = (1 << resource);
10646    int func = SC_FUNC(sc);
10647    uint32_t hw_lock_control_reg;
10648
10649    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);
10650
10651    /* Validating that the resource is within range */
10652    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
10653        BLOGD(sc, DBG_LOAD,
10654              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
10655              resource, HW_LOCK_MAX_RESOURCE_VALUE);
10656        return (FALSE);
10657    }
10658
10659    if (func <= 5) {
10660        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
10661    } else {
10662        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10663    }
10664
10665    /* try to acquire the lock */
10666    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
10667    lock_status = REG_RD(sc, hw_lock_control_reg);
10668    if (lock_status & resource_bit) {
10669        return (TRUE);
10670    }
10671
10672    BLOGE(sc, "Failed to get a resource lock 0x%x func %d "
10673        "lock_status 0x%x resource_bit 0x%x\n", resource, func,
10674        lock_status, resource_bit);
10675
10676    return (FALSE);
10677}
10678
10679/*
10680 * Get the recovery leader resource id according to the engine this function
 * belongs to. Currently only two engines are supported.
10682 */
10683static int
10684bxe_get_leader_lock_resource(struct bxe_softc *sc)
10685{
10686    if (SC_PATH(sc)) {
10687        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1);
10688    } else {
10689        return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0);
10690    }
10691}
10692
10693/* try to acquire a leader lock for current engine */
10694static uint8_t
10695bxe_trylock_leader_lock(struct bxe_softc *sc)
10696{
10697    return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10698}
10699
10700static int
10701bxe_release_leader_lock(struct bxe_softc *sc)
10702{
10703    return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc)));
10704}
10705
10706/* close gates #2, #3 and #4 */
10707static void
10708bxe_set_234_gates(struct bxe_softc *sc,
10709                  uint8_t          close)
10710{
10711    uint32_t val;
10712
10713    /* gates #2 and #4a are closed/opened for "not E1" only */
10714    if (!CHIP_IS_E1(sc)) {
10715        /* #4 */
10716        REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
10717        /* #2 */
10718        REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
10719    }
10720
10721    /* #3 */
10722    if (CHIP_IS_E1x(sc)) {
10723        /* prevent interrupts from HC on both ports */
10724        val = REG_RD(sc, HC_REG_CONFIG_1);
10725        REG_WR(sc, HC_REG_CONFIG_1,
10726               (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
10727               (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
10728
10729        val = REG_RD(sc, HC_REG_CONFIG_0);
10730        REG_WR(sc, HC_REG_CONFIG_0,
10731               (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
10732               (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
10733    } else {
10734        /* Prevent incoming interrupts in IGU */
10735        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
10736
10737        REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
10738               (!close) ?
10739               (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
10740               (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
10741    }
10742
10743    BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
10744          close ? "closing" : "opening");
10745
10746    wmb();
10747}
10748
/* poll for the pending writes bit; it should be cleared in no more than 1s */
10750static int
10751bxe_er_poll_igu_vq(struct bxe_softc *sc)
10752{
10753    uint32_t cnt = 1000;
10754    uint32_t pend_bits = 0;
10755
10756    do {
10757        pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS);
10758
10759        if (pend_bits == 0) {
10760            break;
10761        }
10762
10763        DELAY(1000);
10764    } while (--cnt > 0);
10765
10766    if (cnt == 0) {
10767        BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits);
10768        return (-1);
10769    }
10770
10771    return (0);
10772}
10773
10774#define SHARED_MF_CLP_MAGIC  0x80000000 /* 'magic' bit */
10775
10776static void
10777bxe_clp_reset_prep(struct bxe_softc *sc,
10778                   uint32_t         *magic_val)
10779{
10780    /* Do some magic... */
10781    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10782    *magic_val = val & SHARED_MF_CLP_MAGIC;
10783    MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
10784}
10785
10786/* restore the value of the 'magic' bit */
10787static void
10788bxe_clp_reset_done(struct bxe_softc *sc,
10789                   uint32_t         magic_val)
10790{
10791    /* Restore the 'magic' bit value... */
10792    uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb);
10793    MFCFG_WR(sc, shared_mf_config.clp_mb,
10794              (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
10795}
10796
10797/* prepare for MCP reset, takes care of CLP configurations */
10798static void
10799bxe_reset_mcp_prep(struct bxe_softc *sc,
10800                   uint32_t         *magic_val)
10801{
10802    uint32_t shmem;
10803    uint32_t validity_offset;
10804
10805    /* set `magic' bit in order to save MF config */
10806    if (!CHIP_IS_E1(sc)) {
10807        bxe_clp_reset_prep(sc, magic_val);
10808    }
10809
10810    /* get shmem offset */
10811    shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10812    validity_offset =
10813        offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);
10814
10815    /* Clear validity map flags */
10816    if (shmem > 0) {
10817        REG_WR(sc, shmem + validity_offset, 0);
10818    }
10819}
10820
10821#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
10822#define MCP_ONE_TIMEOUT  100    /* 100 ms */
10823
10824static void
10825bxe_mcp_wait_one(struct bxe_softc *sc)
10826{
10827    /* special handling for emulation and FPGA (10 times longer) */
10828    if (CHIP_REV_IS_SLOW(sc)) {
10829        DELAY((MCP_ONE_TIMEOUT*10) * 1000);
10830    } else {
10831        DELAY((MCP_ONE_TIMEOUT) * 1000);
10832    }
10833}
10834
/* initialize shmem_base and wait for the validity signature to appear */
10836static int
10837bxe_init_shmem(struct bxe_softc *sc)
10838{
10839    int cnt = 0;
10840    uint32_t val = 0;
10841
10842    do {
10843        sc->devinfo.shmem_base     =
10844        sc->link_params.shmem_base =
10845            REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
10846
10847        if (sc->devinfo.shmem_base) {
10848            val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
10849            if (val & SHR_MEM_VALIDITY_MB)
10850                return (0);
10851        }
10852
10853        bxe_mcp_wait_one(sc);
10854
10855    } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
10856
10857    BLOGE(sc, "BAD MCP validity signature\n");
10858
10859    return (-1);
10860}
10861
10862static int
10863bxe_reset_mcp_comp(struct bxe_softc *sc,
10864                   uint32_t         magic_val)
10865{
10866    int rc = bxe_init_shmem(sc);
10867
10868    /* Restore the `magic' bit value */
10869    if (!CHIP_IS_E1(sc)) {
10870        bxe_clp_reset_done(sc, magic_val);
10871    }
10872
10873    return (rc);
10874}
10875
10876static void
10877bxe_pxp_prep(struct bxe_softc *sc)
10878{
10879    if (!CHIP_IS_E1(sc)) {
10880        REG_WR(sc, PXP2_REG_RD_START_INIT, 0);
10881        REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0);
10882        wmb();
10883    }
10884}
10885
10886/*
10887 * Reset the whole chip except for:
10888 *      - PCIE core
10889 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10890 *      - IGU
10891 *      - MISC (including AEU)
10892 *      - GRC
10893 *      - RBCN, RBCP
10894 */
10895static void
10896bxe_process_kill_chip_reset(struct bxe_softc *sc,
10897                            uint8_t          global)
10898{
10899    uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
10900    uint32_t global_bits2, stay_reset2;
10901
10902    /*
10903     * Bits that have to be set in reset_mask2 if we want to reset 'global'
10904     * (per chip) blocks.
10905     */
10906    global_bits2 =
10907        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
10908        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
10909
10910    /*
10911     * Don't reset the following blocks.
10912     * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
     *            reset, as in a 4-port device they might still be owned
10914     *            by the MCP (there is only one leader per path).
10915     */
10916    not_reset_mask1 =
10917        MISC_REGISTERS_RESET_REG_1_RST_HC |
10918        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
10919        MISC_REGISTERS_RESET_REG_1_RST_PXP;
10920
10921    not_reset_mask2 =
10922        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
10923        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
10924        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
10925        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
10926        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
10927        MISC_REGISTERS_RESET_REG_2_RST_GRC  |
10928        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
10929        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
10930        MISC_REGISTERS_RESET_REG_2_RST_ATC |
10931        MISC_REGISTERS_RESET_REG_2_PGLC |
10932        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
10933        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
10934        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
10935        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
10936        MISC_REGISTERS_RESET_REG_2_UMAC0 |
10937        MISC_REGISTERS_RESET_REG_2_UMAC1;
10938
10939    /*
10940     * Keep the following blocks in reset:
10941     *  - all xxMACs are handled by the elink code.
10942     */
10943    stay_reset2 =
10944        MISC_REGISTERS_RESET_REG_2_XMAC |
10945        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
10946
10947    /* Full reset masks according to the chip */
10948    reset_mask1 = 0xffffffff;
10949
10950    if (CHIP_IS_E1(sc))
10951        reset_mask2 = 0xffff;
10952    else if (CHIP_IS_E1H(sc))
10953        reset_mask2 = 0x1ffff;
10954    else if (CHIP_IS_E2(sc))
10955        reset_mask2 = 0xfffff;
10956    else /* CHIP_IS_E3 */
10957        reset_mask2 = 0x3ffffff;
10958
10959    /* Don't reset global blocks unless we need to */
10960    if (!global)
10961        reset_mask2 &= ~global_bits2;
10962
10963    /*
10964     * In case of attention in the QM, we need to reset PXP
10965     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
10966     * because otherwise QM reset would release 'close the gates' shortly
10967     * before resetting the PXP, then the PSWRQ would send a write
10968     * request to PGLUE. Then when PXP is reset, PGLUE would try to
10969     * read the payload data from PSWWR, but PSWWR would not
10970     * respond. The write queue in PGLUE would stuck, dmae commands
     * respond. The write queue in PGLUE would get stuck, and DMAE commands
10972     * reset register (containing the
10973     * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
10974     * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
10975     * bit).
10976     */
10977    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
10978           reset_mask2 & (~not_reset_mask2));
10979
10980    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
10981           reset_mask1 & (~not_reset_mask1));
10982
10983    mb();
10984    wmb();
10985
10986    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
10987           reset_mask2 & (~stay_reset2));
10988
10989    mb();
10990    wmb();
10991
10992    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
10993    wmb();
10994}
10995
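/*
 * Perform the "process kill" error recovery flow: wait for the Tetris
 * buffer and outstanding read requests to drain, close gates #2, #3 and #4,
 * reset the chip (optionally including the global blocks/MCP), and re-open
 * the gates afterwards.
 */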
10996static int
10997bxe_process_kill(struct bxe_softc *sc,
10998                 uint8_t          global)
10999{
11000    int cnt = 1000;
11001    uint32_t val = 0;
11002    uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
11003    uint32_t tags_63_32 = 0;
11004
11005    /* Empty the Tetris buffer, wait for 1s */
11006    do {
11007        sr_cnt  = REG_RD(sc, PXP2_REG_RD_SR_CNT);
11008        blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
11009        port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
11010        port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
11011        pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
11012        if (CHIP_IS_E3(sc)) {
11013            tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
11014        }
11015
11016        if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
11017            ((port_is_idle_0 & 0x1) == 0x1) &&
11018            ((port_is_idle_1 & 0x1) == 0x1) &&
11019            (pgl_exp_rom2 == 0xffffffff) &&
11020            (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
11021            break;
11022        DELAY(1000);
11023    } while (cnt-- > 0);
11024
11025    if (cnt <= 0) {
11026        BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
11027                  "are still outstanding read requests after 1s! "
11028                  "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
11029                  "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
11030              sr_cnt, blk_cnt, port_is_idle_0,
11031              port_is_idle_1, pgl_exp_rom2);
11032        return (-1);
11033    }
11034
11035    mb();
11036
11037    /* Close gates #2, #3 and #4 */
11038    bxe_set_234_gates(sc, TRUE);
11039
11040    /* Poll for IGU VQs for 57712 and newer chips */
11041    if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
11042        return (-1);
11043    }
11044
11045    /* XXX indicate that "process kill" is in progress to MCP */
11046
11047    /* clear "unprepared" bit */
11048    REG_WR(sc, MISC_REG_UNPREPARED, 0);
11049    mb();
11050
11051    /* Make sure all is written to the chip before the reset */
11052    wmb();
11053
11054    /*
11055     * Wait for 1ms to empty GLUE and PCI-E core queues,
11056     * PSWHST, GRC and PSWRD Tetris buffer.
11057     */
11058    DELAY(1000);
11059
11060    /* Prepare to chip reset: */
11061    /* MCP */
11062    if (global) {
11063        bxe_reset_mcp_prep(sc, &val);
11064    }
11065
11066    /* PXP */
11067    bxe_pxp_prep(sc);
11068    mb();
11069
11070    /* reset the chip */
11071    bxe_process_kill_chip_reset(sc, global);
11072    mb();
11073
11074    /* clear errors in PGB */
11075    if (!CHIP_IS_E1(sc))
11076        REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
11077
11078    /* Recover after reset: */
11079    /* MCP */
11080    if (global && bxe_reset_mcp_comp(sc, val)) {
11081        return (-1);
11082    }
11083
11084    /* XXX add resetting the NO_MCP mode DB here */
11085
11086    /* Open the gates #2, #3 and #4 */
11087    bxe_set_234_gates(sc, FALSE);
11088
11089    /* XXX
     * IGU/AEU preparation: bring the AEU/IGU back to a reset state and
     * re-enable attentions
11092     */
11093
11094    return (0);
11095}
11096
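/*
 * Recovery flow executed by the leader function: optionally load a "fake"
 * driver via the MCP, run the process-kill chip reset, then clear the
 * reset-in-progress state and release the leader lock.
 */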
11097static int
11098bxe_leader_reset(struct bxe_softc *sc)
11099{
11100    int rc = 0;
11101    uint8_t global = bxe_reset_is_global(sc);
11102    uint32_t load_code;
11103
11104    /*
 * If we are not going to reset the MCP, load a "fake" driver to reset the
 * HW while the driver is the owner of the HW.
11107     */
11108    if (!global && !BXE_NOMCP(sc)) {
11109        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
11110                                   DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
11111        if (!load_code) {
11112            BLOGE(sc, "MCP response failure, aborting\n");
11113            rc = -1;
11114            goto exit_leader_reset;
11115        }
11116
11117        if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
11118            (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
11119            BLOGE(sc, "MCP unexpected response, aborting\n");
11120            rc = -1;
11121            goto exit_leader_reset2;
11122        }
11123
11124        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
11125        if (!load_code) {
11126            BLOGE(sc, "MCP response failure, aborting\n");
11127            rc = -1;
11128            goto exit_leader_reset2;
11129        }
11130    }
11131
11132    /* try to recover after the failure */
11133    if (bxe_process_kill(sc, global)) {
11134        BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
11135        rc = -1;
11136        goto exit_leader_reset2;
11137    }
11138
11139    /*
11140     * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
11141     * state.
11142     */
11143    bxe_set_reset_done(sc);
11144    if (global) {
11145        bxe_clear_reset_global(sc);
11146    }
11147
11148exit_leader_reset2:
11149
11150    /* unload "fake driver" if it was loaded */
11151    if (!global && !BXE_NOMCP(sc)) {
11152        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
11153        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
11154    }
11155
11156exit_leader_reset:
11157
11158    sc->is_leader = 0;
11159    bxe_release_leader_lock(sc);
11160
11161    mb();
11162    return (rc);
11163}
11164
11165/*
11166 * prepare INIT transition, parameters configured:
11167 *   - HC configuration
11168 *   - Queue's CDU context
11169 */
11170static void
11171bxe_pf_q_prep_init(struct bxe_softc               *sc,
11172                   struct bxe_fastpath            *fp,
11173                   struct ecore_queue_init_params *init_params)
11174{
11175    uint8_t cos;
11176    int cxt_index, cxt_offset;
11177
11178    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11179    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11180
11181    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11182    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11183
11184    /* HC rate */
11185    init_params->rx.hc_rate =
11186        sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11187    init_params->tx.hc_rate =
11188        sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
11189
11190    /* FW SB ID */
11191    init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11192
11193    /* CQ index among the SB indices */
11194    init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11195    init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11196
11197    /* set maximum number of COSs supported by this queue */
11198    init_params->max_cos = sc->max_cos;
11199
11200    BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
11201          fp->index, init_params->max_cos);
11202
11203    /* set the context pointers queue object */
11204    for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11205        /* XXX change index/cid here if ever support multiple tx CoS */
11206        /* fp->txdata[cos]->cid */
11207        cxt_index = fp->index / ILT_PAGE_CIDS;
11208        cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11209        init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
11210    }
11211}
11212
11213/* set flags that are common for the Tx-only and not normal connections */
11214static unsigned long
11215bxe_get_common_flags(struct bxe_softc    *sc,
11216                     struct bxe_fastpath *fp,
11217                     uint8_t             zero_stats)
11218{
11219    unsigned long flags = 0;
11220
11221    /* PF driver will always initialize the Queue to an ACTIVE state */
11222    bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags);
11223
11224    /*
11225     * tx only connections collect statistics (on the same index as the
11226     * parent connection). The statistics are zeroed when the parent
11227     * connection is initialized.
11228     */
11229
11230    bxe_set_bit(ECORE_Q_FLG_STATS, &flags);
11231    if (zero_stats) {
11232        bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags);
11233    }
11234
11235    /*
11236     * tx only connections can support tx-switching, though their
11237     * CoS-ness doesn't survive the loopback
11238     */
11239    if (sc->flags & BXE_TX_SWITCHING) {
11240        bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags);
11241    }
11242
11243    bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags);
11244
11245    return (flags);
11246}
11247
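/*
 * build the queue flags for a normal L2 connection (OV, TPA, leading
 * RSS/MCAST, VLAN) and merge in the common flags
 */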
11248static unsigned long
11249bxe_get_q_flags(struct bxe_softc    *sc,
11250                struct bxe_fastpath *fp,
11251                uint8_t             leading)
11252{
11253    unsigned long flags = 0;
11254
11255    if (IS_MF_SD(sc)) {
11256        bxe_set_bit(ECORE_Q_FLG_OV, &flags);
11257    }
11258
11259    if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
11260        bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
11261        bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
11262    }
11263
11264    if (leading) {
11265        bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
11266        bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
11267    }
11268
11269    bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);
11270
11271    /* merge with common flags */
11272    return (flags | bxe_get_common_flags(sc, fp, TRUE));
11273}
11274
11275static void
11276bxe_pf_q_prep_general(struct bxe_softc                  *sc,
11277                      struct bxe_fastpath               *fp,
11278                      struct ecore_general_setup_params *gen_init,
11279                      uint8_t                           cos)
11280{
11281    gen_init->stat_id = bxe_stats_id(fp);
11282    gen_init->spcl_id = fp->cl_id;
11283    gen_init->mtu = sc->mtu;
11284    gen_init->cos = cos;
11285}
11286
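/*
 * prepare the Rx queue SETUP parameters: pause thresholds, TPA/SGE sizing
 * and the ring DMA addresses
 */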
11287static void
11288bxe_pf_rx_q_prep(struct bxe_softc              *sc,
11289                 struct bxe_fastpath           *fp,
11290                 struct rxq_pause_params       *pause,
11291                 struct ecore_rxq_setup_params *rxq_init)
11292{
11293    uint8_t max_sge = 0;
11294    uint16_t sge_sz = 0;
11295    uint16_t tpa_agg_size = 0;
11296
11297    pause->sge_th_lo = SGE_TH_LO(sc);
11298    pause->sge_th_hi = SGE_TH_HI(sc);
11299
11300    /* validate that the SGE ring has enough entries to cross the high threshold */
11301    if (sc->dropless_fc &&
11302            (pause->sge_th_hi + FW_PREFETCH_CNT) >
11303            (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
11304        BLOGW(sc, "sge ring threshold limit\n");
11305    }
11306
11307    /* minimum max_aggregation_size is 2*MTU (two full buffers) */
11308    tpa_agg_size = (2 * sc->mtu);
11309    if (tpa_agg_size < sc->max_aggregation_size) {
11310        tpa_agg_size = sc->max_aggregation_size;
11311    }
11312
11313    max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11314    max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11315                   (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
11316    sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
11317
11318    /* pause - not for e1 */
11319    if (!CHIP_IS_E1(sc)) {
11320        pause->bd_th_lo = BD_TH_LO(sc);
11321        pause->bd_th_hi = BD_TH_HI(sc);
11322
11323        pause->rcq_th_lo = RCQ_TH_LO(sc);
11324        pause->rcq_th_hi = RCQ_TH_HI(sc);
11325
11326        /* validate rings have enough entries to cross high thresholds */
11327        if (sc->dropless_fc &&
11328            pause->bd_th_hi + FW_PREFETCH_CNT >
11329            sc->rx_ring_size) {
11330            BLOGW(sc, "rx bd ring threshold limit\n");
11331        }
11332
11333        if (sc->dropless_fc &&
11334            pause->rcq_th_hi + FW_PREFETCH_CNT >
11335            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
11336            BLOGW(sc, "rcq ring threshold limit\n");
11337        }
11338
11339        pause->pri_map = 1;
11340    }
11341
11342    /* rxq setup */
11343    rxq_init->dscr_map   = fp->rx_dma.paddr;
11344    rxq_init->sge_map    = fp->rx_sge_dma.paddr;
11345    rxq_init->rcq_map    = fp->rcq_dma.paddr;
11346    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11347
11348    /*
11349     * This is the maximum number of data bytes that may be placed on
11350     * the BD (not including padding).
11351     */
11352    rxq_init->buf_sz = (fp->rx_buf_size -
11353                        IP_HEADER_ALIGNMENT_PADDING);
11354
11355    rxq_init->cl_qzone_id     = fp->cl_qzone_id;
11356    rxq_init->tpa_agg_sz      = tpa_agg_size;
11357    rxq_init->sge_buf_sz      = sge_sz;
11358    rxq_init->max_sges_pkt    = max_sge;
11359    rxq_init->rss_engine_id   = SC_FUNC(sc);
11360    rxq_init->mcast_engine_id = SC_FUNC(sc);
11361
11362    /*
11363     * Maximum number of simultaneous TPA aggregations for this Queue.
11364     * For PF Clients it should be the maximum available number.
11365     * VF driver(s) may want to define it to a smaller value.
11366     */
11367    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11368
11369    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11370    rxq_init->fw_sb_id = fp->fw_sb_id;
11371
11372    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11373
11374    /*
11375     * configure silent vlan removal
11376     * if multi function mode is afex, then mask default vlan
11377     */
11378    if (IS_MF_AFEX(sc)) {
11379        rxq_init->silent_removal_value =
11380            sc->devinfo.mf_info.afex_def_vlan_tag;
11381        rxq_init->silent_removal_mask = EVL_VLID_MASK;
11382    }
11383}
11384
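/* prepare the Tx queue SETUP parameters for the given CoS */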
11385static void
11386bxe_pf_tx_q_prep(struct bxe_softc              *sc,
11387                 struct bxe_fastpath           *fp,
11388                 struct ecore_txq_setup_params *txq_init,
11389                 uint8_t                       cos)
11390{
11391    /*
11392     * XXX If multiple CoS is ever supported then each fastpath structure
11393     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
11394     * fp->txdata[cos]->tx_dma.paddr;
11395     */
11396    txq_init->dscr_map     = fp->tx_dma.paddr;
11397    txq_init->sb_cq_index  = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11398    txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11399    txq_init->fw_sb_id     = fp->fw_sb_id;
11400
11401    /*
11402     * set the TSS leading client id for TX classification to the
11403     * leading RSS client id
11404     */
11405    txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11406}
11407
11408/*
11409 * This function performs 2 steps in a queue state machine:
11410 *   1) RESET->INIT
11411 *   2) INIT->SETUP
11412 */
11413static int
11414bxe_setup_queue(struct bxe_softc    *sc,
11415                struct bxe_fastpath *fp,
11416                uint8_t             leading)
11417{
11418    struct ecore_queue_state_params q_params = { NULL };
11419    struct ecore_queue_setup_params *setup_params =
11420                        &q_params.params.setup;
11421    int rc;
11422
11423    BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11424
11425    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11426
11427    q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;
11428
11429    /* we want to wait for completion in this context */
11430    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
11431
11432    /* prepare the INIT parameters */
11433    bxe_pf_q_prep_init(sc, fp, &q_params.params.init);
11434
11435    /* Set the command */
11436    q_params.cmd = ECORE_Q_CMD_INIT;
11437
11438    /* Change the state to INIT */
11439    rc = ecore_queue_state_change(sc, &q_params);
11440    if (rc) {
11441        BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11442        return (rc);
11443    }
11444
11445    BLOGD(sc, DBG_LOAD, "init complete\n");
11446
11447    /* now move the Queue to the SETUP state */
11448    memset(setup_params, 0, sizeof(*setup_params));
11449
11450    /* set Queue flags */
11451    setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11452
11453    /* set general SETUP parameters */
11454    bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11455                          FIRST_TX_COS_INDEX);
11456
11457    bxe_pf_rx_q_prep(sc, fp,
11458                     &setup_params->pause_params,
11459                     &setup_params->rxq_params);
11460
11461    bxe_pf_tx_q_prep(sc, fp,
11462                     &setup_params->txq_params,
11463                     FIRST_TX_COS_INDEX);
11464
11465    /* Set the command */
11466    q_params.cmd = ECORE_Q_CMD_SETUP;
11467
11468    /* change the state to SETUP */
11469    rc = ecore_queue_state_change(sc, &q_params);
11470    if (rc) {
11471        BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11472        return (rc);
11473    }
11474
11475    return (rc);
11476}
11477
11478static int
11479bxe_setup_leading(struct bxe_softc *sc)
11480{
11481    return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11482}
11483
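/*
 * build and send the RSS configuration ramrod: mode, hash types, the
 * indirection table and (optionally) a new random hash key
 */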
11484static int
11485bxe_config_rss_pf(struct bxe_softc            *sc,
11486                  struct ecore_rss_config_obj *rss_obj,
11487                  uint8_t                     config_hash)
11488{
11489    struct ecore_config_rss_params params = { NULL };
11490    int i;
11491
11492    /*
11493     * Although RSS is meaningless when there is a single HW queue, we
11494     * still need it enabled in order to have the HW Rx hash generated.
11495     */
11496
11497    params.rss_obj = rss_obj;
11498
11499    bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
11500
11501    bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
11502
11503    /* RSS configuration */
11504    bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
11505    bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
11506    bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
11507    bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
11508    if (rss_obj->udp_rss_v4) {
11509        bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
11510    }
11511    if (rss_obj->udp_rss_v6) {
11512        bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
11513    }
11514
11515    /* Hash bits */
11516    params.rss_result_mask = MULTI_MASK;
11517
11518    memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11519
11520    if (config_hash) {
11521        /* RSS keys */
11522        for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
11523            params.rss_key[i] = arc4random();
11524        }
11525
11526        bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
11527    }
11528
11529    return (ecore_config_rss(sc, &params));
11530}
11531
11532static int
11533bxe_config_rss_eth(struct bxe_softc *sc,
11534                   uint8_t          config_hash)
11535{
11536    return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11537}
11538
11539static int
11540bxe_init_rss_pf(struct bxe_softc *sc)
11541{
11542    uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
11543    int i;
11544
11545    /*
11546     * Prepare the initial contents of the indirection table if
11547     * RSS is enabled
11548     */
11549    for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11550        sc->rss_conf_obj.ind_table[i] =
11551            (sc->fp->cl_id + (i % num_eth_queues));
11552    }
11553
11554    if (sc->udp_rss) {
11555        sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11556    }
11557
11558    /*
11559     * For 57710 and 57711 SEARCHER configuration (rss_keys) is
11560     * per-port, so if explicit configuration is needed, do it only
11561     * for a PMF.
11562     *
11563     * For 57712 and newer it's a per-function configuration.
11564     */
11565    return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
11566}
11567
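/* add or delete a single classification MAC via the given vlan_mac object */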
11568static int
11569bxe_set_mac_one(struct bxe_softc          *sc,
11570                uint8_t                   *mac,
11571                struct ecore_vlan_mac_obj *obj,
11572                uint8_t                   set,
11573                int                       mac_type,
11574                unsigned long             *ramrod_flags)
11575{
11576    struct ecore_vlan_mac_ramrod_params ramrod_param;
11577    int rc;
11578
11579    memset(&ramrod_param, 0, sizeof(ramrod_param));
11580
11581    /* fill in general parameters */
11582    ramrod_param.vlan_mac_obj = obj;
11583    ramrod_param.ramrod_flags = *ramrod_flags;
11584
11585    /* fill a user request section if needed */
11586    if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
11587        memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
11588
11589        bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
11590
11591        /* Set the command: ADD or DEL */
11592        ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
11593                                            ECORE_VLAN_MAC_DEL;
11594    }
11595
11596    rc = ecore_config_vlan_mac(sc, &ramrod_param);
11597
11598    if (rc == ECORE_EXISTS) {
11599        BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
11600        /* do not treat adding same MAC as error */
11601        rc = 0;
11602    } else if (rc < 0) {
11603        BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
11604    }
11605
11606    return (rc);
11607}
11608
11609static int
11610bxe_set_eth_mac(struct bxe_softc *sc,
11611                uint8_t          set)
11612{
11613    unsigned long ramrod_flags = 0;
11614
11615    BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n");
11616
11617    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
11618
11619    /* Eth MAC is set on RSS leading client (fp[0]) */
11620    return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11621                            &sc->sp_objs->mac_obj,
11622                            set, ECORE_ETH_MAC, &ramrod_flags));
11623}
11624
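/* return the index of the currently selected/active PHY */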
11625static int
11626bxe_get_cur_phy_idx(struct bxe_softc *sc)
11627{
11628    uint32_t sel_phy_idx = 0;
11629
11630    if (sc->link_params.num_phys <= 1) {
11631        return (ELINK_INT_PHY);
11632    }
11633
11634    if (sc->link_vars.link_up) {
11635        sel_phy_idx = ELINK_EXT_PHY1;
11636        /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
11637        if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11638            (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11639             ELINK_SUPPORTED_FIBRE))
11640            sel_phy_idx = ELINK_EXT_PHY2;
11641    } else {
11642        switch (elink_phy_selection(&sc->link_params)) {
11643        case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
11644        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
11645        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
11646               sel_phy_idx = ELINK_EXT_PHY1;
11647               break;
11648        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
11649        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
11650               sel_phy_idx = ELINK_EXT_PHY2;
11651               break;
11652        }
11653    }
11654
11655    return (sel_phy_idx);
11656}
11657
11658static int
11659bxe_get_link_cfg_idx(struct bxe_softc *sc)
11660{
11661    uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc);
11662
11663    /*
11664     * The selected/activated PHY is always given after swapping (when PHY
11665     * swapping is enabled), so when swapping is enabled we need to reverse
11666     * the configuration index.
11667     */
11668
11669    if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11670        if (sel_phy_idx == ELINK_EXT_PHY1)
11671            sel_phy_idx = ELINK_EXT_PHY2;
11672        else if (sel_phy_idx == ELINK_EXT_PHY2)
11673            sel_phy_idx = ELINK_EXT_PHY1;
11674    }
11675
11676    return (ELINK_LINK_CONFIG_IDX(sel_phy_idx));
11677}
11678
11679static void
11680bxe_set_requested_fc(struct bxe_softc *sc)
11681{
11682    /*
11683     * Initialize the link parameters structure variables.
11684     * It is recommended to turn off Rx flow control for jumbo frames
11685     * for better performance.
11686     */
11687    if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11688        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11689    } else {
11690        sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11691    }
11692}
11693
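/* translate the negotiated IEEE flow control into the advertised pause flags */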
11694static void
11695bxe_calc_fc_adv(struct bxe_softc *sc)
11696{
11697    uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
11698
11700    sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11701                                           ADVERTISED_Pause);
11702
11703    switch (sc->link_vars.ieee_fc &
11704            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
11705
11706    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
11707        sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11708                                          ADVERTISED_Pause);
11709        break;
11710
11711    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
11712        sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11713        break;
11714
11715    default:
11716        break;
11717
11718    }
11719}
11720
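/* return the effective line speed, capped by the multi-function max configuration */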
11721static uint16_t
11722bxe_get_mf_speed(struct bxe_softc *sc)
11723{
11724    uint16_t line_speed = sc->link_vars.line_speed;
11725    if (IS_MF(sc)) {
11726        uint16_t maxCfg =
11727            bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11728
11729        /* calculate the current MAX line speed limit for the MF devices */
11730        if (IS_MF_SI(sc)) {
11731            line_speed = (line_speed * maxCfg) / 100;
11732        } else { /* SD mode */
11733            uint16_t vn_max_rate = maxCfg * 100;
11734
11735            if (vn_max_rate < line_speed) {
11736                line_speed = vn_max_rate;
11737            }
11738        }
11739    }
11740
11741    return (line_speed);
11742}
11743
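/* collect the current link state (speed, duplex, flow control) into a report structure */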
11744static void
11745bxe_fill_report_data(struct bxe_softc            *sc,
11746                     struct bxe_link_report_data *data)
11747{
11748    uint16_t line_speed = bxe_get_mf_speed(sc);
11749
11750    memset(data, 0, sizeof(*data));
11751
11752    /* fill the report data with the effective line speed */
11753    data->line_speed = line_speed;
11754
11755    /* Link is down */
11756    if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11757        bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11758    }
11759
11760    /* Full DUPLEX */
11761    if (sc->link_vars.duplex == DUPLEX_FULL) {
11762        bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11763    }
11764
11765    /* Rx Flow Control is ON */
11766    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11767        bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11768    }
11769
11770    /* Tx Flow Control is ON */
11771    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11772        bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11773    }
11774}
11775
11776/* report link status to OS, should be called under phy_lock */
11777static void
11778bxe_link_report_locked(struct bxe_softc *sc)
11779{
11780    struct bxe_link_report_data cur_data;
11781
11782    /* reread mf_cfg */
11783    if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
11784        bxe_read_mf_cfg(sc);
11785    }
11786
11787    /* Read the current link report info */
11788    bxe_fill_report_data(sc, &cur_data);
11789
11790    /* Don't report link down or exactly the same link status twice */
11791    if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11792        (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11793                      &sc->last_reported_link.link_report_flags) &&
11794         bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11795                      &cur_data.link_report_flags))) {
11796        return;
11797    }
11798
11799    ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n",
11800                   cur_data.link_report_flags, sc->last_reported_link.link_report_flags);
11801    sc->link_cnt++;
11802
11803    ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt);
11804    /* report new link params and remember the state for the next time */
11805    memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11806
11807    if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
11808                     &cur_data.link_report_flags)) {
11809        if_link_state_change(sc->ifp, LINK_STATE_DOWN);
11810    } else {
11811        const char *duplex;
11812        const char *flow;
11813
11814        if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
11815                                   &cur_data.link_report_flags)) {
11816            duplex = "full";
11817            ELINK_DEBUG_P0(sc, "link set to full duplex\n");
11818        } else {
11819            duplex = "half";
11820            ELINK_DEBUG_P0(sc, "link set to half duplex\n");
11821        }
11822
11823        /*
11824         * Handle the FC at the end so that only these flags would be
11825         * possibly set. This way we may easily check if there is no FC
11826         * enabled.
11827         */
11828        if (cur_data.link_report_flags) {
11829            if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11830                             &cur_data.link_report_flags) &&
11831                bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11832                             &cur_data.link_report_flags)) {
11833                flow = "ON - receive & transmit";
11834            } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11835                                    &cur_data.link_report_flags) &&
11836                       !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11837                                     &cur_data.link_report_flags)) {
11838                flow = "ON - receive";
11839            } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
11840                                     &cur_data.link_report_flags) &&
11841                       bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
11842                                    &cur_data.link_report_flags)) {
11843                flow = "ON - transmit";
11844            } else {
11845                flow = "none"; /* possible? */
11846            }
11847        } else {
11848            flow = "none";
11849        }
11850
11851        if_link_state_change(sc->ifp, LINK_STATE_UP);
11852        BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
11853              cur_data.line_speed, duplex, flow);
11854    }
11855}
11856
11857static void
11858bxe_link_report(struct bxe_softc *sc)
11859{
11860    bxe_acquire_phy_lock(sc);
11861    bxe_link_report_locked(sc);
11862    bxe_release_phy_lock(sc);
11863}
11864
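/*
 * refresh the link status from the PHY where possible, otherwise assume a
 * fixed link (emulation/FPGA or non-PF), and report it to the stack
 */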
11865static void
11866bxe_link_status_update(struct bxe_softc *sc)
11867{
11868    if (sc->state != BXE_STATE_OPEN) {
11869        return;
11870    }
11871
11872    if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
11873        elink_link_status_update(&sc->link_params, &sc->link_vars);
11874    } else {
11875        sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11876                                  ELINK_SUPPORTED_10baseT_Full |
11877                                  ELINK_SUPPORTED_100baseT_Half |
11878                                  ELINK_SUPPORTED_100baseT_Full |
11879                                  ELINK_SUPPORTED_1000baseT_Full |
11880                                  ELINK_SUPPORTED_2500baseX_Full |
11881                                  ELINK_SUPPORTED_10000baseT_Full |
11882                                  ELINK_SUPPORTED_TP |
11883                                  ELINK_SUPPORTED_FIBRE |
11884                                  ELINK_SUPPORTED_Autoneg |
11885                                  ELINK_SUPPORTED_Pause |
11886                                  ELINK_SUPPORTED_Asym_Pause);
11887        sc->port.advertising[0] = sc->port.supported[0];
11888
11889        sc->link_params.sc                = sc;
11890        sc->link_params.port              = SC_PORT(sc);
11891        sc->link_params.req_duplex[0]     = DUPLEX_FULL;
11892        sc->link_params.req_flow_ctrl[0]  = ELINK_FLOW_CTRL_NONE;
11893        sc->link_params.req_line_speed[0] = SPEED_10000;
11894        sc->link_params.speed_cap_mask[0] = 0x7f0000;
11895        sc->link_params.switch_cfg        = ELINK_SWITCH_CFG_10G;
11896
11897        if (CHIP_REV_IS_FPGA(sc)) {
11898            sc->link_vars.mac_type    = ELINK_MAC_TYPE_EMAC;
11899            sc->link_vars.line_speed  = ELINK_SPEED_1000;
11900            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11901                                         LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
11902        } else {
11903            sc->link_vars.mac_type    = ELINK_MAC_TYPE_BMAC;
11904            sc->link_vars.line_speed  = ELINK_SPEED_10000;
11905            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11906                                         LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
11907        }
11908
11909        sc->link_vars.link_up = 1;
11910
11911        sc->link_vars.duplex    = DUPLEX_FULL;
11912        sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11913
11914        if (IS_PF(sc)) {
11915            REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11916            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11917            bxe_link_report(sc);
11918        }
11919    }
11920
11921    if (IS_PF(sc)) {
11922        if (sc->link_vars.link_up) {
11923            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11924        } else {
11925            bxe_stats_handle(sc, STATS_EVENT_STOP);
11926        }
11927        bxe_link_report(sc);
11928    } else {
11929        bxe_link_report(sc);
11930        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11931    }
11932}
11933
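/*
 * initial PHY bring-up at load time: set the requested flow control,
 * select loopback modes for diagnostic loads and start the periodic task
 */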
11934static int
11935bxe_initial_phy_init(struct bxe_softc *sc,
11936                     int              load_mode)
11937{
11938    int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
11939    uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
11940    struct elink_params *lp = &sc->link_params;
11941
11942    bxe_set_requested_fc(sc);
11943
11944    if (CHIP_REV_IS_SLOW(sc)) {
11945        uint32_t bond = CHIP_BOND_ID(sc);
11946        uint32_t feat = 0;
11947
11948        if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
11949            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11950        } else if (bond & 0x4) {
11951            if (CHIP_IS_E3(sc)) {
11952                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
11953            } else {
11954                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
11955            }
11956        } else if (bond & 0x8) {
11957            if (CHIP_IS_E3(sc)) {
11958                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
11959            } else {
11960                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11961            }
11962        }
11963
11964        /* disable EMAC for E3 and above */
11965        if (bond & 0x2) {
11966            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
11967        }
11968
11969        sc->link_params.feature_config_flags |= feat;
11970    }
11971
11972    bxe_acquire_phy_lock(sc);
11973
11974    if (load_mode == LOAD_DIAG) {
11975        lp->loopback_mode = ELINK_LOOPBACK_XGXS;
11976        /* Prefer doing PHY loopback at 10G speed, if possible */
11977        if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
11978            if (lp->speed_cap_mask[cfg_idx] &
11979                PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
11980                lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
11981            } else {
11982                lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
11983            }
11984        }
11985    }
11986
11987    if (load_mode == LOAD_LOOPBACK_EXT) {
11988        lp->loopback_mode = ELINK_LOOPBACK_EXT;
11989    }
11990
11991    rc = elink_phy_init(&sc->link_params, &sc->link_vars);
11992
11993    bxe_release_phy_lock(sc);
11994
11995    bxe_calc_fc_adv(sc);
11996
11997    if (sc->link_vars.link_up) {
11998        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
11999        bxe_link_report(sc);
12000    }
12001
12002    if (!CHIP_REV_IS_SLOW(sc)) {
12003        bxe_periodic_start(sc);
12004    }
12005
12006    sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
12007    return (rc);
12008}
12009
12010static u_int
12011bxe_push_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
12012{
12013    struct ecore_mcast_list_elem *mc_mac = arg;
12014
12015    mc_mac += cnt;
12016    mc_mac->mac = (uint8_t *)LLADDR(sdl);
12017
12018    return (1);
12019}
12020
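/* build the ecore multicast list from the interface's link-level multicast addresses */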
12021static int
12022bxe_init_mcast_macs_list(struct bxe_softc                 *sc,
12023                         struct ecore_mcast_ramrod_params *p)
12024{
12025    if_t ifp = sc->ifp;
12026    int mc_count;
12027    struct ecore_mcast_list_elem *mc_mac;
12028
12029    ECORE_LIST_INIT(&p->mcast_list);
12030    p->mcast_list_len = 0;
12031
12032    /* XXXGL: multicast count may change later */
12033    mc_count = if_llmaddr_count(ifp);
12034
12035    if (!mc_count) {
12036        return (0);
12037    }
12038
12039    mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF,
12040                    (M_NOWAIT | M_ZERO));
12041    if (!mc_mac) {
12042        BLOGE(sc, "Failed to allocate temp mcast list\n");
12043        return (-1);
12044    }
12045    bzero(mc_mac, (sizeof(*mc_mac) * mc_count));
12046    if_foreach_llmaddr(ifp, bxe_push_maddr, mc_mac);
12047
12048    for (int i = 0; i < mc_count; i++) {
12049        ECORE_LIST_PUSH_TAIL(&mc_mac[i].link, &p->mcast_list);
12050        BLOGD(sc, DBG_LOAD,
12051              "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X and mc_count %d\n",
12052              mc_mac[i].mac[0], mc_mac[i].mac[1], mc_mac[i].mac[2],
12053              mc_mac[i].mac[3], mc_mac[i].mac[4], mc_mac[i].mac[5],
12054              mc_count);
12055    }
12056
12057    p->mcast_list_len = mc_count;
12058
12059    return (0);
12060}
12061
12062static void
12063bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p)
12064{
12065    struct ecore_mcast_list_elem *mc_mac =
12066        ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
12067                               struct ecore_mcast_list_elem,
12068                               link);
12069
12070    if (mc_mac) {
12071        /* only a single free as all mc_macs are in the same heap array */
12072        free(mc_mac, M_DEVBUF);
12073    }
12074}
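
/* program the multicast filters: clear the old configuration, then add the current MAC list */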
12075static int
12076bxe_set_mc_list(struct bxe_softc *sc)
12077{
12078    struct ecore_mcast_ramrod_params rparam = { NULL };
12079    int rc = 0;
12080
12081    rparam.mcast_obj = &sc->mcast_obj;
12082
12083    BXE_MCAST_LOCK(sc);
12084
12085    /* first, clear all configured multicast MACs */
12086    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
12087    if (rc < 0) {
12088        BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc);
12089        /* Manual backport parts of FreeBSD upstream r284470. */
12090        BXE_MCAST_UNLOCK(sc);
12091        return (rc);
12092    }
12093
12094    /* configure a new MACs list */
12095    rc = bxe_init_mcast_macs_list(sc, &rparam);
12096    if (rc) {
12097        BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc);
12098        BXE_MCAST_UNLOCK(sc);
12099        return (rc);
12100    }
12101
12102    /* Now add the new MACs */
12103    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
12104    if (rc < 0) {
12105        BLOGE(sc, "Failed to set new mcast config (%d)\n", rc);
12106    }
12107
12108    bxe_free_mcast_macs_list(&rparam);
12109
12110    BXE_MCAST_UNLOCK(sc);
12111
12112    return (rc);
12113}
12114
12115struct bxe_set_addr_ctx {
12116   struct bxe_softc *sc;
12117   unsigned long ramrod_flags;
12118   int rc;
12119};
12120
12121static u_int
12122bxe_set_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
12123{
12124    struct bxe_set_addr_ctx *ctx = arg;
12125    struct ecore_vlan_mac_obj *mac_obj = &ctx->sc->sp_objs->mac_obj;
12126    int rc;
12127
12128    if (ctx->rc < 0)
12129        return (0);
12130
12131    rc = bxe_set_mac_one(ctx->sc, (uint8_t *)LLADDR(sdl), mac_obj, TRUE,
12132                         ECORE_UC_LIST_MAC, &ctx->ramrod_flags);
12133
12134    /* do not treat adding same MAC as an error */
12135    if (rc == -EEXIST)
12136        BLOGD(ctx->sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12137    else if (rc < 0) {
12138        BLOGE(ctx->sc, "Failed to schedule ADD operations (%d)\n", rc);
12139        ctx->rc = rc;
12140    }
12141
12142    return (1);
12143}
12144
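/*
 * sync the unicast MAC list: delete the old configuration, schedule ADDs
 * for the current addresses and then execute the pending commands
 */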
12145static int
12146bxe_set_uc_list(struct bxe_softc *sc)
12147{
12148    if_t ifp = sc->ifp;
12149    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12150    struct bxe_set_addr_ctx ctx = { sc, 0, 0 };
12151    int rc;
12152
12153    /* first schedule a cleanup up of old configuration */
12154    rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
12155    if (rc < 0) {
12156        BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
12157        return (rc);
12158    }
12159
12160    if_foreach_lladdr(ifp, bxe_set_addr, &ctx);
12161    if (ctx.rc < 0)
12162        return (ctx.rc);
12163
12164    /* Execute the pending commands */
12165    bit_set(&ctx.ramrod_flags, RAMROD_CONT);
12166    return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
12167                            ECORE_UC_LIST_MAC, &ctx.ramrod_flags));
12168}
12169
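/* compute and apply the Rx filtering mode (normal/allmulti/promisc) from the interface flags */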
12170static void
12171bxe_set_rx_mode(struct bxe_softc *sc)
12172{
12173    if_t ifp = sc->ifp;
12174    uint32_t rx_mode = BXE_RX_MODE_NORMAL;
12175
12176    if (sc->state != BXE_STATE_OPEN) {
12177        BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12178        return;
12179    }
12180
12181    BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));
12182
12183    if (if_getflags(ifp) & IFF_PROMISC) {
12184        rx_mode = BXE_RX_MODE_PROMISC;
12185    } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
12186               ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
12187                CHIP_IS_E1(sc))) {
12188        rx_mode = BXE_RX_MODE_ALLMULTI;
12189    } else {
12190        if (IS_PF(sc)) {
12191            /* some multicasts */
12192            if (bxe_set_mc_list(sc) < 0) {
12193                rx_mode = BXE_RX_MODE_ALLMULTI;
12194            }
12195            if (bxe_set_uc_list(sc) < 0) {
12196                rx_mode = BXE_RX_MODE_PROMISC;
12197            }
12198        }
12199    }
12200
12201    sc->rx_mode = rx_mode;
12202
12203    /* schedule the rx_mode command */
12204    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12205        BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
12206        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12207        return;
12208    }
12209
12210    if (IS_PF(sc)) {
12211        bxe_set_storm_rx_mode(sc);
12212    }
12213}
12214
12215
12216/* update flags in shmem */
12217static void
12218bxe_update_drv_flags(struct bxe_softc *sc,
12219                     uint32_t         flags,
12220                     uint32_t         set)
12221{
12222    uint32_t drv_flags;
12223
12224    if (SHMEM2_HAS(sc, drv_flags)) {
12225        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12226        drv_flags = SHMEM2_RD(sc, drv_flags);
12227
12228        if (set) {
12229            SET_FLAGS(drv_flags, flags);
12230        } else {
12231            RESET_FLAGS(drv_flags, flags);
12232        }
12233
12234        SHMEM2_WR(sc, drv_flags, drv_flags);
12235        BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags);
12236
12237        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS);
12238    }
12239}
12240
12241/* periodic timer callout routine, only runs when the interface is up */
12242
12243static void
12244bxe_periodic_callout_func(void *xsc)
12245{
12246    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12247    int i;
12248
12249    if (!BXE_CORE_TRYLOCK(sc)) {
12250        /* just bail and try again next time */
12251
12252        if ((sc->state == BXE_STATE_OPEN) &&
12253            (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12254            /* schedule the next periodic callout */
12255            callout_reset(&sc->periodic_callout, hz,
12256                          bxe_periodic_callout_func, sc);
12257        }
12258
12259        return;
12260    }
12261
12262    if ((sc->state != BXE_STATE_OPEN) ||
12263        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12264        BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12265        BXE_CORE_UNLOCK(sc);
12266        return;
12267    }
12268
12270    /* Check for TX timeouts on any fastpath. */
12271    FOR_EACH_QUEUE(sc, i) {
12272        if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12273            /* Ruh-Roh, chip was reset! */
12274            break;
12275        }
12276    }
12277
12278    if (!CHIP_REV_IS_SLOW(sc)) {
12279        /*
12280         * This barrier is needed to ensure the ordering between the writing
12281         * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
12282         * the reading here.
12283         */
12284        mb();
12285        if (sc->port.pmf) {
12286            bxe_acquire_phy_lock(sc);
12287            elink_period_func(&sc->link_params, &sc->link_vars);
12288            bxe_release_phy_lock(sc);
12289        }
12290    }
12291
12292    if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12293        int mb_idx = SC_FW_MB_IDX(sc);
12294        uint32_t drv_pulse;
12295        uint32_t mcp_pulse;
12296
12297        ++sc->fw_drv_pulse_wr_seq;
12298        sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12299
12300        drv_pulse = sc->fw_drv_pulse_wr_seq;
12301        bxe_drv_pulse(sc);
12302
12303        mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
12304                     MCP_PULSE_SEQ_MASK);
12305
12306        /*
12307         * The delta between driver pulse and mcp response should
12308         * be 1 (before mcp response) or 0 (after mcp response).
12309         */
12310        if ((drv_pulse != mcp_pulse) &&
12311            (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
12312            /* someone lost a heartbeat... */
12313            BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
12314                  drv_pulse, mcp_pulse);
12315        }
12316    }
12317
12318    /* state is BXE_STATE_OPEN */
12319    bxe_stats_handle(sc, STATS_EVENT_UPDATE);
12320
12321    BXE_CORE_UNLOCK(sc);
12322
12323    if ((sc->state == BXE_STATE_OPEN) &&
12324        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12325        /* schedule the next periodic callout */
12326        callout_reset(&sc->periodic_callout, hz,
12327                      bxe_periodic_callout_func, sc);
12328    }
12329}
12330
12331static void
12332bxe_periodic_start(struct bxe_softc *sc)
12333{
12334    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12335    callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12336}
12337
12338static void
12339bxe_periodic_stop(struct bxe_softc *sc)
12340{
12341    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12342    callout_drain(&sc->periodic_callout);
12343}
12344
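/*
 * parity/error recovery state machine; drives both the leader and
 * non-leader recovery flows until the NIC is reloaded or recovery fails
 */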
12345void
12346bxe_parity_recover(struct bxe_softc *sc)
12347{
12348    uint8_t global = FALSE;
12349    uint32_t error_recovered, error_unrecovered;
12350
12352    if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
12353        (sc->state == BXE_STATE_ERROR)) {
12354        BLOGE(sc, "RECOVERY failed, "
12355            "stack notified driver is NOT running! "
12356            "Please reboot/power cycle the system.\n");
12357        return;
12358    }
12359
12360    while (1) {
12361        BLOGD(sc, DBG_SP,
12362           "%s sc=%p state=0x%x rec_state=0x%x error_status=%x\n",
12363            __func__, sc, sc->state, sc->recovery_state, sc->error_status);
12364
12365        switch(sc->recovery_state) {
12366
12367        case BXE_RECOVERY_INIT:
12368            bxe_chk_parity_attn(sc, &global, FALSE);
12369
12370            if ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ||
12371                (sc->error_status & BXE_ERR_MCP_ASSERT) ||
12372                (sc->error_status & BXE_ERR_GLOBAL)) {
12373
12374                BXE_CORE_LOCK(sc);
12375                if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12376                    bxe_periodic_stop(sc);
12377                }
12378                bxe_nic_unload(sc, UNLOAD_RECOVERY, false);
12379                sc->state = BXE_STATE_ERROR;
12380                sc->recovery_state = BXE_RECOVERY_FAILED;
12381                BLOGE(sc, " No Recovery tried for error 0x%x"
12382                    " stack notified driver is NOT running!"
12383                    " Please reboot/power cycle the system.\n",
12384                    sc->error_status);
12385                BXE_CORE_UNLOCK(sc);
12386                return;
12387            }
12388
12390            /* Try to get a LEADER_LOCK HW lock */
12391            if (bxe_trylock_leader_lock(sc)) {
12392
12393                bxe_set_reset_in_progress(sc);
12394                /*
12395                 * Check if there is a global attention and if
12396                 * there was a global attention, set the global
12397                 * reset bit.
12398                 */
12399                if (global) {
12400                    bxe_set_reset_global(sc);
12401                }
12402                sc->is_leader = 1;
12403            }
12404
12405            /* if the interface is still marked running, stop the periodic callout */
12406
12407            if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12408                bxe_periodic_stop(sc);
12409            }
12410
12411            BXE_CORE_LOCK(sc);
12412            bxe_nic_unload(sc, UNLOAD_RECOVERY, false);
12413            sc->recovery_state = BXE_RECOVERY_WAIT;
12414            BXE_CORE_UNLOCK(sc);
12415
12416            /*
12417             * Ensure "is_leader", MCP command sequence and
12418             * "recovery_state" update values are seen on other
12419             * CPUs.
12420             */
12421            mb();
12422            break;
12423        case BXE_RECOVERY_WAIT:
12424
12425            if (sc->is_leader) {
12426                int other_engine = SC_PATH(sc) ? 0 : 1;
12427                bool other_load_status =
12428                    bxe_get_load_status(sc, other_engine);
12429                bool load_status =
12430                    bxe_get_load_status(sc, SC_PATH(sc));
12431                global = bxe_reset_is_global(sc);
12432
12433                /*
12434                 * In case of a parity in a global block, let
12435                 * the first leader that performs a
12436                 * leader_reset() reset the global blocks in
12437                 * order to clear global attentions. Otherwise
12438                 * the gates will remain closed for that
12439                 * engine.
12440                 */
12441                if (load_status ||
12442                    (global && other_load_status)) {
12443                    /*
12444                     * Wait until all other functions get
12445                     * down.
12446                     */
12447                    taskqueue_enqueue_timeout(taskqueue_thread,
12448                        &sc->sp_err_timeout_task, hz/10);
12449                    return;
12450                } else {
12451                    /*
12452                     * If all other functions got down
12453                     * try to bring the chip back to
12454                     * normal. In any case it's an exit
12455                     * point for a leader.
12456                     */
12457                    if (bxe_leader_reset(sc)) {
12458                        BLOGE(sc, "RECOVERY failed, "
12459                            "stack notified driver is NOT running!\n");
12460                        sc->recovery_state = BXE_RECOVERY_FAILED;
12461                        sc->state = BXE_STATE_ERROR;
12462                        mb();
12463                        return;
12464                    }
12465
12466                    /*
12467                     * If we are here, means that the
12468                     * leader has succeeded and doesn't
12469                     * want to be a leader any more. Try
12470                     * to continue as a non-leader.
12471                     */
12472                    break;
12473                }
12474
12475            } else { /* non-leader */
12476                if (!bxe_reset_is_done(sc, SC_PATH(sc))) {
12477                    /*
12478                     * Try to get a LEADER_LOCK HW lock as
12479                     * long as a former leader may have
12480                     * been unloaded by the user or
12481                     * released a leadership by another
12482                     * reason.
12483                     */
12484                    if (bxe_trylock_leader_lock(sc)) {
12485                        /*
12486                         * I'm a leader now! Restart a
12487                         * switch case.
12488                         */
12489                        sc->is_leader = 1;
12490                        break;
12491                    }
12492
12493                    taskqueue_enqueue_timeout(taskqueue_thread,
12494                        &sc->sp_err_timeout_task, hz/10);
12495                    return;
12496
12497                } else {
12498                    /*
12499                     * If there was a global attention, wait
12500                     * for it to be cleared.
12501                     */
12502                    if (bxe_reset_is_global(sc)) {
12503                        taskqueue_enqueue_timeout(taskqueue_thread,
12504                            &sc->sp_err_timeout_task, hz/10);
12505                        return;
12506                     }
12507
12508                     error_recovered =
12509                         sc->eth_stats.recoverable_error;
12510                     error_unrecovered =
12511                         sc->eth_stats.unrecoverable_error;
12512                     BXE_CORE_LOCK(sc);
12513                     sc->recovery_state =
12514                         BXE_RECOVERY_NIC_LOADING;
12515                     if (bxe_nic_load(sc, LOAD_NORMAL)) {
12516                         error_unrecovered++;
12517                         sc->recovery_state = BXE_RECOVERY_FAILED;
12518                         sc->state = BXE_STATE_ERROR;
12519                         BLOGE(sc, "Recovery is NOT successful, "
12520                            " state=0x%x recovery_state=0x%x error=%x\n",
12521                            sc->state, sc->recovery_state, sc->error_status);
12522                         sc->error_status = 0;
12523                     } else {
12524                         sc->recovery_state =
12525                             BXE_RECOVERY_DONE;
12526                         error_recovered++;
12527                         BLOGI(sc, "Recovery is successful from errors %x,"
12528                            " state=0x%x"
12529                            " recovery_state=0x%x \n", sc->error_status,
12530                            sc->state, sc->recovery_state);
12531                         mb();
12532                     }
12533                     sc->error_status = 0;
12534                     BXE_CORE_UNLOCK(sc);
12535                     sc->eth_stats.recoverable_error =
12536                         error_recovered;
12537                     sc->eth_stats.unrecoverable_error =
12538                         error_unrecovered;
12539
12540                     return;
12541                }
12542            }
12543        default:
12544            return;
12545        }
12546    }
12547}
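
/*
 * entry point for error handling: disable interrupts, report link down and
 * kick off the parity recovery state machine
 */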
12548void
12549bxe_handle_error(struct bxe_softc * sc)
12550{
12551
12552    if (sc->recovery_state == BXE_RECOVERY_WAIT) {
12553        return;
12554    }
12555    if (sc->error_status) {
12556        if (sc->state == BXE_STATE_OPEN) {
12557            bxe_int_disable(sc);
12558        }
12559        if (sc->link_vars.link_up) {
12560            if_link_state_change(sc->ifp, LINK_STATE_DOWN);
12561        }
12562        sc->recovery_state = BXE_RECOVERY_INIT;
12563        BLOGI(sc, "bxe%d: Recovery started errors 0x%x recovery state 0x%x\n",
12564            sc->unit, sc->error_status, sc->recovery_state);
12565        bxe_parity_recover(sc);
12566    }
12567}
12568
12569static void
12570bxe_sp_err_timeout_task(void *arg, int pending)
12571{
12572
12573    struct bxe_softc *sc = (struct bxe_softc *)arg;
12574
12575    BLOGD(sc, DBG_SP,
12576        "%s state = 0x%x rec state=0x%x error_status=%x\n",
12577        __func__, sc->state, sc->recovery_state, sc->error_status);
12578
12579    if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
12580        (sc->state == BXE_STATE_ERROR)) {
12581        return;
12582    }
12583    /* capture a GRC dump if one was requested and an error is pending */
12584    if ((sc->error_status) && (sc->trigger_grcdump)) {
12585        bxe_grc_dump(sc);
12586    }
12587    if (sc->recovery_state != BXE_RECOVERY_DONE) {
12588        bxe_handle_error(sc);
12589        bxe_parity_recover(sc);
12590    } else if (sc->error_status) {
12591        bxe_handle_error(sc);
12592    }
12593
12594    return;
12595}
12596
12597/* start the controller */
12598static __noinline int
12599bxe_nic_load(struct bxe_softc *sc,
12600             int              load_mode)
12601{
12602    uint32_t val;
12603    int load_code = 0;
12604    int i, rc = 0;
12605
12606    BXE_CORE_LOCK_ASSERT(sc);
12607
12608    BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");
12609
12610    sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12611
12612    if (IS_PF(sc)) {
12613        /* must be called before memory allocation and HW init */
12614        bxe_ilt_set_info(sc);
12615    }
12616
12617    sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12618
12619    bxe_set_fp_rx_buf_size(sc);
12620
12621    if (bxe_alloc_fp_buffers(sc) != 0) {
12622        BLOGE(sc, "Failed to allocate fastpath memory\n");
12623        sc->state = BXE_STATE_CLOSED;
12624        rc = ENOMEM;
12625        goto bxe_nic_load_error0;
12626    }
12627
12628    if (bxe_alloc_mem(sc) != 0) {
12629        sc->state = BXE_STATE_CLOSED;
12630        rc = ENOMEM;
12631        goto bxe_nic_load_error0;
12632    }
12633
12634    if (bxe_alloc_fw_stats_mem(sc) != 0) {
12635        sc->state = BXE_STATE_CLOSED;
12636        rc = ENOMEM;
12637        goto bxe_nic_load_error0;
12638    }
12639
12640    if (IS_PF(sc)) {
12641        /* set pf load just before approaching the MCP */
12642        bxe_set_pf_load(sc);
12643
12644        /* if MCP exists send load request and analyze response */
12645        if (!BXE_NOMCP(sc)) {
12646            /* attempt to load pf */
12647            if (bxe_nic_load_request(sc, &load_code) != 0) {
12648                sc->state = BXE_STATE_CLOSED;
12649                rc = ENXIO;
12650                goto bxe_nic_load_error1;
12651            }
12652
12653            /* what did the MCP say? */
12654            if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
12655                bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12656                sc->state = BXE_STATE_CLOSED;
12657                rc = ENXIO;
12658                goto bxe_nic_load_error2;
12659            }
12660        } else {
12661            BLOGI(sc, "Device has no MCP!\n");
12662            load_code = bxe_nic_load_no_mcp(sc);
12663        }
12664
12665        /* mark PMF if applicable */
12666        bxe_nic_load_pmf(sc, load_code);
12667
12668        /* Init Function state controlling object */
12669        bxe_init_func_obj(sc);
12670
12671        /* Initialize HW */
12672        if (bxe_init_hw(sc, load_code) != 0) {
12673            BLOGE(sc, "HW init failed\n");
12674            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12675            sc->state = BXE_STATE_CLOSED;
12676            rc = ENXIO;
12677            goto bxe_nic_load_error2;
12678        }
12679    }
12680
12681    /* set ALWAYS_ALIVE bit in shmem */
12682    sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12683    bxe_drv_pulse(sc);
12684    sc->flags |= BXE_NO_PULSE;
12685
12686    /* attach interrupts */
12687    if (bxe_interrupt_attach(sc) != 0) {
12688        sc->state = BXE_STATE_CLOSED;
12689        rc = ENXIO;
12690        goto bxe_nic_load_error2;
12691    }
12692
12693    bxe_nic_init(sc, load_code);
12694
12695    /* Init per-function objects */
12696    if (IS_PF(sc)) {
12697        bxe_init_objs(sc);
12698        // XXX bxe_iov_nic_init(sc);
12699
12700        /* set AFEX default VLAN tag to an invalid value */
12701        sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12702        // XXX bxe_nic_load_afex_dcc(sc, load_code);
12703
12704        sc->state = BXE_STATE_OPENING_WAITING_PORT;
12705        rc = bxe_func_start(sc);
12706        if (rc) {
12707            BLOGE(sc, "Function start failed! rc = %d\n", rc);
12708            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12709            sc->state = BXE_STATE_ERROR;
12710            goto bxe_nic_load_error3;
12711        }
12712
12713        /* send LOAD_DONE command to MCP */
12714        if (!BXE_NOMCP(sc)) {
12715            load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
12716            if (!load_code) {
12717                BLOGE(sc, "MCP response failure, aborting\n");
12718                sc->state = BXE_STATE_ERROR;
12719                rc = ENXIO;
12720                goto bxe_nic_load_error3;
12721            }
12722        }
12723
12724        rc = bxe_setup_leading(sc);
12725        if (rc) {
12726            BLOGE(sc, "Setup leading failed! rc = %d\n", rc);
12727            sc->state = BXE_STATE_ERROR;
12728            goto bxe_nic_load_error3;
12729        }
12730
12731        FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
12732            rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12733            if (rc) {
12734                BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc);
12735                sc->state = BXE_STATE_ERROR;
12736                goto bxe_nic_load_error3;
12737            }
12738        }
12739
12740        rc = bxe_init_rss_pf(sc);
12741        if (rc) {
12742            BLOGE(sc, "PF RSS init failed\n");
12743            sc->state = BXE_STATE_ERROR;
12744            goto bxe_nic_load_error3;
12745        }
12746    }
12747    /* XXX VF */
12748
12749    /* now that the Clients are configured we are ready to work */
12750    sc->state = BXE_STATE_OPEN;
12751
12752    /* Configure a ucast MAC */
12753    if (IS_PF(sc)) {
12754        rc = bxe_set_eth_mac(sc, TRUE);
12755    }
12756    if (rc) {
12757        BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc);
12758        sc->state = BXE_STATE_ERROR;
12759        goto bxe_nic_load_error3;
12760    }
12761
12762    if (sc->port.pmf) {
12763        rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
12764        if (rc) {
12765            sc->state = BXE_STATE_ERROR;
12766            goto bxe_nic_load_error3;
12767        }
12768    }
12769
12770    sc->link_params.feature_config_flags &=
12771        ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;
12772
12773    /* start fast path */
12774
12775    /* Initialize Rx filter */
12776    bxe_set_rx_mode(sc);
12777
12778    /* start the Tx */
12779    switch (/* XXX load_mode */LOAD_OPEN) {
12780    case LOAD_NORMAL:
12781    case LOAD_OPEN:
12782        break;
12783
12784    case LOAD_DIAG:
12785    case LOAD_LOOPBACK_EXT:
12786        sc->state = BXE_STATE_DIAG;
12787        break;
12788
12789    default:
12790        break;
12791    }
12792
12793    if (sc->port.pmf) {
12794        bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
12795    } else {
12796        bxe_link_status_update(sc);
12797    }
12798
12799    /* start the periodic timer callout */
12800    bxe_periodic_start(sc);
12801
12802    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
12803        /* mark driver is loaded in shmem2 */
12804        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
12805        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
12806                  (val |
12807                   DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
12808                   DRV_FLAGS_CAPABILITIES_LOADED_L2));
12809    }
12810
12811    /* wait for all pending SP commands to complete */
12812    if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
12813        BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
12814        bxe_periodic_stop(sc);
12815        bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
12816        return (ENXIO);
12817    }
12818
12819    /* Tell the stack the driver is running! */
12820    if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);
12821
12822    BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");
12823
12824    return (0);
12825
12826bxe_nic_load_error3:
12827
12828    if (IS_PF(sc)) {
12829        bxe_int_disable_sync(sc, 1);
12830
12831        /* clean out queued objects */
12832        bxe_squeeze_objects(sc);
12833    }
12834
12835    bxe_interrupt_detach(sc);
12836
12837bxe_nic_load_error2:
12838
12839    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
12840        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
12841        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
12842    }
12843
12844    sc->port.pmf = 0;
12845
12846bxe_nic_load_error1:
12847
12848    /* clear pf_load status, as it was already set */
12849    if (IS_PF(sc)) {
12850        bxe_clear_pf_load(sc);
12851    }
12852
12853bxe_nic_load_error0:
12854
12855    bxe_free_fw_stats_mem(sc);
12856    bxe_free_fp_buffers(sc);
12857    bxe_free_mem(sc);
12858
12859    return (rc);
12860}
12861
12862static int
12863bxe_init_locked(struct bxe_softc *sc)
12864{
12865    int other_engine = SC_PATH(sc) ? 0 : 1;
12866    uint8_t other_load_status, load_status;
12867    uint8_t global = FALSE;
12868    int rc;
12869
12870    BXE_CORE_LOCK_ASSERT(sc);
12871
12872    /* check if the driver is already running */
12873    if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12874        BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
12875        return (0);
12876    }
12877
    if ((sc->state == BXE_STATE_ERROR) &&
        (sc->recovery_state == BXE_RECOVERY_FAILED)) {
        BLOGE(sc, "Initialization not done, "
                  "as previous recovery failed. "
                  "Reboot/Power-cycle the system\n");
        return (ENXIO);
    }

12887    bxe_set_power_state(sc, PCI_PM_D0);
12888
12889    /*
12890     * If parity occurred during the unload, then attentions and/or
     * RECOVERY_IN_PROGRESS may still be set. If so, we want the first function
     * loaded on the current engine to complete the recovery. Parity recovery
     * is only relevant for the PF driver.
12894     */
12895    if (IS_PF(sc)) {
12896        other_load_status = bxe_get_load_status(sc, other_engine);
12897        load_status = bxe_get_load_status(sc, SC_PATH(sc));
12898
12899        if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
12900            bxe_chk_parity_attn(sc, &global, TRUE)) {
12901            do {
12902                /*
12903                 * If there are attentions and they are in global blocks, set
                 * the GLOBAL_RESET bit regardless of whether it will be this
12905                 * function that will complete the recovery or not.
12906                 */
12907                if (global) {
12908                    bxe_set_reset_global(sc);
12909                }
12910
12911                /*
12912                 * Only the first function on the current engine should try
12913                 * to recover in open. In case of attentions in global blocks
12914                 * only the first in the chip should try to recover.
12915                 */
12916                if ((!load_status && (!global || !other_load_status)) &&
12917                    bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
12918                    BLOGI(sc, "Recovered during init\n");
12919                    break;
12920                }
12921
12922                /* recovery has failed... */
12923                bxe_set_power_state(sc, PCI_PM_D3hot);
12924                sc->recovery_state = BXE_RECOVERY_FAILED;
12925
12926                BLOGE(sc, "Recovery flow hasn't properly "
12927                          "completed yet, try again later. "
12928                          "If you still see this message after a "
12929                          "few retries then power cycle is required.\n");
12930
12931                rc = ENXIO;
12932                goto bxe_init_locked_done;
12933            } while (0);
12934        }
12935    }
12936
12937    sc->recovery_state = BXE_RECOVERY_DONE;
12938
12939    rc = bxe_nic_load(sc, LOAD_OPEN);
12940
12941bxe_init_locked_done:
12942
12943    if (rc) {
12944        /* Tell the stack the driver is NOT running! */
12945        BLOGE(sc, "Initialization failed, "
12946                  "stack notified driver is NOT running!\n");
        if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
12948    }
12949
12950    return (rc);
12951}
12952
12953static int
12954bxe_stop_locked(struct bxe_softc *sc)
12955{
12956    BXE_CORE_LOCK_ASSERT(sc);
12957    return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
12958}
12959
12960/*
12961 * Handles controller initialization when called from an unlocked routine.
12962 * ifconfig calls this function.
12963 *
12964 * Returns:
12965 *   void
12966 */
12967static void
12968bxe_init(void *xsc)
12969{
12970    struct bxe_softc *sc = (struct bxe_softc *)xsc;
12971
12972    BXE_CORE_LOCK(sc);
12973    bxe_init_locked(sc);
12974    BXE_CORE_UNLOCK(sc);
12975}
12976
12977static int
12978bxe_init_ifnet(struct bxe_softc *sc)
12979{
12980    if_t ifp;
12981    int capabilities;
12982
12983    /* ifconfig entrypoint for media type/status reporting */
12984    ifmedia_init(&sc->ifmedia, IFM_IMASK,
12985                 bxe_ifmedia_update,
12986                 bxe_ifmedia_status);
12987
12988    /* set the default interface values */
12989    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
12990    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
12991    ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
12992
12993    sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
    BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media);
12995
12996    /* allocate the ifnet structure */
12997    if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
12998        BLOGE(sc, "Interface allocation failed!\n");
12999        return (ENXIO);
13000    }
13001
13002    if_setsoftc(ifp, sc);
13003    if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
13004    if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
13005    if_setioctlfn(ifp, bxe_ioctl);
13006    if_setstartfn(ifp, bxe_tx_start);
13007    if_setgetcounterfn(ifp, bxe_get_counter);
13008    if_settransmitfn(ifp, bxe_tx_mq_start);
13009    if_setqflushfn(ifp, bxe_mq_flush);
13010    if_setinitfn(ifp, bxe_init);
13011    if_setmtu(ifp, sc->mtu);
13012    if_sethwassist(ifp, (CSUM_IP      |
13013                        CSUM_TCP      |
13014                        CSUM_UDP      |
13015                        CSUM_TSO      |
13016                        CSUM_TCP_IPV6 |
13017                        CSUM_UDP_IPV6));
13018
13019    capabilities =
13020        (IFCAP_VLAN_MTU       |
13021         IFCAP_VLAN_HWTAGGING |
13022         IFCAP_VLAN_HWTSO     |
13023         IFCAP_VLAN_HWFILTER  |
13024         IFCAP_VLAN_HWCSUM    |
13025         IFCAP_HWCSUM         |
13026         IFCAP_JUMBO_MTU      |
13027         IFCAP_LRO            |
13028         IFCAP_TSO4           |
13029         IFCAP_TSO6           |
13030         IFCAP_WOL_MAGIC);
13031    if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
13032    if_setcapenable(ifp, if_getcapabilities(ifp));
13033    if_setbaudrate(ifp, IF_Gbps(10));
13034/* XXX */
13035    if_setsendqlen(ifp, sc->tx_ring_size);
13036    if_setsendqready(ifp);
13037/* XXX */
13038
13039    sc->ifp = ifp;
13040
13041    /* attach to the Ethernet interface list */
13042    ether_ifattach(ifp, sc->link_params.mac_addr);
13043
13044    /* Attach driver debugnet methods. */
13045    DEBUGNET_SET(ifp, bxe);
13046
13047    return (0);
13048}
13049
13050static void
13051bxe_deallocate_bars(struct bxe_softc *sc)
13052{
13053    int i;
13054
13055    for (i = 0; i < MAX_BARS; i++) {
13056        if (sc->bar[i].resource != NULL) {
13057            bus_release_resource(sc->dev,
13058                                 SYS_RES_MEMORY,
13059                                 sc->bar[i].rid,
13060                                 sc->bar[i].resource);
13061            BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n",
13062                  i, PCIR_BAR(i));
13063        }
13064    }
13065}
13066
13067static int
13068bxe_allocate_bars(struct bxe_softc *sc)
13069{
13070    u_int flags;
13071    int i;
13072
13073    memset(sc->bar, 0, sizeof(sc->bar));
13074
13075    for (i = 0; i < MAX_BARS; i++) {
13076
13077        /* memory resources reside at BARs 0, 2, 4 */
13078        /* Run `pciconf -lb` to see mappings */
13079        if ((i != 0) && (i != 2) && (i != 4)) {
13080            continue;
13081        }
13082
13083        sc->bar[i].rid = PCIR_BAR(i);
13084
13085        flags = RF_ACTIVE;
13086        if (i == 0) {
13087            flags |= RF_SHAREABLE;
13088        }
13089
13090        if ((sc->bar[i].resource =
13091             bus_alloc_resource_any(sc->dev,
13092                                    SYS_RES_MEMORY,
13093                                    &sc->bar[i].rid,
13094                                    flags)) == NULL) {
            /* report the failure; returning zero here would look like success */
            return (ENXIO);
13096        }
13097
13098        sc->bar[i].tag    = rman_get_bustag(sc->bar[i].resource);
13099        sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
13100        sc->bar[i].kva    = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
13101
13102        BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %#jx-%#jx (%jd) -> %#jx\n",
13103              i, PCIR_BAR(i),
13104              rman_get_start(sc->bar[i].resource),
13105              rman_get_end(sc->bar[i].resource),
13106              rman_get_size(sc->bar[i].resource),
13107              (uintmax_t)sc->bar[i].kva);
13108    }
13109
13110    return (0);
13111}
13112
13113static void
13114bxe_get_function_num(struct bxe_softc *sc)
13115{
13116    uint32_t val = 0;
13117
13118    /*
13119     * Read the ME register to get the function number. The ME register
13120     * holds the relative-function number and absolute-function number. The
13121     * absolute-function number appears only in E2 and above. Before that
13122     * these bits always contained zero, therefore we cannot blindly use them.
13123     */
13124
13125    val = REG_RD(sc, BAR_ME_REGISTER);
13126
13127    sc->pfunc_rel =
13128        (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT);
13129    sc->path_id =
13130        (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1;
13131
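    /*
     * Compose the absolute function number from the relative number and the
     * path id: in 4-port mode the path id occupies the bit below the relative
     * number, otherwise it is simply OR'd in.
     */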
13132    if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13133        sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
13134    } else {
13135        sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
13136    }
13137
13138    BLOGD(sc, DBG_LOAD,
13139          "Relative function %d, Absolute function %d, Path %d\n",
13140          sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
13141}
13142
13143static uint32_t
13144bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc)
13145{
13146    uint32_t shmem2_size;
13147    uint32_t offset;
13148    uint32_t mf_cfg_offset_value;
13149
    /* non-57712: the MF config region follows the per-function mailboxes */
13151    offset = (SHMEM_RD(sc, func_mb) +
13152              (MAX_FUNC_NUM * sizeof(struct drv_func_mb)));
13153
    /* 57712 and newer: shmem2 may provide the MF config address explicitly */
13155    if (sc->devinfo.shmem2_base != 0) {
13156        shmem2_size = SHMEM2_RD(sc, size);
13157        if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) {
13158            mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr);
13159            if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) {
13160                offset = mf_cfg_offset_value;
13161            }
13162        }
13163    }
13164
13165    return (offset);
13166}
13167
13168static uint32_t
13169bxe_pcie_capability_read(struct bxe_softc *sc,
13170                         int    reg,
13171                         int    width)
13172{
13173    int pcie_reg;
13174
13175    /* ensure PCIe capability is enabled */
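    /* pci_find_cap() returns 0 on success and stores the capability offset */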
13176    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
13177        if (pcie_reg != 0) {
13178            BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg);
13179            return (pci_read_config(sc->dev, (pcie_reg + reg), width));
13180        }
13181    }
13182
13183    BLOGE(sc, "PCIe capability NOT FOUND!!!\n");
13184
13185    return (0);
13186}
13187
13188static uint8_t
13189bxe_is_pcie_pending(struct bxe_softc *sc)
13190{
13191    return (bxe_pcie_capability_read(sc, PCIER_DEVICE_STA, 2) &
13192            PCIEM_STA_TRANSACTION_PND);
13193}
13194
/*
 * Walk the PCI capabilities list for the device to find what features are
 * supported. These capabilities may be enabled/disabled by firmware so it's
 * best to walk the list rather than make assumptions.
 */
13200static void
13201bxe_probe_pci_caps(struct bxe_softc *sc)
13202{
13203    uint16_t link_status;
13204    int reg;
13205
13206    /* check if PCI Power Management is enabled */
13207    if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
13208        if (reg != 0) {
13209            BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg);
13210
13211            sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
13212            sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
13213        }
13214    }
13215
13216    link_status = bxe_pcie_capability_read(sc, PCIER_LINK_STA, 2);
13217
13218    /* handle PCIe 2.0 workarounds for 57710 */
13219    if (CHIP_IS_E1(sc)) {
13220        /* workaround for 57710 errata E4_57710_27462 */
13221        sc->devinfo.pcie_link_speed =
13222            (REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1;
13223
13224        /* workaround for 57710 errata E4_57710_27488 */
13225        sc->devinfo.pcie_link_width =
13226            ((link_status & PCIEM_LINK_STA_WIDTH) >> 4);
13227        if (sc->devinfo.pcie_link_speed > 1) {
13228            sc->devinfo.pcie_link_width =
13229                ((link_status & PCIEM_LINK_STA_WIDTH) >> 4) >> 1;
13230        }
13231    } else {
13232        sc->devinfo.pcie_link_speed =
13233            (link_status & PCIEM_LINK_STA_SPEED);
13234        sc->devinfo.pcie_link_width =
13235            ((link_status & PCIEM_LINK_STA_WIDTH) >> 4);
13236    }
13237
13238    BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n",
13239          sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
13240
    sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
    /* look up the PCIe capability offset itself ('reg' still holds the PM offset) */
    if (pci_find_cap(sc->dev, PCIY_EXPRESS, &reg) == 0) {
        sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
    }
13243
13244    /* check if MSI capability is enabled */
13245    if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
13246        if (reg != 0) {
13247            BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg);
13248
13249            sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
13250            sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
13251        }
13252    }
13253
13254    /* check if MSI-X capability is enabled */
13255    if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
13256        if (reg != 0) {
13257            BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
13258
13259            sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
13260            sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
13261        }
13262    }
13263}
13264
13265static int
13266bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
13267{
13268    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13269    uint32_t val;
13270
13271    /* get the outer vlan if we're in switch-dependent mode */
13272
13273    val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13274    mf_info->ext_id = (uint16_t)val;
13275
13276    mf_info->multi_vnics_mode = 1;
13277
13278    if (!VALID_OVLAN(mf_info->ext_id)) {
13279        BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
13280        return (1);
13281    }
13282
13283    /* get the capabilities */
13284    if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13285        FUNC_MF_CFG_PROTOCOL_ISCSI) {
13286        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13287    } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13288               FUNC_MF_CFG_PROTOCOL_FCOE) {
13289        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13290    } else {
13291        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13292    }
13293
13294    mf_info->vnics_per_port =
13295        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13296
13297    return (0);
13298}
13299
13300static uint32_t
13301bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc)
13302{
13303    uint32_t retval = 0;
13304    uint32_t val;
13305
13306    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13307
13308    if (val & MACP_FUNC_CFG_FLAGS_ENABLED) {
13309        if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) {
13310            retval |= MF_PROTO_SUPPORT_ETHERNET;
13311        }
13312        if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
13313            retval |= MF_PROTO_SUPPORT_ISCSI;
13314        }
13315        if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
13316            retval |= MF_PROTO_SUPPORT_FCOE;
13317        }
13318    }
13319
13320    return (retval);
13321}
13322
13323static int
13324bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc)
13325{
13326    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13327    uint32_t val;
13328
13329    /*
13330     * There is no outer vlan if we're in switch-independent mode.
13331     * If the mac is valid then assume multi-function.
13332     */
13333
13334    val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg);
13335
13336    mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13337
13338    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13339
13340    mf_info->vnics_per_port =
13341        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13342
13343    return (0);
13344}
13345
13346static int
13347bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
13348{
13349    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13350    uint32_t e1hov_tag;
13351    uint32_t func_config;
13352    uint32_t niv_config;
13353
13354    mf_info->multi_vnics_mode = 1;
13355
13356    e1hov_tag   = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13357    func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13358    niv_config  = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);
13359
13360    mf_info->ext_id =
13361        (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
13362                   FUNC_MF_CFG_E1HOV_TAG_SHIFT);
13363
13364    mf_info->default_vlan =
13365        (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
13366                   FUNC_MF_CFG_AFEX_VLAN_SHIFT);
13367
13368    mf_info->niv_allowed_priorities =
13369        (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
13370                  FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);
13371
13372    mf_info->niv_default_cos =
13373        (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
13374                  FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);
13375
13376    mf_info->afex_vlan_mode =
13377        ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
13378         FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);
13379
13380    mf_info->niv_mba_enabled =
13381        ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
13382         FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);
13383
13384    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13385
13386    mf_info->vnics_per_port =
13387        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;
13388
13389    return (0);
13390}
13391
13392static int
13393bxe_check_valid_mf_cfg(struct bxe_softc *sc)
13394{
13395    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13396    uint32_t mf_cfg1;
13397    uint32_t mf_cfg2;
13398    uint32_t ovlan1;
13399    uint32_t ovlan2;
13400    uint8_t i, j;
13401
13402    BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
13403          SC_PORT(sc));
13404    BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
13405          mf_info->mf_config[SC_VN(sc)]);
13406    BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
13407          mf_info->multi_vnics_mode);
13408    BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
13409          mf_info->vnics_per_port);
13410    BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
13411          mf_info->ext_id);
13412    BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
13413          mf_info->min_bw[0], mf_info->min_bw[1],
13414          mf_info->min_bw[2], mf_info->min_bw[3]);
13415    BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
13416          mf_info->max_bw[0], mf_info->max_bw[1],
13417          mf_info->max_bw[2], mf_info->max_bw[3]);
13418    BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
13419          sc->mac_addr_str);
13420
13421    /* various MF mode sanity checks... */
13422
13423    if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13424        BLOGE(sc, "Enumerated function %d is marked as hidden\n",
13425              SC_PORT(sc));
13426        return (1);
13427    }
13428
13429    if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13430        BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
13431              mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13432        return (1);
13433    }
13434
13435    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13436        /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13437        if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
13438            BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
13439                  SC_VN(sc), OVLAN(sc));
13440            return (1);
13441        }
13442
13443        if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13444            BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
13445                  mf_info->multi_vnics_mode, OVLAN(sc));
13446            return (1);
13447        }
13448
        /*
         * Verify all functions are either MF or SF mode. If MF, make sure
         * that all non-hidden functions have a valid ovlan. If SF, make sure
         * that all non-hidden functions have an invalid ovlan.
         */
13454        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13455            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13456            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13457            if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13458                (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13459                 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13460                BLOGE(sc, "mf_mode=SD function %d MF config "
13461                          "mismatch, multi_vnics_mode=%d ovlan=%d\n",
13462                      i, mf_info->multi_vnics_mode, ovlan1);
13463                return (1);
13464            }
13465        }
13466
13467        /* Verify all funcs on the same port each have a different ovlan. */
13468        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13469            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
13470            ovlan1  = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
13471            /* iterate from the next function on the port to the max func */
13472            for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
13473                mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
13474                ovlan2  = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
13475                if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
13476                    VALID_OVLAN(ovlan1) &&
13477                    !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
13478                    VALID_OVLAN(ovlan2) &&
13479                    (ovlan1 == ovlan2)) {
13480                    BLOGE(sc, "mf_mode=SD functions %d and %d "
13481                              "have the same ovlan (%d)\n",
13482                          i, j, ovlan1);
13483                    return (1);
13484                }
13485            }
13486        }
13487    } /* MULTI_FUNCTION_SD */
13488
13489    return (0);
13490}
13491
13492static int
13493bxe_get_mf_cfg_info(struct bxe_softc *sc)
13494{
13495    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13496    uint32_t val, mac_upper;
13497    uint8_t i, vnic;
13498
13499    /* initialize mf_info defaults */
13500    mf_info->vnics_per_port   = 1;
13501    mf_info->multi_vnics_mode = FALSE;
13502    mf_info->path_has_ovlan   = FALSE;
13503    mf_info->mf_mode          = SINGLE_FUNCTION;
13504
13505    if (!CHIP_IS_MF_CAP(sc)) {
13506        return (0);
13507    }
13508
13509    if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13510        BLOGE(sc, "Invalid mf_cfg_base!\n");
13511        return (1);
13512    }
13513
13514    /* get the MF mode (switch dependent / independent / single-function) */
13515
13516    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13517
13518    switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
13519    {
13520    case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
13521
13522        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13523
13524        /* check for legal upper mac bytes */
13525        if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
13526            mf_info->mf_mode = MULTI_FUNCTION_SI;
13527        } else {
13528            BLOGE(sc, "Invalid config for Switch Independent mode\n");
13529        }
13530
13531        break;
13532
13533    case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
13534    case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:
13535
13536        /* get outer vlan configuration */
13537        val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
13538
13539        if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
13540            FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
13541            mf_info->mf_mode = MULTI_FUNCTION_SD;
13542        } else {
13543            BLOGE(sc, "Invalid config for Switch Dependent mode\n");
13544        }
13545
13546        break;
13547
13548    case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
13549
13550        /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
13551        return (0);
13552
13553    case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
13554
13555        /*
13556         * Mark MF mode as NIV if MCP version includes NPAR-SD support
13557         * and the MAC address is valid.
13558         */
13559        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13560
13561        if ((SHMEM2_HAS(sc, afex_driver_support)) &&
13562            (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
13563            mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13564        } else {
13565            BLOGE(sc, "Invalid config for AFEX mode\n");
13566        }
13567
13568        break;
13569
13570    default:
13571
13572        BLOGE(sc, "Unknown MF mode (0x%08x)\n",
13573              (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
13574
13575        return (1);
13576    }
13577
13578    /* set path mf_mode (which could be different than function mf_mode) */
13579    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13580        mf_info->path_has_ovlan = TRUE;
13581    } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
        /*
         * Decide on the path multi vnics mode. If we're not in MF mode and
         * are in 4-port mode, it is enough to check vnic-0 of the other port
         * on the same path.
         */
13587        if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
13588            uint8_t other_port = !(PORT_ID(sc) & 1);
13589            uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));
13590
13591            val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);
13592
13593            mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13594        }
13595    }
13596
13597    if (mf_info->mf_mode == SINGLE_FUNCTION) {
13598        /* invalid MF config */
13599        if (SC_VN(sc) >= 1) {
13600            BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
13601            return (1);
13602        }
13603
13604        return (0);
13605    }
13606
13607    /* get the MF configuration */
13608    mf_info->mf_config[SC_VN(sc)] =
13609        MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
13610
13611    switch(mf_info->mf_mode)
13612    {
13613    case MULTI_FUNCTION_SD:
13614
13615        bxe_get_shmem_mf_cfg_info_sd(sc);
13616        break;
13617
13618    case MULTI_FUNCTION_SI:
13619
13620        bxe_get_shmem_mf_cfg_info_si(sc);
13621        break;
13622
13623    case MULTI_FUNCTION_AFEX:
13624
13625        bxe_get_shmem_mf_cfg_info_niv(sc);
13626        break;
13627
13628    default:
13629
13630        BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
13631              mf_info->mf_mode);
13632        return (1);
13633    }
13634
13635    /* get the congestion management parameters */
13636
13637    vnic = 0;
13638    FOREACH_ABS_FUNC_IN_PORT(sc, i) {
13639        /* get min/max bw */
13640        val = MFCFG_RD(sc, func_mf_config[i].config);
13641        mf_info->min_bw[vnic] =
13642            ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
13643        mf_info->max_bw[vnic] =
13644            ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
13645        vnic++;
13646    }
13647
13648    return (bxe_check_valid_mf_cfg(sc));
13649}
13650
13651static int
13652bxe_get_shmem_info(struct bxe_softc *sc)
13653{
13654    int port;
13655    uint32_t mac_hi, mac_lo, val;
13656
13657    port = SC_PORT(sc);
13658    mac_hi = mac_lo = 0;
13659
13660    sc->link_params.sc   = sc;
13661    sc->link_params.port = port;
13662
13663    /* get the hardware config info */
13664    sc->devinfo.hw_config =
13665        SHMEM_RD(sc, dev_info.shared_hw_config.config);
13666    sc->devinfo.hw_config2 =
13667        SHMEM_RD(sc, dev_info.shared_hw_config.config2);
13668
13669    sc->link_params.hw_led_mode =
13670        ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13671         SHARED_HW_CFG_LED_MODE_SHIFT);
13672
13673    /* get the port feature config */
13674    sc->port.config =
13675        SHMEM_RD(sc, dev_info.port_feature_config[port].config);
13676
13677    /* get the link params */
13678    sc->link_params.speed_cap_mask[0] =
13679        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask);
13680    sc->link_params.speed_cap_mask[1] =
13681        SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2);
13682
13683    /* get the lane config */
13684    sc->link_params.lane_config =
13685        SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config);
13686
13687    /* get the link config */
13688    val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config);
13689    sc->port.link_config[ELINK_INT_PHY] = val;
13690    sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13691    sc->port.link_config[ELINK_EXT_PHY1] =
13692        SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2);
13693
13694    /* get the override preemphasis flag and enable it or turn it off */
13695    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);
13696    if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) {
13697        sc->link_params.feature_config_flags |=
13698            ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13699    } else {
13700        sc->link_params.feature_config_flags &=
13701            ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
13702    }
13703
13704    /* get the initial value of the link params */
13705    sc->link_params.multi_phy_config =
13706        SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config);
13707
13708    /* get external phy info */
13709    sc->port.ext_phy_config =
13710        SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config);
13711
13712    /* get the multifunction configuration */
13713    bxe_get_mf_cfg_info(sc);
13714
13715    /* get the mac address */
13716    if (IS_MF(sc)) {
13717        mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);
13718        mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower);
13719    } else {
13720        mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper);
13721        mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower);
13722    }
13723
13724    if ((mac_lo == 0) && (mac_hi == 0)) {
13725        *sc->mac_addr_str = 0;
13726        BLOGE(sc, "No Ethernet address programmed!\n");
13727    } else {
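        /* mac_hi holds the two most-significant MAC bytes, mac_lo the remaining four */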
13728        sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13729        sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13730        sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13731        sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13732        sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13733        sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13734        snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13735                 "%02x:%02x:%02x:%02x:%02x:%02x",
13736                 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13737                 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13738                 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
13739        BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
13740    }
13741
13742    return (0);
13743}
13744
13745static void
13746bxe_get_tunable_params(struct bxe_softc *sc)
13747{
13748    /* sanity checks */
13749
13750    if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
13751        (bxe_interrupt_mode != INTR_MODE_MSI)  &&
13752        (bxe_interrupt_mode != INTR_MODE_MSIX)) {
13753        BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
13754        bxe_interrupt_mode = INTR_MODE_MSIX;
13755    }
13756
13757    if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
13758        BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
13759        bxe_queue_count = 0;
13760    }
13761
13762    if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
13763        if (bxe_max_rx_bufs == 0) {
13764            bxe_max_rx_bufs = RX_BD_USABLE;
13765        } else {
13766            BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
13767            bxe_max_rx_bufs = 2048;
13768        }
13769    }
13770
13771    if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
13772        BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
13773        bxe_hc_rx_ticks = 25;
13774    }
13775
13776    if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
13777        BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
13778        bxe_hc_tx_ticks = 50;
13779    }
13780
13781    if (bxe_max_aggregation_size == 0) {
13782        bxe_max_aggregation_size = TPA_AGG_SIZE;
13783    }
13784
13785    if (bxe_max_aggregation_size > 0xffff) {
13786        BLOGW(sc, "invalid max_aggregation_size (%d)\n",
13787              bxe_max_aggregation_size);
13788        bxe_max_aggregation_size = TPA_AGG_SIZE;
13789    }
13790
13791    if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13792        BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
13793        bxe_mrrs = -1;
13794    }
13795
13796    if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
13797        BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
13798        bxe_autogreeen = 0;
13799    }
13800
13801    if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
13802        BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
13803        bxe_udp_rss = 0;
13804    }
13805
13806    /* pull in user settings */
13807
13808    sc->interrupt_mode       = bxe_interrupt_mode;
13809    sc->max_rx_bufs          = bxe_max_rx_bufs;
13810    sc->hc_rx_ticks          = bxe_hc_rx_ticks;
13811    sc->hc_tx_ticks          = bxe_hc_tx_ticks;
13812    sc->max_aggregation_size = bxe_max_aggregation_size;
13813    sc->mrrs                 = bxe_mrrs;
13814    sc->autogreeen           = bxe_autogreeen;
13815    sc->udp_rss              = bxe_udp_rss;
13816
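    /*
     * Derive the queue count: INTx is limited to a single queue. With MSI or
     * MSI-X use the user setting (or one queue per CPU when unset), capped at
     * MAX_RSS_CHAINS and the number of CPUs.
     */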
13817    if (bxe_interrupt_mode == INTR_MODE_INTX) {
13818        sc->num_queues = 1;
13819    } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
13820        sc->num_queues =
13821            min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
13822                MAX_RSS_CHAINS);
13823        if (sc->num_queues > mp_ncpus) {
13824            sc->num_queues = mp_ncpus;
13825        }
13826    }
13827
13828    BLOGD(sc, DBG_LOAD,
13829          "User Config: "
13830          "debug=0x%lx "
13831          "interrupt_mode=%d "
13832          "queue_count=%d "
13833          "hc_rx_ticks=%d "
13834          "hc_tx_ticks=%d "
13835          "rx_budget=%d "
13836          "max_aggregation_size=%d "
13837          "mrrs=%d "
13838          "autogreeen=%d "
13839          "udp_rss=%d\n",
13840          bxe_debug,
13841          sc->interrupt_mode,
13842          sc->num_queues,
13843          sc->hc_rx_ticks,
13844          sc->hc_tx_ticks,
13845          bxe_rx_budget,
13846          sc->max_aggregation_size,
13847          sc->mrrs,
13848          sc->autogreeen,
13849          sc->udp_rss);
13850}
13851
13852static int
13853bxe_media_detect(struct bxe_softc *sc)
13854{
13855    int port_type;
13856    uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
13857
13858    switch (sc->link_params.phy[phy_idx].media_type) {
13859    case ELINK_ETH_PHY_SFPP_10G_FIBER:
13860    case ELINK_ETH_PHY_XFP_FIBER:
13861        BLOGI(sc, "Found 10Gb Fiber media.\n");
13862        sc->media = IFM_10G_SR;
13863        port_type = PORT_FIBRE;
13864        break;
13865    case ELINK_ETH_PHY_SFP_1G_FIBER:
13866        BLOGI(sc, "Found 1Gb Fiber media.\n");
13867        sc->media = IFM_1000_SX;
13868        port_type = PORT_FIBRE;
13869        break;
13870    case ELINK_ETH_PHY_KR:
13871    case ELINK_ETH_PHY_CX4:
13872        BLOGI(sc, "Found 10GBase-CX4 media.\n");
13873        sc->media = IFM_10G_CX4;
13874        port_type = PORT_FIBRE;
13875        break;
13876    case ELINK_ETH_PHY_DA_TWINAX:
13877        BLOGI(sc, "Found 10Gb Twinax media.\n");
13878        sc->media = IFM_10G_TWINAX;
13879        port_type = PORT_DA;
13880        break;
13881    case ELINK_ETH_PHY_BASE_T:
13882        if (sc->link_params.speed_cap_mask[0] &
13883            PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
13884            BLOGI(sc, "Found 10GBase-T media.\n");
13885            sc->media = IFM_10G_T;
13886            port_type = PORT_TP;
13887        } else {
13888            BLOGI(sc, "Found 1000Base-T media.\n");
13889            sc->media = IFM_1000_T;
13890            port_type = PORT_TP;
13891        }
13892        break;
13893    case ELINK_ETH_PHY_NOT_PRESENT:
13894        BLOGI(sc, "Media not present.\n");
13895        sc->media = 0;
13896        port_type = PORT_OTHER;
13897        break;
13898    case ELINK_ETH_PHY_UNSPECIFIED:
13899    default:
13900        BLOGI(sc, "Unknown media!\n");
13901        sc->media = 0;
13902        port_type = PORT_OTHER;
13903        break;
13904    }
13905    return port_type;
    return (port_type);
13907
13908#define GET_FIELD(value, fname)                     \
13909    (((value) & (fname##_MASK)) >> (fname##_SHIFT))
13910#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
13911#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
13912
13913static int
13914bxe_get_igu_cam_info(struct bxe_softc *sc)
13915{
13916    int pfid = SC_FUNC(sc);
13917    int igu_sb_id;
13918    uint32_t val;
13919    uint8_t fid, igu_sb_cnt = 0;
13920
13921    sc->igu_base_sb = 0xff;
13922
13923    if (CHIP_INT_MODE_IS_BC(sc)) {
13924        int vn = SC_VN(sc);
13925        igu_sb_cnt = sc->igu_sb_cnt;
13926        sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
13927                           FP_SB_MAX_E1x);
13928        sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
13929                          (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
13930        return (0);
13931    }
13932
13933    /* IGU in normal mode - read CAM */
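    /*
     * Each CAM entry maps an IGU status block to a function id and vector.
     * Count the vectors owned by this PF and record the default (vector 0)
     * and first non-default status block ids.
     */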
13934    for (igu_sb_id = 0;
13935         igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
13936         igu_sb_id++) {
13937        val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
13938        if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
13939            continue;
13940        }
13941        fid = IGU_FID(val);
13942        if ((fid & IGU_FID_ENCODE_IS_PF)) {
13943            if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
13944                continue;
13945            }
13946            if (IGU_VEC(val) == 0) {
13947                /* default status block */
13948                sc->igu_dsb_id = igu_sb_id;
13949            } else {
13950                if (sc->igu_base_sb == 0xff) {
13951                    sc->igu_base_sb = igu_sb_id;
13952                }
13953                igu_sb_cnt++;
13954            }
13955        }
13956    }
13957
    /*
     * Due to the new PF resource allocation by MFW T7.4 and above, the number
     * of CAM entries may not be equal to the value advertised in PCI config
     * space. The driver should use the minimum of the two as the actual
     * status block count.
     */
13964    sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
13965
13966    if (igu_sb_cnt == 0) {
13967        BLOGE(sc, "CAM configuration error\n");
13968        return (-1);
13969    }
13970
13971    return (0);
13972}
13973
13974/*
13975 * Gather various information from the device config space, the device itself,
13976 * shmem, and the user input.
13977 */
13978static int
13979bxe_get_device_info(struct bxe_softc *sc)
13980{
13981    uint32_t val;
13982    int rc;
13983
13984    /* Get the data for the device */
13985    sc->devinfo.vendor_id    = pci_get_vendor(sc->dev);
13986    sc->devinfo.device_id    = pci_get_device(sc->dev);
13987    sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
13988    sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
13989
13990    /* get the chip revision (chip metal comes from pci config space) */
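    /* chip_id layout: [31:16] chip num, [15:12] rev, [11:4] metal, [3:0] bond */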
13991    sc->devinfo.chip_id     =
13992    sc->link_params.chip_id =
13993        (((REG_RD(sc, MISC_REG_CHIP_NUM)                   & 0xffff) << 16) |
13994         ((REG_RD(sc, MISC_REG_CHIP_REV)                   & 0xf)    << 12) |
13995         (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf)    << 4)  |
13996         ((REG_RD(sc, MISC_REG_BOND_ID)                    & 0xf)    << 0));
13997
13998    /* force 57811 according to MISC register */
13999    if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
14000        if (CHIP_IS_57810(sc)) {
14001            sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
14002                                   (sc->devinfo.chip_id & 0x0000ffff));
14003        } else if (CHIP_IS_57810_MF(sc)) {
14004            sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
14005                                   (sc->devinfo.chip_id & 0x0000ffff));
14006        }
14007        sc->devinfo.chip_id |= 0x1;
14008    }
14009
14010    BLOGD(sc, DBG_LOAD,
14011          "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n",
14012          sc->devinfo.chip_id,
14013          ((sc->devinfo.chip_id >> 16) & 0xffff),
14014          ((sc->devinfo.chip_id >> 12) & 0xf),
14015          ((sc->devinfo.chip_id >>  4) & 0xff),
14016          ((sc->devinfo.chip_id >>  0) & 0xf));
14017
14018    val = (REG_RD(sc, 0x2874) & 0x55);
14019    if ((sc->devinfo.chip_id & 0x1) ||
14020        (CHIP_IS_E1(sc) && val) ||
14021        (CHIP_IS_E1H(sc) && (val == 0x55))) {
14022        sc->flags |= BXE_ONE_PORT_FLAG;
14023        BLOGD(sc, DBG_LOAD, "single port device\n");
14024    }
14025
14026    /* set the doorbell size */
14027    sc->doorbell_size = (1 << BXE_DB_SHIFT);
14028
14029    /* determine whether the device is in 2 port or 4 port mode */
    sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h */
14031    if (CHIP_IS_E2E3(sc)) {
14032        /*
14033         * Read port4mode_en_ovwr[0]:
14034         *   If 1, four port mode is in port4mode_en_ovwr[1].
14035         *   If 0, four port mode is in port4mode_en[0].
14036         */
14037        val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
14038        if (val & 1) {
14039            val = ((val >> 1) & 1);
14040        } else {
14041            val = REG_RD(sc, MISC_REG_PORT4MODE_EN);
14042        }
14043
14044        sc->devinfo.chip_port_mode =
14045            (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
14046
14047        BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2");
14048    }
14049
14050    /* get the function and path info for the device */
14051    bxe_get_function_num(sc);
14052
14053    /* get the shared memory base address */
14054    sc->devinfo.shmem_base     =
14055    sc->link_params.shmem_base =
14056        REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
14057    sc->devinfo.shmem2_base =
14058        REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 :
14059                                  MISC_REG_GENERIC_CR_0));
14060
14061    BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n",
14062          sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
14063
14064    if (!sc->devinfo.shmem_base) {
14065        /* this should ONLY prevent upcoming shmem reads */
14066        BLOGI(sc, "MCP not active\n");
14067        sc->flags |= BXE_NO_MCP_FLAG;
14068        return (0);
14069    }
14070
14071    /* make sure the shared memory contents are valid */
14072    val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
14073    if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
14074        (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
14075        BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val);
14076        return (0);
14077    }
14078    BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val);
14079
14080    /* get the bootcode version */
14081    sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
14082    snprintf(sc->devinfo.bc_ver_str,
14083             sizeof(sc->devinfo.bc_ver_str),
14084             "%d.%d.%d",
14085             ((sc->devinfo.bc_ver >> 24) & 0xff),
14086             ((sc->devinfo.bc_ver >> 16) & 0xff),
14087             ((sc->devinfo.bc_ver >>  8) & 0xff));
14088    BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
14089
14090    /* get the bootcode shmem address */
14091    sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
    BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);
14093
14094    /* clean indirect addresses as they're not used */
14095    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
14096    if (IS_PF(sc)) {
14097        REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0);
14098        REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0);
14099        REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0);
14100        REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0);
14101        if (CHIP_IS_E1x(sc)) {
14102            REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0);
14103            REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0);
14104            REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
14105            REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
14106        }
14107
14108        /*
14109         * Enable internal target-read (in case we are probed after PF
14110         * FLR). Must be done prior to any BAR read access. Only for
14111         * 57712 and up
14112         */
14113        if (!CHIP_IS_E1x(sc)) {
14114            REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
14115        }
14116    }
14117
14118    /* get the nvram size */
14119    val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4);
14120    sc->devinfo.flash_size =
14121        (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
14122    BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
14123
    /* get PCI capabilities */
14125    bxe_probe_pci_caps(sc);
14126
14127    bxe_set_power_state(sc, PCI_PM_D0);
14128
14129    /* get various configuration parameters from shmem */
14130    bxe_get_shmem_info(sc);
14131
14132    if (sc->devinfo.pcie_msix_cap_reg != 0) {
14133        val = pci_read_config(sc->dev,
14134                              (sc->devinfo.pcie_msix_cap_reg +
14135                               PCIR_MSIX_CTRL),
14136                              2);
14137        sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
14138    } else {
14139        sc->igu_sb_cnt = 1;
14140    }
14141
14142    sc->igu_base_addr = BAR_IGU_INTMEM;
14143
14144    /* initialize IGU parameters */
14145    if (CHIP_IS_E1x(sc)) {
14146        sc->devinfo.int_block = INT_BLOCK_HC;
14147        sc->igu_dsb_id = DEF_SB_IGU_ID;
14148        sc->igu_base_sb = 0;
14149    } else {
14150        sc->devinfo.int_block = INT_BLOCK_IGU;
14151
        /* do not allow device reset during IGU info processing */
14153        bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14154
14155        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
14156
14157        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14158            int tout = 5000;
14159
14160            BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n");
14161
14162            val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
14163            REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val);
14164            REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f);
14165
14166            while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14167                tout--;
14168                DELAY(1000);
14169            }
14170
14171            if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
14172                BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n");
14173                bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14174                return (-1);
14175            }
14176        }
14177
14178        if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
14179            BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n");
14180            sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
14181        } else {
14182            BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n");
14183        }
14184
14185        rc = bxe_get_igu_cam_info(sc);
14186
14187        bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
14188
14189        if (rc) {
14190            return (rc);
14191        }
14192    }
14193
14194    /*
14195     * Get base FW non-default (fast path) status block ID. This value is
14196     * used to initialize the fw_sb_id saved on the fp/queue structure to
14197     * determine the id used by the FW.
14198     */
14199    if (CHIP_IS_E1x(sc)) {
14200        sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
14201    } else {
14202        /*
14203         * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
14204         * the same queue are indicated on the same IGU SB). So we prefer
14205         * FW and IGU SBs to be the same value.
14206         */
14207        sc->base_fw_ndsb = sc->igu_base_sb;
14208    }
14209
14210    BLOGD(sc, DBG_LOAD,
14211          "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n",
14212          sc->igu_dsb_id, sc->igu_base_sb,
14213          sc->igu_sb_cnt, sc->base_fw_ndsb);
14214
14215    elink_phy_probe(&sc->link_params);
14216
14217    return (0);
14218}
14219
14220static void
14221bxe_link_settings_supported(struct bxe_softc *sc,
14222                            uint32_t         switch_cfg)
14223{
14224    uint32_t cfg_size = 0;
14225    uint32_t idx;
14226    uint8_t port = SC_PORT(sc);
14227
14228    /* aggregation of supported attributes of all external phys */
14229    sc->port.supported[0] = 0;
14230    sc->port.supported[1] = 0;
14231
14232    switch (sc->link_params.num_phys) {
14233    case 1:
14234        sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
14235        cfg_size = 1;
14236        break;
14237    case 2:
14238        sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
14239        cfg_size = 1;
14240        break;
14241    case 3:
14242        if (sc->link_params.multi_phy_config &
14243            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
14244            sc->port.supported[1] =
14245                sc->link_params.phy[ELINK_EXT_PHY1].supported;
14246            sc->port.supported[0] =
14247                sc->link_params.phy[ELINK_EXT_PHY2].supported;
14248        } else {
14249            sc->port.supported[0] =
14250                sc->link_params.phy[ELINK_EXT_PHY1].supported;
14251            sc->port.supported[1] =
14252                sc->link_params.phy[ELINK_EXT_PHY2].supported;
14253        }
14254        cfg_size = 2;
14255        break;
14256    }
14257
14258    if (!(sc->port.supported[0] || sc->port.supported[1])) {
14259        BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
14260              SHMEM_RD(sc,
14261                       dev_info.port_hw_config[port].external_phy_config),
14262              SHMEM_RD(sc,
14263                       dev_info.port_hw_config[port].external_phy_config2));
14264        return;
14265    }
14266
    if (CHIP_IS_E3(sc)) {
        sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
    } else {
14270        switch (switch_cfg) {
14271        case ELINK_SWITCH_CFG_1G:
14272            sc->port.phy_addr =
14273                REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
14274            break;
14275        case ELINK_SWITCH_CFG_10G:
14276            sc->port.phy_addr =
14277                REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
14278            break;
14279        default:
14280            BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
14281                  sc->port.link_config[0]);
14282            return;
14283        }
14284    }
14285
14286    BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14287
14288    /* mask what we support according to speed_cap_mask per configuration */
14289    for (idx = 0; idx < cfg_size; idx++) {
14290        if (!(sc->link_params.speed_cap_mask[idx] &
14291              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
14292            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14293        }
14294
14295        if (!(sc->link_params.speed_cap_mask[idx] &
14296              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
14297            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14298        }
14299
14300        if (!(sc->link_params.speed_cap_mask[idx] &
14301              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
14302            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14303        }
14304
14305        if (!(sc->link_params.speed_cap_mask[idx] &
14306              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
14307            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14308        }
14309
14310        if (!(sc->link_params.speed_cap_mask[idx] &
14311              PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
14312            sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14313        }
14314
14315        if (!(sc->link_params.speed_cap_mask[idx] &
14316              PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
14317            sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14318        }
14319
14320        if (!(sc->link_params.speed_cap_mask[idx] &
14321              PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
14322            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14323        }
14324
14325        if (!(sc->link_params.speed_cap_mask[idx] &
14326              PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
14327            sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
14328        }
14329    }
14330
14331    BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
14332          sc->port.supported[0], sc->port.supported[1]);
14333    ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n",
14334                   sc->port.supported[0], sc->port.supported[1]);
14335}
14336
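/*
 * Translate the per-PHY link_config values read from NVRAM into the elink
 * request parameters (req_line_speed, req_duplex, req_flow_ctrl) and the
 * advertised capability masks, validating each requested speed against the
 * supported[] masks computed by bxe_link_settings_supported().
 */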
14337static void
14338bxe_link_settings_requested(struct bxe_softc *sc)
14339{
14340    uint32_t link_config;
14341    uint32_t idx;
14342    uint32_t cfg_size = 0;
14343
14344    sc->port.advertising[0] = 0;
14345    sc->port.advertising[1] = 0;
14346
14347    switch (sc->link_params.num_phys) {
14348    case 1:
14349    case 2:
14350        cfg_size = 1;
14351        break;
14352    case 3:
14353        cfg_size = 2;
14354        break;
14355    }
14356
14357    for (idx = 0; idx < cfg_size; idx++) {
14358        sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14359        link_config = sc->port.link_config[idx];
14360
14361        switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
14362        case PORT_FEATURE_LINK_SPEED_AUTO:
14363            if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14364                sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14365                sc->port.advertising[idx] |= sc->port.supported[idx];
14366                if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14367                    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
14368                    sc->port.advertising[idx] |=
14369                        (ELINK_SUPPORTED_100baseT_Half |
14370                         ELINK_SUPPORTED_100baseT_Full);
14371            } else {
14372                /* force 10G, no AN */
14373                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14374                sc->port.advertising[idx] |=
14375                    (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
14376                continue;
14377            }
14378            break;
14379
14380        case PORT_FEATURE_LINK_SPEED_10M_FULL:
14381            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14382                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14383                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14384                                              ADVERTISED_TP);
14385            } else {
14386                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14387                          "speed_cap_mask=0x%08x\n",
14388                      link_config, sc->link_params.speed_cap_mask[idx]);
14389                return;
14390            }
14391            break;
14392
14393        case PORT_FEATURE_LINK_SPEED_10M_HALF:
14394            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14395                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14396                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14397                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14398                                              ADVERTISED_TP);
14399                ELINK_DEBUG_P1(sc, "driver requesting DUPLEX_HALF req_duplex = %x!\n",
14400                               sc->link_params.req_duplex[idx]);
14401            } else {
14402                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14403                          "speed_cap_mask=0x%08x\n",
14404                      link_config, sc->link_params.speed_cap_mask[idx]);
14405                return;
14406            }
14407            break;
14408
14409        case PORT_FEATURE_LINK_SPEED_100M_FULL:
14410            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14411                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14412                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14413                                              ADVERTISED_TP);
14414            } else {
14415                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14416                          "speed_cap_mask=0x%08x\n",
14417                      link_config, sc->link_params.speed_cap_mask[idx]);
14418                return;
14419            }
14420            break;
14421
14422        case PORT_FEATURE_LINK_SPEED_100M_HALF:
14423            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14424                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14425                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14426                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14427                                              ADVERTISED_TP);
14428            } else {
14429                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14430                          "speed_cap_mask=0x%08x\n",
14431                      link_config, sc->link_params.speed_cap_mask[idx]);
14432                return;
14433            }
14434            break;
14435
14436        case PORT_FEATURE_LINK_SPEED_1G:
14437            if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14438                sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14439                sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14440                                              ADVERTISED_TP);
14441            } else {
14442                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14443                          "speed_cap_mask=0x%08x\n",
14444                      link_config, sc->link_params.speed_cap_mask[idx]);
14445                return;
14446            }
14447            break;
14448
14449        case PORT_FEATURE_LINK_SPEED_2_5G:
14450            if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14451                sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14452                sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14453                                              ADVERTISED_TP);
14454            } else {
14455                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14456                          "speed_cap_mask=0x%08x\n",
14457                      link_config, sc->link_params.speed_cap_mask[idx]);
14458                return;
14459            }
14460            break;
14461
14462        case PORT_FEATURE_LINK_SPEED_10G_CX4:
14463            if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14464                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14465                sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14466                                              ADVERTISED_FIBRE);
14467            } else {
14468                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14469                          "speed_cap_mask=0x%08x\n",
14470                      link_config, sc->link_params.speed_cap_mask[idx]);
14471                return;
14472            }
14473            break;
14474
14475        case PORT_FEATURE_LINK_SPEED_20G:
14476            sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14477            break;
14478
14479        default:
14480            BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
14481                      "speed_cap_mask=0x%08x\n",
14482                  link_config, sc->link_params.speed_cap_mask[idx]);
14483            sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14484            sc->port.advertising[idx] = sc->port.supported[idx];
14485            break;
14486        }
14487
14488        sc->link_params.req_flow_ctrl[idx] =
14489            (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);
14490
14491        if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14492            if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14493                sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14494            } else {
14495                bxe_set_requested_fc(sc);
14496            }
14497        }
14498
14499        BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
14500                            "req_flow_ctrl=0x%x advertising=0x%x\n",
14501              sc->link_params.req_line_speed[idx],
14502              sc->link_params.req_duplex[idx],
14503              sc->link_params.req_flow_ctrl[idx],
14504              sc->port.advertising[idx]);
14505        ELINK_DEBUG_P3(sc, "req_line_speed=%d req_duplex=%d "
14506                           "advertising=0x%x\n",
14507                       sc->link_params.req_line_speed[idx],
14508                       sc->link_params.req_duplex[idx],
14509                       sc->port.advertising[idx]);
14510    }
14511}
14512
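/*
 * Collect the PHY/link configuration for this port: derive the supported and
 * requested link settings from the shmem data, apply the AutoGrEEEN policy
 * (forced on/off, otherwise taken from the NVRAM port feature config), set up
 * the EEE mode from the NVRAM eee_power_mode field, and detect the media type.
 */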
14513static void
14514bxe_get_phy_info(struct bxe_softc *sc)
14515{
14516    uint8_t port = SC_PORT(sc);
14517    uint32_t config = sc->port.config;
14518    uint32_t eee_mode;
14519
14520    /* shmem data already read in bxe_get_shmem_info() */
14521
14522    ELINK_DEBUG_P3(sc, "lane_config=0x%08x speed_cap_mask0=0x%08x "
14523                        "link_config0=0x%08x\n",
14524               sc->link_params.lane_config,
14525               sc->link_params.speed_cap_mask[0],
14526               sc->port.link_config[0]);
14527
14528
14529    bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14530    bxe_link_settings_requested(sc);
14531
14532    if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14533        sc->link_params.feature_config_flags |=
14534            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14535    } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14536        sc->link_params.feature_config_flags &=
14537            ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14538    } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
14539        sc->link_params.feature_config_flags |=
14540            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
14541    }
14542
14543    /* configure link feature according to nvram value */
14544    eee_mode =
14545        (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
14546          PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
14547         PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
14548    if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
14549        sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14550                                    ELINK_EEE_MODE_ENABLE_LPI |
14551                                    ELINK_EEE_MODE_OUTPUT_TIME);
14552    } else {
14553        sc->link_params.eee_mode = 0;
14554    }
14555
14556    /* get the media type */
14557    bxe_media_detect(sc);
14558    ELINK_DEBUG_P1(sc, "detected media type 0x%x\n", sc->media);
14559}
14560
14561static void
14562bxe_get_params(struct bxe_softc *sc)
14563{
14564    /* get user tunable params */
14565    bxe_get_tunable_params(sc);
14566
14567    /* select the RX and TX ring sizes */
14568    sc->tx_ring_size = TX_BD_USABLE;
14569    sc->rx_ring_size = RX_BD_USABLE;
14570
14571    /* XXX disable WoL */
14572    sc->wol = 0;
14573}
14574
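/*
 * Build the init-mode flags word consumed by the ecore init code: board type
 * (ASIC/FPGA/emulation), 2- vs 4-port mode, chip family (E2/E3 and E3 A0/B0),
 * single- vs multi-function mode (and the MF flavor), and host endianness.
 */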
14575static void
14576bxe_set_modes_bitmap(struct bxe_softc *sc)
14577{
14578    uint32_t flags = 0;
14579
14580    if (CHIP_REV_IS_FPGA(sc)) {
14581        SET_FLAGS(flags, MODE_FPGA);
14582    } else if (CHIP_REV_IS_EMUL(sc)) {
14583        SET_FLAGS(flags, MODE_EMUL);
14584    } else {
14585        SET_FLAGS(flags, MODE_ASIC);
14586    }
14587
14588    if (CHIP_IS_MODE_4_PORT(sc)) {
14589        SET_FLAGS(flags, MODE_PORT4);
14590    } else {
14591        SET_FLAGS(flags, MODE_PORT2);
14592    }
14593
14594    if (CHIP_IS_E2(sc)) {
14595        SET_FLAGS(flags, MODE_E2);
14596    } else if (CHIP_IS_E3(sc)) {
14597        SET_FLAGS(flags, MODE_E3);
14598        if (CHIP_REV(sc) == CHIP_REV_Ax) {
14599            SET_FLAGS(flags, MODE_E3_A0);
14600        } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ {
14601            SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
14602        }
14603    }
14604
14605    if (IS_MF(sc)) {
14606        SET_FLAGS(flags, MODE_MF);
14607        switch (sc->devinfo.mf_info.mf_mode) {
14608        case MULTI_FUNCTION_SD:
14609            SET_FLAGS(flags, MODE_MF_SD);
14610            break;
14611        case MULTI_FUNCTION_SI:
14612            SET_FLAGS(flags, MODE_MF_SI);
14613            break;
14614        case MULTI_FUNCTION_AFEX:
14615            SET_FLAGS(flags, MODE_MF_AFEX);
14616            break;
14617        }
14618    } else {
14619        SET_FLAGS(flags, MODE_SF);
14620    }
14621
14622#if defined(__LITTLE_ENDIAN)
14623    SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
14624#else /* __BIG_ENDIAN */
14625    SET_FLAGS(flags, MODE_BIG_ENDIAN);
14626#endif
14627
14628    INIT_MODE_FLAGS(sc) = flags;
14629}
14630
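/*
 * Allocate all host memory shared with the chip: the parent DMA tag, the
 * default status block, event queue, slowpath buffers, firmware decompression
 * buffer, and the per-fastpath status blocks, BD/RCQ/SGE rings and mbuf DMA
 * maps. Returns 0 on success or 1 on failure. Failures before the fastpath
 * loop unwind what was allocated here; failures inside the fastpath loop are
 * expected to be cleaned up by bxe_free_hsi_mem() (see the XXX notes below).
 */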
14631static int
14632bxe_alloc_hsi_mem(struct bxe_softc *sc)
14633{
14634    struct bxe_fastpath *fp;
14635    bus_addr_t busaddr;
14636    int max_agg_queues;
14637    int max_segments;
14638    bus_size_t max_size;
14639    bus_size_t max_seg_size;
14640    char buf[32];
14641    int rc;
14642    int i, j;
14643
14644    /* XXX zero out all vars here and call bxe_free_hsi_mem on error */
14645
14646    /* allocate the parent bus DMA tag */
14647    rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14648                            1,                        /* alignment */
14649                            0,                        /* boundary limit */
14650                            BUS_SPACE_MAXADDR,        /* restricted low */
14651                            BUS_SPACE_MAXADDR,        /* restricted hi */
14652                            NULL,                     /* addr filter() */
14653                            NULL,                     /* addr filter() arg */
14654                            BUS_SPACE_MAXSIZE_32BIT,  /* max map size */
14655                            BUS_SPACE_UNRESTRICTED,   /* num discontinuous */
14656                            BUS_SPACE_MAXSIZE_32BIT,  /* max seg size */
14657                            0,                        /* flags */
14658                            NULL,                     /* lock() */
14659                            NULL,                     /* lock() arg */
14660                            &sc->parent_dma_tag);     /* returned dma tag */
14661    if (rc != 0) {
14662        BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc);
14663        return (1);
14664    }
14665
14666    /************************/
14667    /* DEFAULT STATUS BLOCK */
14668    /************************/
14669
14670    if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block),
14671                      &sc->def_sb_dma, "default status block") != 0) {
14672        /* XXX */
14673        bus_dma_tag_destroy(sc->parent_dma_tag);
14674        return (1);
14675    }
14676
14677    sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14678
14679    /***************/
14680    /* EVENT QUEUE */
14681    /***************/
14682
14683    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14684                      &sc->eq_dma, "event queue") != 0) {
14685        /* XXX */
14686        bxe_dma_free(sc, &sc->def_sb_dma);
14687        sc->def_sb = NULL;
14688        bus_dma_tag_destroy(sc->parent_dma_tag);
14689        return (1);
14690    }
14691
14692    sc->eq = (union event_ring_elem *)sc->eq_dma.vaddr;
14693
14694    /*************/
14695    /* SLOW PATH */
14696    /*************/
14697
14698    if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath),
14699                      &sc->sp_dma, "slow path") != 0) {
14700        /* XXX */
14701        bxe_dma_free(sc, &sc->eq_dma);
14702        sc->eq = NULL;
14703        bxe_dma_free(sc, &sc->def_sb_dma);
14704        sc->def_sb = NULL;
14705        bus_dma_tag_destroy(sc->parent_dma_tag);
14706        return (1);
14707    }
14708
14709    sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14710
14711    /*******************/
14712    /* SLOW PATH QUEUE */
14713    /*******************/
14714
14715    if (bxe_dma_alloc(sc, BCM_PAGE_SIZE,
14716                      &sc->spq_dma, "slow path queue") != 0) {
14717        /* XXX */
14718        bxe_dma_free(sc, &sc->sp_dma);
14719        sc->sp = NULL;
14720        bxe_dma_free(sc, &sc->eq_dma);
14721        sc->eq = NULL;
14722        bxe_dma_free(sc, &sc->def_sb_dma);
14723        sc->def_sb = NULL;
14724        bus_dma_tag_destroy(sc->parent_dma_tag);
14725        return (1);
14726    }
14727
14728    sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14729
14730    /***************************/
14731    /* FW DECOMPRESSION BUFFER */
14732    /***************************/
14733
14734    if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14735                      "fw decompression buffer") != 0) {
14736        /* XXX */
14737        bxe_dma_free(sc, &sc->spq_dma);
14738        sc->spq = NULL;
14739        bxe_dma_free(sc, &sc->sp_dma);
14740        sc->sp = NULL;
14741        bxe_dma_free(sc, &sc->eq_dma);
14742        sc->eq = NULL;
14743        bxe_dma_free(sc, &sc->def_sb_dma);
14744        sc->def_sb = NULL;
14745        bus_dma_tag_destroy(sc->parent_dma_tag);
14746        return (1);
14747    }
14748
14749    sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14750
14751    if ((sc->gz_strm =
14752         malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14753        /* XXX */
14754        bxe_dma_free(sc, &sc->gz_buf_dma);
14755        sc->gz_buf = NULL;
14756        bxe_dma_free(sc, &sc->spq_dma);
14757        sc->spq = NULL;
14758        bxe_dma_free(sc, &sc->sp_dma);
14759        sc->sp = NULL;
14760        bxe_dma_free(sc, &sc->eq_dma);
14761        sc->eq = NULL;
14762        bxe_dma_free(sc, &sc->def_sb_dma);
14763        sc->def_sb = NULL;
14764        bus_dma_tag_destroy(sc->parent_dma_tag);
14765        return (1);
14766    }
14767
14768    /*************/
14769    /* FASTPATHS */
14770    /*************/
14771
14772    /* allocate DMA memory for each fastpath structure */
14773    for (i = 0; i < sc->num_queues; i++) {
14774        fp = &sc->fp[i];
14775        fp->sc    = sc;
14776        fp->index = i;
14777
14778        /*******************/
14779        /* FP STATUS BLOCK */
14780        /*******************/
14781
14782        snprintf(buf, sizeof(buf), "fp %d status block", i);
14783        if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block),
14784                          &fp->sb_dma, buf) != 0) {
14785            /* XXX unwind and free previous fastpath allocations */
14786            BLOGE(sc, "Failed to alloc %s\n", buf);
14787            return (1);
14788        } else {
14789            if (CHIP_IS_E2E3(sc)) {
14790                fp->status_block.e2_sb =
14791                    (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14792            } else {
14793                fp->status_block.e1x_sb =
14794                    (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14795            }
14796        }
14797
14798        /******************/
14799        /* FP TX BD CHAIN */
14800        /******************/
14801
14802        snprintf(buf, sizeof(buf), "fp %d tx bd chain", i);
14803        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES),
14804                          &fp->tx_dma, buf) != 0) {
14805            /* XXX unwind and free previous fastpath allocations */
14806            BLOGE(sc, "Failed to alloc %s\n", buf);
14807            return (1);
14808        } else {
14809            fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14810        }
14811
14812        /* link together the tx bd chain pages */
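        /*
         * The last BD of every page is a next_bd entry whose address points
         * at the start of the following page; the modulo below makes the
         * final page point back at the first, forming one circular chain.
         */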
14813        for (j = 1; j <= TX_BD_NUM_PAGES; j++) {
14814            /* index into the tx bd chain array to last entry per page */
14815            struct eth_tx_next_bd *tx_next_bd =
14816                &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14817            /* point to the next page and wrap from last page */
14818            busaddr = (fp->tx_dma.paddr +
14819                       (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES)));
14820            tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14821            tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
14822        }
14823
14824        /******************/
14825        /* FP RX BD CHAIN */
14826        /******************/
14827
14828        snprintf(buf, sizeof(buf), "fp %d rx bd chain", i);
14829        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES),
14830                          &fp->rx_dma, buf) != 0) {
14831            /* XXX unwind and free previous fastpath allocations */
14832            BLOGE(sc, "Failed to alloc %s\n", buf);
14833            return (1);
14834        } else {
14835            fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14836        }
14837
14838        /* link together the rx bd chain pages */
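        /*
         * The next-page pointer is written into the second-to-last BD slot of
         * each rx page (hence the "- 2" below), so the last two slots of each
         * page serve as the link rather than as packet buffers. The modulo
         * wraps the final page back to the first, forming a circular chain.
         */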
14839        for (j = 1; j <= RX_BD_NUM_PAGES; j++) {
14840            /* index into the rx bd chain array to last entry per page */
14841            struct eth_rx_bd *rx_bd =
14842                &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14843            /* point to the next page and wrap from last page */
14844            busaddr = (fp->rx_dma.paddr +
14845                       (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES)));
14846            rx_bd->addr_hi = htole32(U64_HI(busaddr));
14847            rx_bd->addr_lo = htole32(U64_LO(busaddr));
14848        }
14849
14850        /*******************/
14851        /* FP RX RCQ CHAIN */
14852        /*******************/
14853
14854        snprintf(buf, sizeof(buf), "fp %d rcq chain", i);
14855        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES),
14856                          &fp->rcq_dma, buf) != 0) {
14857            /* XXX unwind and free previous fastpath allocations */
14858            BLOGE(sc, "Failed to alloc %s\n", buf);
14859            return (1);
14860        } else {
14861            fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14862        }
14863
14864        /* link together the rcq chain pages */
14865        for (j = 1; j <= RCQ_NUM_PAGES; j++) {
14866            /* index into the rcq chain array to last entry per page */
14867            struct eth_rx_cqe_next_page *rx_cqe_next =
14868                (struct eth_rx_cqe_next_page *)
14869                &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14870            /* point to the next page and wrap from last page */
14871            busaddr = (fp->rcq_dma.paddr +
14872                       (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES)));
14873            rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14874            rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14875        }
14876
14877        /*******************/
14878        /* FP RX SGE CHAIN */
14879        /*******************/
14880
14881        snprintf(buf, sizeof(buf), "fp %d sge chain", i);
14882        if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES),
14883                          &fp->rx_sge_dma, buf) != 0) {
14884            /* XXX unwind and free previous fastpath allocations */
14885            BLOGE(sc, "Failed to alloc %s\n", buf);
14886            return (1);
14887        } else {
14888            fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14889        }
14890
14891        /* link together the sge chain pages */
14892        for (j = 1; j <= RX_SGE_NUM_PAGES; j++) {
14893            /* index into the rx sge chain array to last entry per page */
14894            struct eth_rx_sge *rx_sge =
14895                &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14896            /* point to the next page and wrap from last page */
14897            busaddr = (fp->rx_sge_dma.paddr +
14898                       (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES)));
14899            rx_sge->addr_hi = htole32(U64_HI(busaddr));
14900            rx_sge->addr_lo = htole32(U64_LO(busaddr));
14901        }
14902
14903        /***********************/
14904        /* FP TX MBUF DMA MAPS */
14905        /***********************/
14906
14907        /* set required sizes before mapping to conserve resources */
14908        if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
14909            max_size     = BXE_TSO_MAX_SIZE;
14910            max_segments = BXE_TSO_MAX_SEGMENTS;
14911            max_seg_size = BXE_TSO_MAX_SEG_SIZE;
14912        } else {
14913            max_size     = (MCLBYTES * BXE_MAX_SEGMENTS);
14914            max_segments = BXE_MAX_SEGMENTS;
14915            max_seg_size = MCLBYTES;
14916        }
14917
14918        /* create a dma tag for the tx mbufs */
14919        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14920                                1,                  /* alignment */
14921                                0,                  /* boundary limit */
14922                                BUS_SPACE_MAXADDR,  /* restricted low */
14923                                BUS_SPACE_MAXADDR,  /* restricted hi */
14924                                NULL,               /* addr filter() */
14925                                NULL,               /* addr filter() arg */
14926                                max_size,           /* max map size */
14927                                max_segments,       /* num discontinuous */
14928                                max_seg_size,       /* max seg size */
14929                                0,                  /* flags */
14930                                NULL,               /* lock() */
14931                                NULL,               /* lock() arg */
14932                                &fp->tx_mbuf_tag);  /* returned dma tag */
14933        if (rc != 0) {
14934            /* XXX unwind and free previous fastpath allocations */
14935            BLOGE(sc, "Failed to create dma tag for "
14936                      "'fp %d tx mbufs' (%d)\n", i, rc);
14937            return (1);
14938        }
14939
14940        /* create dma maps for each of the tx mbuf clusters */
14941        for (j = 0; j < TX_BD_TOTAL; j++) {
14942            if (bus_dmamap_create(fp->tx_mbuf_tag,
14943                                  BUS_DMA_NOWAIT,
14944                                  &fp->tx_mbuf_chain[j].m_map)) {
14945                /* XXX unwind and free previous fastpath allocations */
14946                BLOGE(sc, "Failed to create dma map for "
14947                          "'fp %d tx mbuf %d' (%d)\n", i, j, rc);
14948                return (1);
14949            }
14950        }
14951
14952        /***********************/
14953        /* FP RX MBUF DMA MAPS */
14954        /***********************/
14955
14956        /* create a dma tag for the rx mbufs */
14957        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14958                                1,                  /* alignment */
14959                                0,                  /* boundary limit */
14960                                BUS_SPACE_MAXADDR,  /* restricted low */
14961                                BUS_SPACE_MAXADDR,  /* restricted hi */
14962                                NULL,               /* addr filter() */
14963                                NULL,               /* addr filter() arg */
14964                                MJUM9BYTES,         /* max map size */
14965                                1,                  /* num discontinuous */
14966                                MJUM9BYTES,         /* max seg size */
14967                                0,                  /* flags */
14968                                NULL,               /* lock() */
14969                                NULL,               /* lock() arg */
14970                                &fp->rx_mbuf_tag);  /* returned dma tag */
14971        if (rc != 0) {
14972            /* XXX unwind and free previous fastpath allocations */
14973            BLOGE(sc, "Failed to create dma tag for "
14974                      "'fp %d rx mbufs' (%d)\n", i, rc);
14975            return (1);
14976        }
14977
14978        /* create dma maps for each of the rx mbuf clusters */
14979        for (j = 0; j < RX_BD_TOTAL; j++) {
14980            if (bus_dmamap_create(fp->rx_mbuf_tag,
14981                                  BUS_DMA_NOWAIT,
14982                                  &fp->rx_mbuf_chain[j].m_map)) {
14983                /* XXX unwind and free previous fastpath allocations */
14984                BLOGE(sc, "Failed to create dma map for "
14985                          "'fp %d rx mbuf %d' (%d)\n", i, j, rc);
14986                return (1);
14987            }
14988        }
14989
14990        /* create dma map for the spare rx mbuf cluster */
14991        if (bus_dmamap_create(fp->rx_mbuf_tag,
14992                              BUS_DMA_NOWAIT,
14993                              &fp->rx_mbuf_spare_map)) {
14994            /* XXX unwind and free previous fastpath allocations */
14995            BLOGE(sc, "Failed to create dma map for "
14996                      "'fp %d spare rx mbuf' (%d)\n", i, rc);
14997            return (1);
14998        }
14999
15000        /***************************/
15001        /* FP RX SGE MBUF DMA MAPS */
15002        /***************************/
15003
15004        /* create a dma tag for the rx sge mbufs */
15005        rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
15006                                1,                  /* alignment */
15007                                0,                  /* boundary limit */
15008                                BUS_SPACE_MAXADDR,  /* restricted low */
15009                                BUS_SPACE_MAXADDR,  /* restricted hi */
15010                                NULL,               /* addr filter() */
15011                                NULL,               /* addr filter() arg */
15012                                BCM_PAGE_SIZE,      /* max map size */
15013                                1,                  /* num discontinuous */
15014                                BCM_PAGE_SIZE,      /* max seg size */
15015                                0,                  /* flags */
15016                                NULL,               /* lock() */
15017                                NULL,               /* lock() arg */
15018                                &fp->rx_sge_mbuf_tag); /* returned dma tag */
15019        if (rc != 0) {
15020            /* XXX unwind and free previous fastpath allocations */
15021            BLOGE(sc, "Failed to create dma tag for "
15022                      "'fp %d rx sge mbufs' (%d)\n", i, rc);
15023            return (1);
15024        }
15025
15026        /* create dma maps for the rx sge mbuf clusters */
15027        for (j = 0; j < RX_SGE_TOTAL; j++) {
15028            if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15029                                  BUS_DMA_NOWAIT,
15030                                  &fp->rx_sge_mbuf_chain[j].m_map)) {
15031                /* XXX unwind and free previous fastpath allocations */
15032                BLOGE(sc, "Failed to create dma map for "
15033                          "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc);
15034                return (1);
15035            }
15036        }
15037
15038        /* create dma map for the spare rx sge mbuf cluster */
15039        if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15040                              BUS_DMA_NOWAIT,
15041                              &fp->rx_sge_mbuf_spare_map)) {
15042            /* XXX unwind and free previous fastpath allocations */
15043            BLOGE(sc, "Failed to create dma map for "
15044                      "'fp %d spare rx sge mbuf' (%d)\n", i, rc);
15045            return (1);
15046        }
15047
15048        /***************************/
15049        /* FP RX TPA MBUF DMA MAPS */
15050        /***************************/
15051
15052        /* create dma maps for the rx tpa mbuf clusters */
15053        max_agg_queues = MAX_AGG_QS(sc);
15054
15055        for (j = 0; j < max_agg_queues; j++) {
15056            if (bus_dmamap_create(fp->rx_mbuf_tag,
15057                                  BUS_DMA_NOWAIT,
15058                                  &fp->rx_tpa_info[j].bd.m_map)) {
15059                /* XXX unwind and free previous fastpath allocations */
15060                BLOGE(sc, "Failed to create dma map for "
15061                          "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc);
15062                return (1);
15063            }
15064        }
15065
15066        /* create dma map for the spare rx tpa mbuf cluster */
15067        if (bus_dmamap_create(fp->rx_mbuf_tag,
15068                              BUS_DMA_NOWAIT,
15069                              &fp->rx_tpa_info_mbuf_spare_map)) {
15070            /* XXX unwind and free previous fastpath allocations */
15071            BLOGE(sc, "Failed to create dma map for "
15072                      "'fp %d spare rx tpa mbuf' (%d)\n", i, rc);
15073            return (1);
15074        }
15075
15076        bxe_init_sge_ring_bit_mask(fp);
15077    }
15078
15079    return (0);
15080}
15081
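/*
 * Release everything allocated by bxe_alloc_hsi_mem(). Safe to call on a
 * partially initialized softc: it returns early if the parent DMA tag was
 * never created and checks each per-fastpath tag/map for NULL before
 * destroying it.
 */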
15082static void
15083bxe_free_hsi_mem(struct bxe_softc *sc)
15084{
15085    struct bxe_fastpath *fp;
15086    int max_agg_queues;
15087    int i, j;
15088
15089    if (sc->parent_dma_tag == NULL) {
15090        return; /* assume nothing was allocated */
15091    }
15092
15093    for (i = 0; i < sc->num_queues; i++) {
15094        fp = &sc->fp[i];
15095
15096        /*******************/
15097        /* FP STATUS BLOCK */
15098        /*******************/
15099
15100        bxe_dma_free(sc, &fp->sb_dma);
15101        memset(&fp->status_block, 0, sizeof(fp->status_block));
15102
15103        /******************/
15104        /* FP TX BD CHAIN */
15105        /******************/
15106
15107        bxe_dma_free(sc, &fp->tx_dma);
15108        fp->tx_chain = NULL;
15109
15110        /******************/
15111        /* FP RX BD CHAIN */
15112        /******************/
15113
15114        bxe_dma_free(sc, &fp->rx_dma);
15115        fp->rx_chain = NULL;
15116
15117        /*******************/
15118        /* FP RX RCQ CHAIN */
15119        /*******************/
15120
15121        bxe_dma_free(sc, &fp->rcq_dma);
15122        fp->rcq_chain = NULL;
15123
15124        /*******************/
15125        /* FP RX SGE CHAIN */
15126        /*******************/
15127
15128        bxe_dma_free(sc, &fp->rx_sge_dma);
15129        fp->rx_sge_chain = NULL;
15130
15131        /***********************/
15132        /* FP TX MBUF DMA MAPS */
15133        /***********************/
15134
15135        if (fp->tx_mbuf_tag != NULL) {
15136            for (j = 0; j < TX_BD_TOTAL; j++) {
15137                if (fp->tx_mbuf_chain[j].m_map != NULL) {
15138                    bus_dmamap_unload(fp->tx_mbuf_tag,
15139                                      fp->tx_mbuf_chain[j].m_map);
15140                    bus_dmamap_destroy(fp->tx_mbuf_tag,
15141                                       fp->tx_mbuf_chain[j].m_map);
15142                }
15143            }
15144
15145            bus_dma_tag_destroy(fp->tx_mbuf_tag);
15146            fp->tx_mbuf_tag = NULL;
15147        }
15148
15149        /***********************/
15150        /* FP RX MBUF DMA MAPS */
15151        /***********************/
15152
15153        if (fp->rx_mbuf_tag != NULL) {
15154            for (j = 0; j < RX_BD_TOTAL; j++) {
15155                if (fp->rx_mbuf_chain[j].m_map != NULL) {
15156                    bus_dmamap_unload(fp->rx_mbuf_tag,
15157                                      fp->rx_mbuf_chain[j].m_map);
15158                    bus_dmamap_destroy(fp->rx_mbuf_tag,
15159                                       fp->rx_mbuf_chain[j].m_map);
15160                }
15161            }
15162
15163            if (fp->rx_mbuf_spare_map != NULL) {
15164                bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15165                bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15166            }
15167
15168            /***************************/
15169            /* FP RX TPA MBUF DMA MAPS */
15170            /***************************/
15171
15172            max_agg_queues = MAX_AGG_QS(sc);
15173
15174            for (j = 0; j < max_agg_queues; j++) {
15175                if (fp->rx_tpa_info[j].bd.m_map != NULL) {
15176                    bus_dmamap_unload(fp->rx_mbuf_tag,
15177                                      fp->rx_tpa_info[j].bd.m_map);
15178                    bus_dmamap_destroy(fp->rx_mbuf_tag,
15179                                       fp->rx_tpa_info[j].bd.m_map);
15180                }
15181            }
15182
15183            if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
15184                bus_dmamap_unload(fp->rx_mbuf_tag,
15185                                  fp->rx_tpa_info_mbuf_spare_map);
15186                bus_dmamap_destroy(fp->rx_mbuf_tag,
15187                                   fp->rx_tpa_info_mbuf_spare_map);
15188            }
15189
15190            bus_dma_tag_destroy(fp->rx_mbuf_tag);
15191            fp->rx_mbuf_tag = NULL;
15192        }
15193
15194        /***************************/
15195        /* FP RX SGE MBUF DMA MAPS */
15196        /***************************/
15197
15198        if (fp->rx_sge_mbuf_tag != NULL) {
15199            for (j = 0; j < RX_SGE_TOTAL; j++) {
15200                if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
15201                    bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15202                                      fp->rx_sge_mbuf_chain[j].m_map);
15203                    bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15204                                       fp->rx_sge_mbuf_chain[j].m_map);
15205                }
15206            }
15207
15208            if (fp->rx_sge_mbuf_spare_map != NULL) {
15209                bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15210                                  fp->rx_sge_mbuf_spare_map);
15211                bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15212                                   fp->rx_sge_mbuf_spare_map);
15213            }
15214
15215            bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
15216            fp->rx_sge_mbuf_tag = NULL;
15217        }
15218    }
15219
15220    /***************************/
15221    /* FW DECOMPRESSION BUFFER */
15222    /***************************/
15223
15224    bxe_dma_free(sc, &sc->gz_buf_dma);
15225    sc->gz_buf = NULL;
15226    free(sc->gz_strm, M_DEVBUF);
15227    sc->gz_strm = NULL;
15228
15229    /*******************/
15230    /* SLOW PATH QUEUE */
15231    /*******************/
15232
15233    bxe_dma_free(sc, &sc->spq_dma);
15234    sc->spq = NULL;
15235
15236    /*************/
15237    /* SLOW PATH */
15238    /*************/
15239
15240    bxe_dma_free(sc, &sc->sp_dma);
15241    sc->sp = NULL;
15242
15243    /***************/
15244    /* EVENT QUEUE */
15245    /***************/
15246
15247    bxe_dma_free(sc, &sc->eq_dma);
15248    sc->eq = NULL;
15249
15250    /************************/
15251    /* DEFAULT STATUS BLOCK */
15252    /************************/
15253
15254    bxe_dma_free(sc, &sc->def_sb_dma);
15255    sc->def_sb = NULL;
15256
15257    bus_dma_tag_destroy(sc->parent_dma_tag);
15258    sc->parent_dma_tag = NULL;
15259}
15260
15261/*
15262 * A DMAE transaction from a previous driver may have been in flight when
15263 * the pre-boot stage ended and boot began. This would invalidate the
15264 * addresses of that transaction, setting the was-error bit in the PCI block
15265 * and causing all hw-to-host PCIe transactions to time out. If this
15266 * happened, we clear the pglueb interrupt that detected it and the was-done bit.
15267 */
15268static void
15269bxe_prev_interrupted_dmae(struct bxe_softc *sc)
15270{
15271    uint32_t val;
15272
15273    if (!CHIP_IS_E1x(sc)) {
15274        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS);
15275        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
15276            BLOGD(sc, DBG_LOAD,
15277                  "Clearing 'was-error' bit that was set in pglueb\n");
15278            REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc));
15279        }
15280    }
15281}
15282
15283static int
15284bxe_prev_mcp_done(struct bxe_softc *sc)
15285{
15286    uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
15287                                 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
15288    if (!rc) {
15289        BLOGE(sc, "MCP response failure, aborting\n");
15290        return (-1);
15291    }
15292
15293    return (0);
15294}
15295
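/*
 * The bxe_prev_* helpers below track, in the global bxe_prev_list protected
 * by bxe_prev_mtx, which PCI bus/slot/path combinations have already been
 * cleaned up after a previous (e.g. pre-boot UNDI) driver, so the expensive
 * common cleanup in bxe_prev_unload_common() is only performed once per path.
 */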
15296static struct bxe_prev_list_node *
15297bxe_prev_path_get_entry(struct bxe_softc *sc)
15298{
15299    struct bxe_prev_list_node *tmp;
15300
15301    LIST_FOREACH(tmp, &bxe_prev_list, node) {
15302        if ((sc->pcie_bus == tmp->bus) &&
15303            (sc->pcie_device == tmp->slot) &&
15304            (SC_PATH(sc) == tmp->path)) {
15305            return (tmp);
15306        }
15307    }
15308
15309    return (NULL);
15310}
15311
15312static uint8_t
15313bxe_prev_is_path_marked(struct bxe_softc *sc)
15314{
15315    struct bxe_prev_list_node *tmp;
15316    int rc = FALSE;
15317
15318    mtx_lock(&bxe_prev_mtx);
15319
15320    tmp = bxe_prev_path_get_entry(sc);
15321    if (tmp) {
15322        if (tmp->aer) {
15323            BLOGD(sc, DBG_LOAD,
15324                  "Path %d/%d/%d was marked by AER\n",
15325                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15326        } else {
15327            rc = TRUE;
15328            BLOGD(sc, DBG_LOAD,
15329                  "Path %d/%d/%d was already cleaned from previous drivers\n",
15330                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15331        }
15332    }
15333
15334    mtx_unlock(&bxe_prev_mtx);
15335
15336    return (rc);
15337}
15338
15339static int
15340bxe_prev_mark_path(struct bxe_softc *sc,
15341                   uint8_t          after_undi)
15342{
15343    struct bxe_prev_list_node *tmp;
15344
15345    mtx_lock(&bxe_prev_mtx);
15346
15347    /* Check whether the entry for this path already exists */
15348    tmp = bxe_prev_path_get_entry(sc);
15349    if (tmp) {
15350        if (!tmp->aer) {
15351            BLOGD(sc, DBG_LOAD,
15352                  "Re-marking AER in path %d/%d/%d\n",
15353                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15354        } else {
15355            BLOGD(sc, DBG_LOAD,
15356                  "Removing AER indication from path %d/%d/%d\n",
15357                  sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15358            tmp->aer = 0;
15359        }
15360
15361        mtx_unlock(&bxe_prev_mtx);
15362        return (0);
15363    }
15364
15365    mtx_unlock(&bxe_prev_mtx);
15366
15367    /* Create an entry for this path and add it */
15368    tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF,
15369                 (M_NOWAIT | M_ZERO));
15370    if (!tmp) {
15371        BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n");
15372        return (-1);
15373    }
15374
15375    tmp->bus  = sc->pcie_bus;
15376    tmp->slot = sc->pcie_device;
15377    tmp->path = SC_PATH(sc);
15378    tmp->aer  = 0;
15379    tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15380
15381    mtx_lock(&bxe_prev_mtx);
15382
15383    BLOGD(sc, DBG_LOAD,
15384          "Marked path %d/%d/%d - finished previous unload\n",
15385          sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15386    LIST_INSERT_HEAD(&bxe_prev_list, tmp, node);
15387
15388    mtx_unlock(&bxe_prev_mtx);
15389
15390    return (0);
15391}
15392
15393static int
15394bxe_do_flr(struct bxe_softc *sc)
15395{
15396    int i;
15397
15398    /* only E2 and onwards support FLR */
15399    if (CHIP_IS_E1x(sc)) {
15400        BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n");
15401        return (-1);
15402    }
15403
15404    /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
15405    if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15406        BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n",
15407              sc->devinfo.bc_ver);
15408        return (-1);
15409    }
15410
15411    /* Wait for the Transaction Pending bit to clear (back off 100/200/400 msec between polls) */
15412    for (i = 0; i < 4; i++) {
15413        if (i) {
15414            DELAY(((1 << (i - 1)) * 100) * 1000);
15415        }
15416
15417        if (!bxe_is_pcie_pending(sc)) {
15418            goto clear;
15419        }
15420    }
15421
15422    BLOGE(sc, "PCIE transaction is not cleared, "
15423              "proceeding with reset anyway\n");
15424
15425clear:
15426
15427    BLOGD(sc, DBG_LOAD, "Initiating FLR\n");
15428    bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0);
15429
15430    return (0);
15431}
15432
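/*
 * Saved MAC block register addresses/values. bxe_prev_unload_close_mac()
 * records each register it zeroes while closing the MAC Rx path, and
 * bxe_prev_unload_common() restores the originals once the common reset has
 * completed. An address of 0 means that register was never touched.
 */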
15433struct bxe_mac_vals {
15434    uint32_t xmac_addr;
15435    uint32_t xmac_val;
15436    uint32_t emac_addr;
15437    uint32_t emac_val;
15438    uint32_t umac_addr;
15439    uint32_t umac_val;
15440    uint32_t bmac_addr;
15441    uint32_t bmac_val[2];
15442};
15443
15444static void
15445bxe_prev_unload_close_mac(struct bxe_softc *sc,
15446                          struct bxe_mac_vals *vals)
15447{
15448    uint32_t val, base_addr, offset, mask, reset_reg;
15449    uint8_t mac_stopped = FALSE;
15450    uint8_t port = SC_PORT(sc);
15451    uint32_t wb_data[2];
15452
15453    /* reset addresses as they also mark which values were changed */
15454    vals->bmac_addr = 0;
15455    vals->umac_addr = 0;
15456    vals->xmac_addr = 0;
15457    vals->emac_addr = 0;
15458
15459    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);
15460
15461    if (!CHIP_IS_E3(sc)) {
15462        val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
15463        mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
15464        if ((mask & reset_reg) && val) {
15465            BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
15466            base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
15467                                    : NIG_REG_INGRESS_BMAC0_MEM;
15468            offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
15469                                    : BIGMAC_REGISTER_BMAC_CONTROL;
15470
15471            /*
15472             * use rd/wr since we cannot use dmae. This is safe
15473             * since MCP won't access the bus due to the request
15474             * to unload, and no function on the path can be
15475             * loaded at this time.
15476             */
15477            wb_data[0] = REG_RD(sc, base_addr + offset);
15478            wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
15479            vals->bmac_addr = base_addr + offset;
15480            vals->bmac_val[0] = wb_data[0];
15481            vals->bmac_val[1] = wb_data[1];
15482            wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
15483            REG_WR(sc, vals->bmac_addr, wb_data[0]);
15484            REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15485        }
15486
15487        BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
15488        vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15489        vals->emac_val = REG_RD(sc, vals->emac_addr);
15490        REG_WR(sc, vals->emac_addr, 0);
15491        mac_stopped = TRUE;
15492    } else {
15493        if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
15494            BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
15495            base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
15496            val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
15497            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
15498            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
15499            vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15500            vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15501            REG_WR(sc, vals->xmac_addr, 0);
15502            mac_stopped = TRUE;
15503        }
15504
15505        mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
15506        if (mask & reset_reg) {
15507            BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
15508            base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
15509            vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15510            vals->umac_val = REG_RD(sc, vals->umac_addr);
15511            REG_WR(sc, vals->umac_addr, 0);
15512            mac_stopped = TRUE;
15513        }
15514    }
15515
15516    if (mac_stopped) {
15517        DELAY(20000);
15518    }
15519}
15520
15521#define BXE_PREV_UNDI_PROD_ADDR(p)  (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
15522#define BXE_PREV_UNDI_RCQ(val)      ((val) & 0xffff)
15523#define BXE_PREV_UNDI_BD(val)       ((val) >> 16 & 0xffff)
15524#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
15525
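/*
 * The TSTORM word at BXE_PREV_UNDI_PROD_ADDR() packs the UNDI RCQ producer
 * into the low 16 bits and the RX BD producer into the high 16 bits.
 * bxe_prev_unload_undi_inc() advances both by the same amount; it is called
 * from bxe_prev_unload_common() while waiting for the BRB to drain when a
 * pre-boot UNDI driver was detected.
 */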
15526static void
15527bxe_prev_unload_undi_inc(struct bxe_softc *sc,
15528                         uint8_t          port,
15529                         uint8_t          inc)
15530{
15531    uint16_t rcq, bd;
15532    uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port));
15533
15534    rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc;
15535    bd = BXE_PREV_UNDI_BD(tmp_reg) + inc;
15536
15537    tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd);
15538    REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg);
15539
15540    BLOGD(sc, DBG_LOAD,
15541          "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15542          port, bd, rcq);
15543}
15544
15545static int
15546bxe_prev_unload_common(struct bxe_softc *sc)
15547{
15548    uint32_t reset_reg, tmp_reg = 0, rc;
15549    uint8_t prev_undi = FALSE;
15550    struct bxe_mac_vals mac_vals;
15551    uint32_t timer_count = 1000;
15552    uint32_t prev_brb;
15553
15554    /*
15555     * It is possible that a previous function received the 'common' answer
15556     * but has not loaded yet, creating a scenario where multiple functions
15557     * receive 'common' on the same path.
15558     */
15559    BLOGD(sc, DBG_LOAD, "Common unload Flow\n");
15560
15561    memset(&mac_vals, 0, sizeof(mac_vals));
15562
15563    if (bxe_prev_is_path_marked(sc)) {
15564        return (bxe_prev_mcp_done(sc));
15565    }
15566
15567    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);
15568
15569    /* Reset should be performed after BRB is emptied */
15570    if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
15571        /* Close the MAC Rx to prevent BRB from filling up */
15572        bxe_prev_unload_close_mac(sc, &mac_vals);
15573
15574        /* close LLH filters towards the BRB */
15575        elink_set_rx_filter(&sc->link_params, 0);
15576
15577        /*
15578         * Check if the UNDI driver was previously loaded.
15579         * UNDI driver initializes CID offset for normal bell to 0x7
15580         */
15581        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
15582            tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
15583            if (tmp_reg == 0x7) {
15584                BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
15585                prev_undi = TRUE;
15586                /* clear the UNDI indication */
15587                REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
15588                /* clear possible idle check errors */
15589                REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
15590            }
15591        }
15592
15593        /* wait until BRB is empty */
15594        tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15595        while (timer_count) {
15596            prev_brb = tmp_reg;
15597
15598            tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
15599            if (!tmp_reg) {
15600                break;
15601            }
15602
15603            BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);
15604
15605            /* reset timer as long as BRB actually gets emptied */
15606            if (prev_brb > tmp_reg) {
15607                timer_count = 1000;
15608            } else {
15609                timer_count--;
15610            }
15611
15612            /* If UNDI resides in memory, manually increment it */
15613            if (prev_undi) {
15614                bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
15615            }
15616
15617            DELAY(10);
15618        }
15619
15620        if (!timer_count) {
15621            BLOGE(sc, "Failed to empty BRB\n");
15622        }
15623    }
15624
15625    /* No packets are in the pipeline, path is ready for reset */
15626    bxe_reset_common(sc);
15627
15628    if (mac_vals.xmac_addr) {
15629        REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
15630    }
15631    if (mac_vals.umac_addr) {
15632        REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
15633    }
15634    if (mac_vals.emac_addr) {
15635        REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
15636    }
15637    if (mac_vals.bmac_addr) {
15638        REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
15639        REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
15640    }
15641
15642    rc = bxe_prev_mark_path(sc, prev_undi);
15643    if (rc) {
15644        bxe_prev_mcp_done(sc);
15645        return (rc);
15646    }
15647
15648    return (bxe_prev_mcp_done(sc));
15649}
15650
15651static int
15652bxe_prev_unload_uncommon(struct bxe_softc *sc)
15653{
15654    int rc;
15655
15656    BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n");
15657
15658    /* Test if previous unload process was already finished for this path */
15659    if (bxe_prev_is_path_marked(sc)) {
15660        return (bxe_prev_mcp_done(sc));
15661    }
15662
15663    BLOGD(sc, DBG_LOAD, "Path is unmarked\n");
15664
15665    /*
15666     * If function has FLR capabilities, and existing FW version matches
15667     * the one required, then FLR will be sufficient to clean any residue
15668     * left by previous driver
15669     */
15670    rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION);
15671    if (!rc) {
15672        /* fw version is good */
15673        BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n");
15674        rc = bxe_do_flr(sc);
15675    }
15676
15677    if (!rc) {
15678        /* FLR was performed */
15679        BLOGD(sc, DBG_LOAD, "FLR successful\n");
15680        return (0);
15681    }
15682
15683    BLOGD(sc, DBG_LOAD, "Could not FLR\n");
15684
15685    /* Close the MCP request, return failure */
15686    rc = bxe_prev_mcp_done(sc);
15687    if (!rc) {
15688        rc = BXE_PREV_WAIT_NEEDED;
15689    }
15690
15691    return (rc);
15692}
15693
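/*
 * Clean up after any driver (or pre-boot firmware) that ran before us: clear
 * errors left by an interrupted DMAE transaction, release stale HW/NVRAM/ALR
 * locks, then issue an unload request to the MCP and run the common or
 * uncommon unload flow based on its reply, retrying up to 10 times when the
 * uncommon path indicates a wait is needed.
 */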
15694static int
15695bxe_prev_unload(struct bxe_softc *sc)
15696{
15697    int time_counter = 10;
15698    uint32_t fw, hw_lock_reg, hw_lock_val;
15699    uint32_t rc = 0;
15700
15701    /*
15702     * Clear HW from errors which may have resulted from an interrupted
15703     * DMAE transaction.
15704     */
15705    bxe_prev_interrupted_dmae(sc);
15706
15707    /* Release previously held locks */
15708    hw_lock_reg =
15709        (SC_FUNC(sc) <= 5) ?
15710            (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
15711            (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
15712
15713    hw_lock_val = (REG_RD(sc, hw_lock_reg));
15714    if (hw_lock_val) {
15715        if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
15716            BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n");
15717            REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
15718                   (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
15719        }
15720        BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n");
15721        REG_WR(sc, hw_lock_reg, 0xffffffff);
15722    } else {
15723        BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n");
15724    }
15725
15726    if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
15727        BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n");
15728        REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
15729    }
15730
15731    do {
15732        /* Lock MCP using an unload request */
15733        fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
15734        if (!fw) {
15735            BLOGE(sc, "MCP response failure, aborting\n");
15736            rc = -1;
15737            break;
15738        }
15739
15740        if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
15741            rc = bxe_prev_unload_common(sc);
15742            break;
15743        }
15744
15745        /* non-common reply from MCP might require looping */
15746        rc = bxe_prev_unload_uncommon(sc);
15747        if (rc != BXE_PREV_WAIT_NEEDED) {
15748            break;
15749        }
15750
15751        DELAY(20000);
15752    } while (--time_counter);
15753
15754    if (!time_counter || rc) {
15755        BLOGE(sc, "Failed to unload previous driver!"
15756            " time_counter %d rc %d\n", time_counter, rc);
15757        rc = -1;
15758    }
15759
15760    return (rc);
15761}
15762
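/*
 * Record the DCB/DCBX state requested by the caller. E1/E1H chips have no
 * DCBX support, so for them dcb_state is forced off and dcbx_enabled is
 * marked invalid regardless of the arguments.
 */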
15763void
15764bxe_dcbx_set_state(struct bxe_softc *sc,
15765                   uint8_t          dcb_on,
15766                   uint32_t         dcbx_enabled)
15767{
15768    if (!CHIP_IS_E1x(sc)) {
15769        sc->dcb_state = dcb_on;
15770        sc->dcbx_enabled = dcbx_enabled;
15771    } else {
15772        sc->dcb_state = FALSE;
15773        sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15774    }
15775    BLOGD(sc, DBG_LOAD,
15776          "DCB state [%s:%s]\n",
15777          dcb_on ? "ON" : "OFF",
15778          (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15779          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15780          (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ?
15781          "on-chip with negotiation" : "invalid");
15782}
15783
15784/* must be called after sriov-enable */
15785static int
15786bxe_set_qm_cid_count(struct bxe_softc *sc)
15787{
15788    int cid_count = BXE_L2_MAX_CID(sc);
15789
15790    if (IS_SRIOV(sc)) {
15791        cid_count += BXE_VF_CIDS;
15792    }
15793
15794    if (CNIC_SUPPORT(sc)) {
15795        cid_count += CNIC_CID_MAX;
15796    }
15797
15798    return (roundup(cid_count, QM_CID_ROUND));
15799}
15800
15801static void
15802bxe_init_multi_cos(struct bxe_softc *sc)
15803{
15804    int pri, cos;
15805
15806    uint32_t pri_map = 0; /* XXX change to user config */
15807
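    /*
     * pri_map packs one 4-bit CoS value per priority: priority 'pri' lives
     * in bits [(pri * 4) + 3 : pri * 4]. A value of 0 maps every priority
     * to CoS 0.
     */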
15808    for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) {
15809        cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4));
15810        if (cos < sc->max_cos) {
15811            sc->prio_to_cos[pri] = cos;
15812        } else {
15813            BLOGW(sc, "Invalid COS %d for priority %d "
15814                      "(max COS is %d), setting to 0\n",
15815                  cos, pri, (sc->max_cos - 1));
15816            sc->prio_to_cos[pri] = 0;
15817        }
15818    }
15819}
15820
15821static int
15822bxe_sysctl_state(SYSCTL_HANDLER_ARGS)
15823{
15824    struct bxe_softc *sc;
15825    int error, result;
15826
15827    result = 0;
15828    error = sysctl_handle_int(oidp, &result, 0, req);
15829
15830    if (error || !req->newptr) {
15831        return (error);
15832    }
15833
15834    if (result == 1) {
15835        uint32_t  temp;
15836        sc = (struct bxe_softc *)arg1;
15837
15838        BLOGI(sc, "... dumping driver state ...\n");
15839        temp = SHMEM2_RD(sc, temperature_in_half_celsius);
15840        BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2));
15841    }
15842
15843    return (error);
15844}
15845
15846static int
15847bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS)
15848{
15849    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15850    uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15851    uint32_t *offset;
15852    uint64_t value = 0;
15853    int index = (int)arg2;
15854
15855    if (index >= BXE_NUM_ETH_STATS) {
15856        BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index);
15857        return (-1);
15858    }
15859
15860    offset = (eth_stats + bxe_eth_stats_arr[index].offset);
15861
15862    switch (bxe_eth_stats_arr[index].size) {
15863    case 4:
15864        value = (uint64_t)*offset;
15865        break;
15866    case 8:
15867        value = HILO_U64(*offset, *(offset + 1));
15868        break;
15869    default:
15870        BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n",
15871              index, bxe_eth_stats_arr[index].size);
15872        return (-1);
15873    }
15874
15875    return (sysctl_handle_64(oidp, &value, 0, req));
15876}
15877
15878static int
15879bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS)
15880{
15881    struct bxe_softc *sc = (struct bxe_softc *)arg1;
15882    uint32_t *eth_stats;
15883    uint32_t *offset;
15884    uint64_t value = 0;
15885    uint32_t q_stat = (uint32_t)arg2;
15886    uint32_t fp_index = ((q_stat >> 16) & 0xffff);
15887    uint32_t index = (q_stat & 0xffff);
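    /*
     * arg2 packs the fastpath (queue) index in the upper 16 bits and the
     * per-queue stat index in the lower 16 bits; see the encoding in
     * bxe_add_sysctls().
     */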
15888
15889    eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15890
15891    if (index >= BXE_NUM_ETH_Q_STATS) {
15892        BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index);
15893        return (-1);
15894    }
15895
15896    offset = (eth_stats + bxe_eth_q_stats_arr[index].offset);
15897
15898    switch (bxe_eth_q_stats_arr[index].size) {
15899    case 4:
15900        value = (uint64_t)*offset;
15901        break;
15902    case 8:
15903        value = HILO_U64(*offset, *(offset + 1));
15904        break;
15905    default:
15906        BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n",
15907              index, bxe_eth_q_stats_arr[index].size);
15908        return (-1);
15909    }
15910
15911    return (sysctl_handle_64(oidp, &value, 0, req));
15912}
15913
static void
bxe_force_link_reset(struct bxe_softc *sc)
{
        bxe_acquire_phy_lock(sc);
        elink_link_reset(&sc->link_params, &sc->link_vars, 1);
        bxe_release_phy_lock(sc);
}
15921
15922static int
15923bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS)
15924{
15925        struct bxe_softc *sc = (struct bxe_softc *)arg1;
15926        uint32_t cfg_idx = bxe_get_link_cfg_idx(sc);
15927        int rc = 0;
15928        int error;
15929        int result;
15930
15931
15932        error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req);
15933
15934        if (error || !req->newptr) {
15935                return (error);
15936        }
        if ((sc->bxe_pause_param < 0) || (sc->bxe_pause_param > 8)) {
                BLOGW(sc, "invalid pause param (%d) - use integers between 0 & 8\n",
                      sc->bxe_pause_param);
                sc->bxe_pause_param = 8;
        }
15941
15942        result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT);
15943
15944
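        /*
         * 'result' is the user value shifted into the port-feature flow
         * control field. The 0x400 check below tests whether an autoneg
         * based setting (AUTO*) was requested, while the RX/TX bits of the
         * shifted value line up with ELINK_FLOW_CTRL_RX/ELINK_FLOW_CTRL_TX.
         */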
        if ((result & 0x400) &&
            !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg)) {
                BLOGW(sc, "Does not support Autoneg pause_param %d\n",
                      sc->bxe_pause_param);
                return (-EINVAL);
        }

        if (IS_MF(sc))
                return (0);

        sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO;
        if (result & ELINK_FLOW_CTRL_RX)
                sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX;
        if (result & ELINK_FLOW_CTRL_TX)
                sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX;
        if (sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO)
                sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE;

        if (result & 0x400) {
                if (sc->link_params.req_line_speed[cfg_idx] ==
                    ELINK_SPEED_AUTO_NEG) {
                        sc->link_params.req_flow_ctrl[cfg_idx] =
                            ELINK_FLOW_CTRL_AUTO;
                }
                sc->link_params.req_fc_auto_adv = 0;
                if (result & ELINK_FLOW_CTRL_RX)
                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX;
                if (result & ELINK_FLOW_CTRL_TX)
                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX;
                if (!sc->link_params.req_fc_auto_adv)
                        sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE;
        }

        if (IS_PF(sc)) {
                if (sc->link_vars.link_up) {
                        bxe_stats_handle(sc, STATS_EVENT_STOP);
                }
                if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
                        bxe_force_link_reset(sc);
                        bxe_acquire_phy_lock(sc);
                        rc = elink_phy_init(&sc->link_params, &sc->link_vars);
                        bxe_release_phy_lock(sc);
                        bxe_calc_fc_adv(sc);
                }
        }

        return (rc);
15991}
15992
15993
15994static void
15995bxe_add_sysctls(struct bxe_softc *sc)
15996{
15997    struct sysctl_ctx_list *ctx;
15998    struct sysctl_oid_list *children;
15999    struct sysctl_oid *queue_top, *queue;
16000    struct sysctl_oid_list *queue_top_children, *queue_children;
16001    char queue_num_buf[32];
16002    uint32_t q_stat;
16003    int i, j;
16004
16005    ctx = device_get_sysctl_ctx(sc->dev);
16006    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
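    /* all OIDs below hang off the device's sysctl tree, i.e. dev.bxe.<unit>.* */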
16007
16008    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
16009                      CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
16010                      "version");
16011
16012    snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
16013             BCM_5710_FW_MAJOR_VERSION,
16014             BCM_5710_FW_MINOR_VERSION,
16015             BCM_5710_FW_REVISION_VERSION,
16016             BCM_5710_FW_ENGINEERING_VERSION);
16017
16018    snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
16019        ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION)     ? "Single"  :
16020         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD)   ? "MF-SD"   :
16021         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI)   ? "MF-SI"   :
16022         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
16023                                                                "Unknown"));
16024    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
16025                    CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
16026                    "multifunction vnics per port");
16027
16028    snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
16029        ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
16030         (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
16031         (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
16032                                              "???GT/s"),
16033        sc->devinfo.pcie_link_width);
16034
16035    sc->debug = bxe_debug;
16036
16037    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
16038                      CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
16039                      "bootcode version");
16040    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
16041                      CTLFLAG_RD, sc->fw_ver_str, 0,
16042                      "firmware version");
16043    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
16044                      CTLFLAG_RD, sc->mf_mode_str, 0,
16045                      "multifunction mode");
16046    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
16047                      CTLFLAG_RD, sc->mac_addr_str, 0,
16048                      "mac address");
16049    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
16050                      CTLFLAG_RD, sc->pci_link_str, 0,
16051                      "pci link status");
16052    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug",
16053                    CTLFLAG_RW, &sc->debug,
16054                    "debug logging mode");
16055
16056    sc->trigger_grcdump = 0;
16057    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump",
16058                   CTLFLAG_RW, &sc->trigger_grcdump, 0,
16059                   "trigger grcdump should be invoked"
16060                   "  before collecting grcdump");
16061
16062    sc->grcdump_started = 0;
16063    sc->grcdump_done = 0;
16064    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done",
16065                   CTLFLAG_RD, &sc->grcdump_done, 0,
16066                   "set by driver when grcdump is done");
16067
16068    sc->rx_budget = bxe_rx_budget;
16069    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
16070                    CTLFLAG_RW, &sc->rx_budget, 0,
16071                    "rx processing budget");
16072
16073    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param",
16074        CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
16075        bxe_sysctl_pauseparam, "IU",
16076        "need pause frames- DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8");
16077
16078
16079    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
16080        CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
16081        bxe_sysctl_state, "IU", "dump driver state");
16082
16083    for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
16084        SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
16085            bxe_eth_stats_arr[i].string,
16086            CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
16087            bxe_sysctl_eth_stat, "LU", bxe_eth_stats_arr[i].string);
16088    }
16089
16090    /* add a new parent node for all queues "dev.bxe.#.queue" */
16091    queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
16092        CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "queue");
16093    queue_top_children = SYSCTL_CHILDREN(queue_top);
16094
16095    for (i = 0; i < sc->num_queues; i++) {
16096        /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
16097        snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
16098        queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
16099            queue_num_buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "single queue");
16100        queue_children = SYSCTL_CHILDREN(queue);
16101
16102        for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
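            /* encode: queue index in the upper 16 bits, stat index in the lower 16 */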
16103            q_stat = ((i << 16) | j);
16104            SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
16105                 bxe_eth_q_stats_arr[j].string,
16106                 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, q_stat,
16107                 bxe_sysctl_eth_q_stat, "LU", bxe_eth_q_stats_arr[j].string);
16108        }
16109    }
16110}
16111
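/*
 * Allocate one buf_ring per fastpath queue for the multi-queue TX path.
 * On failure the caller is expected to unwind with bxe_free_buf_rings(),
 * which skips any queue whose ring was never allocated.
 */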
16112static int
16113bxe_alloc_buf_rings(struct bxe_softc *sc)
16114{
16115    int i;
16116    struct bxe_fastpath *fp;
16117
16118    for (i = 0; i < sc->num_queues; i++) {
16119
16120        fp = &sc->fp[i];
16121
16122        fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
16123                                   M_NOWAIT, &fp->tx_mtx);
16124        if (fp->tx_br == NULL)
16125            return (-1);
16126    }
16127
16128    return (0);
16129}
16130
16131static void
16132bxe_free_buf_rings(struct bxe_softc *sc)
16133{
16134    int i;
16135    struct bxe_fastpath *fp;
16136
16137    for (i = 0; i < sc->num_queues; i++) {
16138
16139        fp = &sc->fp[i];
16140
16141        if (fp->tx_br) {
16142            buf_ring_free(fp->tx_br, M_DEVBUF);
16143            fp->tx_br = NULL;
16144        }
16145    }
16146}
16147
16148static void
16149bxe_init_fp_mutexs(struct bxe_softc *sc)
16150{
16151    int i;
16152    struct bxe_fastpath *fp;
16153
16154    for (i = 0; i < sc->num_queues; i++) {
16155
16156        fp = &sc->fp[i];
16157
16158        snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
16159            "bxe%d_fp%d_tx_lock", sc->unit, i);
16160        mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
16161
16162        snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
16163            "bxe%d_fp%d_rx_lock", sc->unit, i);
16164        mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
16165    }
16166}
16167
16168static void
16169bxe_destroy_fp_mutexs(struct bxe_softc *sc)
16170{
16171    int i;
16172    struct bxe_fastpath *fp;
16173
16174    for (i = 0; i < sc->num_queues; i++) {
16175
16176        fp = &sc->fp[i];
16177
16178        if (mtx_initialized(&fp->tx_mtx)) {
16179            mtx_destroy(&fp->tx_mtx);
16180        }
16181
16182        if (mtx_initialized(&fp->rx_mtx)) {
16183            mtx_destroy(&fp->rx_mtx);
16184        }
16185    }
16186}
16187
16188
16189/*
16190 * Device attach function.
16191 *
16192 * Allocates device resources, performs secondary chip identification, and
16193 * initializes driver instance variables. This function is called from driver
16194 * load after a successful probe.
16195 *
16196 * Returns:
16197 *   0 = Success, >0 = Failure
16198 */
16199static int
16200bxe_attach(device_t dev)
16201{
16202    struct bxe_softc *sc;
16203
16204    sc = device_get_softc(dev);
16205
16206    BLOGD(sc, DBG_LOAD, "Starting attach...\n");
16207
16208    sc->state = BXE_STATE_CLOSED;
16209
16210    sc->dev  = dev;
16211    sc->unit = device_get_unit(dev);
16212
16213    BLOGD(sc, DBG_LOAD, "softc = %p\n", sc);
16214
16215    sc->pcie_bus    = pci_get_bus(dev);
16216    sc->pcie_device = pci_get_slot(dev);
16217    sc->pcie_func   = pci_get_function(dev);
16218
16219    /* enable bus master capability */
16220    pci_enable_busmaster(dev);
16221
16222    /* get the BARs */
16223    if (bxe_allocate_bars(sc) != 0) {
16224        return (ENXIO);
16225    }
16226
16227    /* initialize the mutexes */
16228    bxe_init_mutexes(sc);
16229
16230    /* prepare the periodic callout */
16231    callout_init(&sc->periodic_callout, 1);
16232
16233    /* prepare the chip taskqueue */
16234    sc->chip_tq_flags = CHIP_TQ_NONE;
16235    snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
16236             "bxe%d_chip_tq", sc->unit);
16237    TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
16238    sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
16239                                   taskqueue_thread_enqueue,
16240                                   &sc->chip_tq);
16241    taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
16242                            "%s", sc->chip_tq_name);
16243
16244    TIMEOUT_TASK_INIT(taskqueue_thread,
16245        &sc->sp_err_timeout_task, 0, bxe_sp_err_timeout_task,  sc);
16246
16247
16248    /* get device info and set params */
16249    if (bxe_get_device_info(sc) != 0) {
16250        BLOGE(sc, "getting device info\n");
16251        bxe_deallocate_bars(sc);
16252        pci_disable_busmaster(dev);
16253        return (ENXIO);
16254    }
16255
16256    /* get final misc params */
16257    bxe_get_params(sc);
16258
16259    /* set the default MTU (changed via ifconfig) */
16260    sc->mtu = ETHERMTU;
16261
16262    bxe_set_modes_bitmap(sc);
16263
16264    /* XXX
16265     * If in AFEX mode and the function is configured for FCoE
16266     * then bail... no L2 allowed.
16267     */
16268
16269    /* get phy settings from shmem and 'and' against admin settings */
16270    bxe_get_phy_info(sc);
16271
16272    /* initialize the FreeBSD ifnet interface */
16273    if (bxe_init_ifnet(sc) != 0) {
16274        bxe_release_mutexes(sc);
16275        bxe_deallocate_bars(sc);
16276        pci_disable_busmaster(dev);
16277        return (ENXIO);
16278    }
16279
16280    if (bxe_add_cdev(sc) != 0) {
16281        if (sc->ifp != NULL) {
16282            ether_ifdetach(sc->ifp);
16283        }
16284        ifmedia_removeall(&sc->ifmedia);
16285        bxe_release_mutexes(sc);
16286        bxe_deallocate_bars(sc);
16287        pci_disable_busmaster(dev);
16288        return (ENXIO);
16289    }
16290
16291    /* allocate device interrupts */
16292    if (bxe_interrupt_alloc(sc) != 0) {
16293        bxe_del_cdev(sc);
16294        if (sc->ifp != NULL) {
16295            ether_ifdetach(sc->ifp);
16296        }
16297        ifmedia_removeall(&sc->ifmedia);
16298        bxe_release_mutexes(sc);
16299        bxe_deallocate_bars(sc);
16300        pci_disable_busmaster(dev);
16301        return (ENXIO);
16302    }
16303
16304    bxe_init_fp_mutexs(sc);
16305
16306    if (bxe_alloc_buf_rings(sc) != 0) {
        bxe_free_buf_rings(sc);
16308        bxe_interrupt_free(sc);
16309        bxe_del_cdev(sc);
16310        if (sc->ifp != NULL) {
16311            ether_ifdetach(sc->ifp);
16312        }
16313        ifmedia_removeall(&sc->ifmedia);
16314        bxe_release_mutexes(sc);
16315        bxe_deallocate_bars(sc);
16316        pci_disable_busmaster(dev);
16317        return (ENXIO);
16318    }
16319
16320    /* allocate ilt */
16321    if (bxe_alloc_ilt_mem(sc) != 0) {
        bxe_free_buf_rings(sc);
16323        bxe_interrupt_free(sc);
16324        bxe_del_cdev(sc);
16325        if (sc->ifp != NULL) {
16326            ether_ifdetach(sc->ifp);
16327        }
16328        ifmedia_removeall(&sc->ifmedia);
16329        bxe_release_mutexes(sc);
16330        bxe_deallocate_bars(sc);
16331        pci_disable_busmaster(dev);
16332        return (ENXIO);
16333    }
16334
16335    /* allocate the host hardware/software hsi structures */
16336    if (bxe_alloc_hsi_mem(sc) != 0) {
16337        bxe_free_ilt_mem(sc);
        bxe_free_buf_rings(sc);
16339        bxe_interrupt_free(sc);
16340        bxe_del_cdev(sc);
16341        if (sc->ifp != NULL) {
16342            ether_ifdetach(sc->ifp);
16343        }
16344        ifmedia_removeall(&sc->ifmedia);
16345        bxe_release_mutexes(sc);
16346        bxe_deallocate_bars(sc);
16347        pci_disable_busmaster(dev);
16348        return (ENXIO);
16349    }
16350
16351    /* need to reset chip if UNDI was active */
16352    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
16353        /* init fw_seq */
16354        sc->fw_seq =
16355            (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
16356             DRV_MSG_SEQ_NUMBER_MASK);
16357        BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
16358        bxe_prev_unload(sc);
16359    }
16360
16361#if 1
16362    /* XXX */
16363    bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16364#else
16365    if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) &&
16366        SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) &&
16367        SHMEM2_RD(sc, dcbx_lldp_params_offset) &&
16368        SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) {
16369        bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON);
16370        bxe_dcbx_init_params(sc);
16371    } else {
16372        bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF);
16373    }
16374#endif
16375
16376    /* calculate qm_cid_count */
16377    sc->qm_cid_count = bxe_set_qm_cid_count(sc);
16378    BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
16379
16380    sc->max_cos = 1;
16381    bxe_init_multi_cos(sc);
16382
16383    bxe_add_sysctls(sc);
16384
16385    return (0);
16386}
16387
16388/*
16389 * Device detach function.
16390 *
16391 * Stops the controller, resets the controller, and releases resources.
16392 *
16393 * Returns:
16394 *   0 = Success, >0 = Failure
16395 */
16396static int
16397bxe_detach(device_t dev)
16398{
16399    struct bxe_softc *sc;
16400    if_t ifp;
16401
16402    sc = device_get_softc(dev);
16403
16404    BLOGD(sc, DBG_LOAD, "Starting detach...\n");
16405
16406    ifp = sc->ifp;
16407    if (ifp != NULL && if_vlantrunkinuse(ifp)) {
16408        BLOGE(sc, "Cannot detach while VLANs are in use.\n");
16409        return(EBUSY);
16410    }
16411
16412    bxe_del_cdev(sc);
16413
16414    /* stop the periodic callout */
16415    bxe_periodic_stop(sc);
16416
16417    /* stop the chip taskqueue */
16418    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16419    if (sc->chip_tq) {
16420        taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16421        taskqueue_free(sc->chip_tq);
16422        sc->chip_tq = NULL;
16423        taskqueue_drain_timeout(taskqueue_thread,
16424            &sc->sp_err_timeout_task);
16425    }
16426
16427    /* stop and reset the controller if it was open */
16428    if (sc->state != BXE_STATE_CLOSED) {
16429        BXE_CORE_LOCK(sc);
16430        bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
16431        sc->state = BXE_STATE_DISABLED;
16432        BXE_CORE_UNLOCK(sc);
16433    }
16434
16435    /* release the network interface */
16436    if (ifp != NULL) {
16437        ether_ifdetach(ifp);
16438    }
16439    ifmedia_removeall(&sc->ifmedia);
16440
16441    /* XXX do the following based on driver state... */
16442
16443    /* free the host hardware/software hsi structures */
16444    bxe_free_hsi_mem(sc);
16445
16446    /* free ilt */
16447    bxe_free_ilt_mem(sc);
16448
16449    bxe_free_buf_rings(sc);
16450
16451    /* release the interrupts */
16452    bxe_interrupt_free(sc);
16453
16454    /* Release the mutexes*/
16455    bxe_destroy_fp_mutexs(sc);
16456    bxe_release_mutexes(sc);
16457
16458
16459    /* Release the PCIe BAR mapped memory */
16460    bxe_deallocate_bars(sc);
16461
16462    /* Release the FreeBSD interface. */
16463    if (sc->ifp != NULL) {
16464        if_free(sc->ifp);
16465    }
16466
16467    pci_disable_busmaster(dev);
16468
16469    return (0);
16470}
16471
16472/*
16473 * Device shutdown function.
16474 *
16475 * Stops and resets the controller.
16476 *
16477 * Returns:
 *   0 = Success
16479 */
16480static int
16481bxe_shutdown(device_t dev)
16482{
16483    struct bxe_softc *sc;
16484
16485    sc = device_get_softc(dev);
16486
16487    BLOGD(sc, DBG_LOAD, "Starting shutdown...\n");
16488
16489    /* stop the periodic callout */
16490    bxe_periodic_stop(sc);
16491
16492    if (sc->state != BXE_STATE_CLOSED) {
        BXE_CORE_LOCK(sc);
        bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE);
        BXE_CORE_UNLOCK(sc);
16496    }
16497
16498    return (0);
16499}
16500
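/*
 * Acknowledge a status block through the IGU. Each status block owns an
 * 8-byte command cell, so the target address is the IGU base address plus
 * (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8.
 */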
16501void
16502bxe_igu_ack_sb(struct bxe_softc *sc,
16503               uint8_t          igu_sb_id,
16504               uint8_t          segment,
16505               uint16_t         index,
16506               uint8_t          op,
16507               uint8_t          update)
16508{
16509    uint32_t igu_addr = sc->igu_base_addr;
16510    igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
16511    bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16512}
16513
16514static void
16515bxe_igu_clear_sb_gen(struct bxe_softc *sc,
16516                     uint8_t          func,
16517                     uint8_t          idu_sb_id,
16518                     uint8_t          is_pf)
16519{
16520    uint32_t data, ctl, cnt = 100;
16521    uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
16522    uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
16523    uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
16524    uint32_t sb_bit =  1 << (idu_sb_id%32);
16525    uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
16526    uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
16527
16528    /* Not supported in BC mode */
16529    if (CHIP_INT_MODE_IS_BC(sc)) {
16530        return;
16531    }
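    /*
     * The cleanup handshake is three steps: write the cleanup command data
     * word, write the control word addressed to the target function, then
     * poll the per-SB cleanup bit in the ack register until it is set.
     */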
16532
16533    data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
16534             IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
16535            IGU_REGULAR_CLEANUP_SET |
16536            IGU_REGULAR_BCLEANUP);
16537
16538    ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
16539           (func_encode << IGU_CTRL_REG_FID_SHIFT) |
16540           (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
16541
16542    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16543            data, igu_addr_data);
16544    REG_WR(sc, igu_addr_data, data);
16545
16546    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16547                      BUS_SPACE_BARRIER_WRITE);
16548    mb();
16549
16550    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
16551            ctl, igu_addr_ctl);
16552    REG_WR(sc, igu_addr_ctl, ctl);
16553
16554    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16555                      BUS_SPACE_BARRIER_WRITE);
16556    mb();
16557
16558    /* wait for clean up to finish */
16559    while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16560        DELAY(20000);
16561    }
16562
16563    if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
16564        BLOGD(sc, DBG_LOAD,
16565              "Unable to finish IGU cleanup: "
16566              "idu_sb_id %d offset %d bit %d (cnt %d)\n",
16567              idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
16568    }
16569}
16570
16571static void
16572bxe_igu_clear_sb(struct bxe_softc *sc,
16573                 uint8_t          idu_sb_id)
16574{
16575    bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
16576}
16577
16584/*******************/
16585/* ECORE CALLBACKS */
16586/*******************/
16587
16588static void
16589bxe_reset_common(struct bxe_softc *sc)
16590{
16591    uint32_t val = 0x1400;
16592
16593    /* reset_common */
16594    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);
16595
16596    if (CHIP_IS_E3(sc)) {
16597        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
16598        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
16599    }
16600
16601    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
16602}
16603
16604static void
16605bxe_common_init_phy(struct bxe_softc *sc)
16606{
16607    uint32_t shmem_base[2];
16608    uint32_t shmem2_base[2];
16609
16610    /* Avoid common init in case MFW supports LFA */
16611    if (SHMEM2_RD(sc, size) >
16612        (uint32_t)offsetof(struct shmem2_region,
16613                           lfa_host_addr[SC_PORT(sc)])) {
16614        return;
16615    }
16616
16617    shmem_base[0]  = sc->devinfo.shmem_base;
16618    shmem2_base[0] = sc->devinfo.shmem2_base;
16619
16620    if (!CHIP_IS_E1x(sc)) {
16621        shmem_base[1]  = SHMEM2_RD(sc, other_shmem_base_addr);
16622        shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
16623    }
16624
16625    bxe_acquire_phy_lock(sc);
16626    elink_common_init_phy(sc, shmem_base, shmem2_base,
16627                          sc->devinfo.chip_id, 0);
16628    bxe_release_phy_lock(sc);
16629}
16630
16631static void
16632bxe_pf_disable(struct bxe_softc *sc)
16633{
16634    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);
16635
16636    val &= ~IGU_PF_CONF_FUNC_EN;
16637
16638    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
16639    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
16640    REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0);
16641}
16642
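/*
 * Program the PXP arbiter using the PCIe max payload size (write order) and
 * max read request size (read order) from the Device Control register,
 * unless the read order is overridden by the mrrs tunable.
 */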
16643static void
16644bxe_init_pxp(struct bxe_softc *sc)
16645{
16646    uint16_t devctl;
16647    int r_order, w_order;
16648
16649    devctl = bxe_pcie_capability_read(sc, PCIER_DEVICE_CTL, 2);
16650
16651    BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl);
16652
16653    w_order = ((devctl & PCIEM_CTL_MAX_PAYLOAD) >> 5);
16654
16655    if (sc->mrrs == -1) {
16656        r_order = ((devctl & PCIEM_CTL_MAX_READ_REQUEST) >> 12);
16657    } else {
16658        BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16659        r_order = sc->mrrs;
16660    }
16661
16662    ecore_init_pxp_arb(sc, r_order, w_order);
16663}
16664
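/*
 * Each absolute function has its own PGL pretend register. The address is
 * derived from function 0's register plus the function index times the
 * F0->F1 stride, e.g. absolute function 2 uses F0 + 2 * (F1 - F0).
 */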
16665static uint32_t
16666bxe_get_pretend_reg(struct bxe_softc *sc)
16667{
16668    uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
16669    uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
16670    return (base + (SC_ABS_FUNC(sc)) * stride);
16671}
16672
16673/*
16674 * Called only on E1H or E2.
16675 * When pretending to be PF, the pretend value is the function number 0..7.
16676 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
16677 * combination.
16678 */
16679static int
16680bxe_pretend_func(struct bxe_softc *sc,
16681                 uint16_t         pretend_func_val)
16682{
16683    uint32_t pretend_reg;
16684
16685    if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
16686        return (-1);
16687    }
16688
16689    /* get my own pretend register */
16690    pretend_reg = bxe_get_pretend_reg(sc);
16691    REG_WR(sc, pretend_reg, pretend_func_val);
16692    REG_RD(sc, pretend_reg);
16693    return (0);
16694}
16695
16696static void
16697bxe_iov_init_dmae(struct bxe_softc *sc)
16698{
16699    return;
16700}
16701
16702static void
16703bxe_iov_init_dq(struct bxe_softc *sc)
16704{
16705    return;
16706}
16707
16708/* send a NIG loopback debug packet */
16709static void
16710bxe_lb_pckt(struct bxe_softc *sc)
16711{
16712    uint32_t wb_write[3];
16713
16714    /* Ethernet source and destination addresses */
16715    wb_write[0] = 0x55555555;
16716    wb_write[1] = 0x55555555;
16717    wb_write[2] = 0x20;     /* SOP */
16718    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16719
16720    /* NON-IP protocol */
16721    wb_write[0] = 0x09000000;
16722    wb_write[1] = 0x55555555;
16723    wb_write[2] = 0x10;     /* EOP, eop_bvalid = 0 */
16724    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
16725}
16726
16727/*
16728 * Some of the internal memories are not directly readable from the driver.
16729 * To test them we send debug packets.
16730 */
16731static int
16732bxe_int_mem_test(struct bxe_softc *sc)
16733{
16734    int factor;
16735    int count, i;
16736    uint32_t val = 0;
16737
16738    if (CHIP_REV_IS_FPGA(sc)) {
16739        factor = 120;
16740    } else if (CHIP_REV_IS_EMUL(sc)) {
16741        factor = 200;
16742    } else {
16743        factor = 1;
16744    }
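    /*
     * 'factor' stretches the polling budgets below so that the much slower
     * FPGA and emulation platforms get proportionally more time.
     */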
16745
16746    /* disable inputs of parser neighbor blocks */
16747    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16748    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16749    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16750    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16751
16752    /*  write 0 to parser credits for CFC search request */
16753    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16754
16755    /* send Ethernet packet */
16756    bxe_lb_pckt(sc);
16757
16758    /* TODO do i reset NIG statistic? */
16759    /* Wait until NIG register shows 1 packet of size 0x10 */
16760    count = 1000 * factor;
16761    while (count) {
16762        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16763        val = *BXE_SP(sc, wb_data[0]);
16764        if (val == 0x10) {
16765            break;
16766        }
16767
16768        DELAY(10000);
16769        count--;
16770    }
16771
16772    if (val != 0x10) {
16773        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16774        return (-1);
16775    }
16776
16777    /* wait until PRS register shows 1 packet */
16778    count = (1000 * factor);
16779    while (count) {
16780        val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16781        if (val == 1) {
16782            break;
16783        }
16784
16785        DELAY(10000);
16786        count--;
16787    }
16788
16789    if (val != 0x1) {
16790        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16791        return (-2);
16792    }
16793
16794    /* Reset and init BRB, PRS */
16795    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16796    DELAY(50000);
16797    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16798    DELAY(50000);
16799    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16800    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16801
16802    /* Disable inputs of parser neighbor blocks */
16803    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
16804    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
16805    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
16806    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);
16807
16808    /* Write 0 to parser credits for CFC search request */
16809    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
16810
16811    /* send 10 Ethernet packets */
16812    for (i = 0; i < 10; i++) {
16813        bxe_lb_pckt(sc);
16814    }
16815
16816    /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
16817    count = (1000 * factor);
16818    while (count) {
16819        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
16820        val = *BXE_SP(sc, wb_data[0]);
16821        if (val == 0xb0) {
16822            break;
16823        }
16824
16825        DELAY(10000);
16826        count--;
16827    }
16828
16829    if (val != 0xb0) {
16830        BLOGE(sc, "NIG timeout val=0x%x\n", val);
16831        return (-3);
16832    }
16833
16834    /* Wait until PRS register shows 2 packets */
16835    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16836    if (val != 2) {
16837        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16838    }
16839
16840    /* Write 1 to parser credits for CFC search request */
16841    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
16842
16843    /* Wait until PRS register shows 3 packets */
16844    DELAY(10000 * factor);
16845
    /* check that the PRS register now shows 3 packets */
16847    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
16848    if (val != 3) {
16849        BLOGE(sc, "PRS timeout val=0x%x\n", val);
16850    }
16851
16852    /* clear NIG EOP FIFO */
16853    for (i = 0; i < 11; i++) {
16854        REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
16855    }
16856
16857    val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
16858    if (val != 1) {
16859        BLOGE(sc, "clear of NIG failed val=0x%x\n", val);
16860        return (-4);
16861    }
16862
16863    /* Reset and init BRB, PRS, NIG */
16864    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
16865    DELAY(50000);
16866    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
16867    DELAY(50000);
16868    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
16869    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
16870    if (!CNIC_SUPPORT(sc)) {
16871        /* set NIC mode */
16872        REG_WR(sc, PRS_REG_NIC_MODE, 1);
16873    }
16874
16875    /* Enable inputs of parser neighbor blocks */
16876    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
16877    REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
16878    REG_WR(sc, CFC_REG_DEBUG0, 0x0);
16879    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);
16880
16881    return (0);
16882}
16883
16884static void
16885bxe_setup_fan_failure_detection(struct bxe_softc *sc)
16886{
16887    int is_required;
16888    uint32_t val;
16889    int port;
16890
16891    is_required = 0;
16892    val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) &
16893           SHARED_HW_CFG_FAN_FAILURE_MASK);
16894
16895    if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) {
16896        is_required = 1;
16897    }
16898    /*
16899     * The fan failure mechanism is usually related to the PHY type since
16900     * the power consumption of the board is affected by the PHY. Currently,
16901     * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
16902     */
16903    else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) {
16904        for (port = PORT_0; port < PORT_MAX; port++) {
16905            is_required |= elink_fan_failure_det_req(sc,
16906                                                     sc->devinfo.shmem_base,
16907                                                     sc->devinfo.shmem2_base,
16908                                                     port);
16909        }
16910    }
16911
16912    BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required);
16913
16914    if (is_required == 0) {
16915        return;
16916    }
16917
16918    /* Fan failure is indicated by SPIO 5 */
16919    bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
16920
16921    /* set to active low mode */
16922    val = REG_RD(sc, MISC_REG_SPIO_INT);
16923    val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
16924    REG_WR(sc, MISC_REG_SPIO_INT, val);
16925
16926    /* enable interrupt to signal the IGU */
16927    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
16928    val |= MISC_SPIO_SPIO5;
16929    REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val);
16930}
16931
16932static void
16933bxe_enable_blocks_attention(struct bxe_softc *sc)
16934{
16935    uint32_t val;
16936
16937    REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
16938    if (!CHIP_IS_E1x(sc)) {
16939        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
16940    } else {
16941        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
16942    }
16943    REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
16944    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
16945    /*
16946     * mask read length error interrupts in brb for parser
16947     * (parsing unit and 'checksum and crc' unit)
16948     * these errors are legal (PU reads fixed length and CAC can cause
16949     * read length error on truncated packets)
16950     */
16951    REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
16952    REG_WR(sc, QM_REG_QM_INT_MASK, 0);
16953    REG_WR(sc, TM_REG_TM_INT_MASK, 0);
16954    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
16955    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
16956    REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
16957/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
16958/*      REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
16959    REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
16960    REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
16961    REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
16962/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
16963/*      REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
16964    REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
16965    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
16966    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
16967    REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
16968/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
16969/*      REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */
16970
16971    val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
16972           PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
16973           PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
16974    if (!CHIP_IS_E1x(sc)) {
16975        val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
16976                PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
16977    }
16978    REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);
16979
16980    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
16981    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
16982    REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
16983/*      REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */
16984
16985    if (!CHIP_IS_E1x(sc)) {
16986        /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
16987        REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
16988    }
16989
16990    REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
16991    REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
16992/*      REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
16993    REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18);     /* bit 3,4 masked */
16994}
16995
16996/**
16997 * bxe_init_hw_common - initialize the HW at the COMMON phase.
16998 *
16999 * @sc:     driver handle
17000 */
17001static int
17002bxe_init_hw_common(struct bxe_softc *sc)
17003{
17004    uint8_t abs_func_id;
17005    uint32_t val;
17006
17007    BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
17008          SC_ABS_FUNC(sc));
17009
17010    /*
17011     * take the RESET lock to protect undi_unload flow from accessing
17012     * registers while we are resetting the chip
17013     */
17014    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
17015
17016    bxe_reset_common(sc);
17017
17018    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);
17019
17020    val = 0xfffc;
17021    if (CHIP_IS_E3(sc)) {
17022        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
17023        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
17024    }
17025
17026    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);
17027
17028    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
17029
17030    ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
17031    BLOGD(sc, DBG_LOAD, "after misc block init\n");
17032
17033    if (!CHIP_IS_E1x(sc)) {
        /*
         * In 4-port or 2-port mode we need to turn off master-enable for
         * everyone and then turn it back on for ourselves. So, we disregard
         * multi-function and always disable all functions on the given path;
         * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1.
         */
17040        for (abs_func_id = SC_PATH(sc);
17041             abs_func_id < (E2_FUNC_MAX * 2);
17042             abs_func_id += 2) {
17043            if (abs_func_id == SC_ABS_FUNC(sc)) {
17044                REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17045                continue;
17046            }
17047
17048            bxe_pretend_func(sc, abs_func_id);
17049
17050            /* clear pf enable */
17051            bxe_pf_disable(sc);
17052
17053            bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17054        }
17055    }
17056
17057    BLOGD(sc, DBG_LOAD, "after pf disable\n");
17058
17059    ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);
17060
17061    if (CHIP_IS_E1(sc)) {
17062        /*
17063         * enable HW interrupt from PXP on USDM overflow
17064         * bit 16 on INT_MASK_0
17065         */
17066        REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
17067    }
17068
17069    ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
17070    bxe_init_pxp(sc);
17071
17072#ifdef __BIG_ENDIAN
17073    REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
17074    REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
17075    REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
17076    REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
17077    REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
17078    /* make sure this value is 0 */
17079    REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);
17080
17081    //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
17082    REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
17083    REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
17084    REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
17085    REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
17086#endif
17087
17088    ecore_ilt_init_page_size(sc, INITOP_SET);
17089
17090    if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
17091        REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
17092    }
17093
    /* let the HW do its magic... */
17095    DELAY(100000);
17096
17097    /* finish PXP init */
17098    val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
17099    if (val != 1) {
17100        BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n",
17101            val);
17102        return (-1);
17103    }
17104    val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
17105    if (val != 1) {
17106        BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val);
17107        return (-1);
17108    }
17109
17110    BLOGD(sc, DBG_LOAD, "after pxp init\n");
17111
17112    /*
17113     * Timer bug workaround for E2 only. We need to set the entire ILT to have
17114     * entries with value "0" and valid bit on. This needs to be done by the
17115     * first PF that is loaded in a path (i.e. common phase)
17116     */
17117    if (!CHIP_IS_E1x(sc)) {
17118/*
17119 * In E2 there is a bug in the timers block that can cause function 6 / 7
17120 * (i.e. vnic3) to start even if it is marked as "scan-off".
17121 * This occurs when a different function (func2,3) is being marked
17122 * as "scan-off". Real-life scenario for example: if a driver is being
17123 * load-unloaded while func6,7 are down. This will cause the timer to access
17124 * the ilt, translate to a logical address and send a request to read/write.
17125 * Since the ilt for the function that is down is not valid, this will cause
17126 * a translation error which is unrecoverable.
17127 * The Workaround is intended to make sure that when this happens nothing
17128 * fatal will occur. The workaround:
17129 *  1.  First PF driver which loads on a path will:
17130 *      a.  After taking the chip out of reset, by using pretend,
17131 *          it will write "0" to the following registers of
17132 *          the other vnics.
17133 *          REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
17134 *          REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
17135 *          REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
17136 *          And for itself it will write '1' to
17137 *          PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
17138 *          dmae-operations (writing to pram for example.)
17139 *          note: can be done for only function 6,7 but cleaner this
17140 *            way.
17141 *      b.  Write zero+valid to the entire ILT.
17142 *      c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
17143 *          VNIC3 (of that port). The range allocated will be the
 *          entire ILT. This is needed to prevent an ILT range error.
17145 *  2.  Any PF driver load flow:
17146 *      a.  ILT update with the physical addresses of the allocated
17147 *          logical pages.
17148 *      b.  Wait 20msec. - note that this timeout is needed to make
17149 *          sure there are no requests in one of the PXP internal
17150 *          queues with "old" ILT addresses.
17151 *      c.  PF enable in the PGLC.
17152 *      d.  Clear the was_error of the PF in the PGLC. (could have
17153 *          occurred while driver was down)
17154 *      e.  PF enable in the CFC (WEAK + STRONG)
17155 *      f.  Timers scan enable
17156 *  3.  PF driver unload flow:
17157 *      a.  Clear the Timers scan_en.
17158 *      b.  Polling for scan_on=0 for that PF.
17159 *      c.  Clear the PF enable bit in the PXP.
17160 *      d.  Clear the PF enable in the CFC (WEAK + STRONG)
17161 *      e.  Write zero+valid to all ILT entries (The valid bit must
17162 *          stay set)
17163 *      f.  If this is VNIC 3 of a port then also init
17164 *          first_timers_ilt_entry to zero and last_timers_ilt_entry
17165 *          to the last entry in the ILT.
17166 *
17167 *      Notes:
 *      Currently the PF error in the PGLC is non-recoverable.
 *      In the future there will be a recovery routine for this error.
17170 *      Currently attention is masked.
17171 *      Having an MCP lock on the load/unload process does not guarantee that
17172 *      there is no Timer disable during Func6/7 enable. This is because the
17173 *      Timers scan is currently being cleared by the MCP on FLR.
17174 *      Step 2.d can be done only for PF6/7 and the driver can also check if
17175 *      there is error before clearing it. But the flow above is simpler and
17176 *      more general.
17177 *      All ILT entries are written by zero+valid and not just PF6/7
17178 *      ILT entries since in the future the ILT entries allocation for
17179 *      PF-s might be dynamic.
17180 */
17181        struct ilt_client_info ilt_cli;
17182        struct ecore_ilt ilt;
17183
17184        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
17185        memset(&ilt, 0, sizeof(struct ecore_ilt));
17186
17187        /* initialize dummy TM client */
17188        ilt_cli.start      = 0;
17189        ilt_cli.end        = ILT_NUM_PAGE_ENTRIES - 1;
17190        ilt_cli.client_num = ILT_CLIENT_TM;
17191
17192        /*
17193         * Step 1: set zeroes to all ilt page entries with valid bit on
17194         * Step 2: set the timers first/last ilt entry to point
17195         * to the entire range to prevent ILT range error for 3rd/4th
17196         * vnic (this code assumes existence of the vnic)
17197         *
17198         * both steps performed by call to ecore_ilt_client_init_op()
17199         * with dummy TM client
17200         *
17201         * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
         * and its counterpart are split registers
17203         */
17204
17205        bxe_pretend_func(sc, (SC_PATH(sc) + 6));
17206        ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
17207        bxe_pretend_func(sc, SC_ABS_FUNC(sc));
17208
17209        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
17210        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
17211        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
17212    }
17213
17214    REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
17215    REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);
17216
17217    if (!CHIP_IS_E1x(sc)) {
17218        int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
17219                     (CHIP_REV_IS_FPGA(sc) ? 400 : 0);
17220
17221        ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
17222        ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);
17223
        /* let the HW do its magic... */
17225        do {
17226            DELAY(200000);
17227            val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
17228        } while (factor-- && (val != 1));
17229
17230        if (val != 1) {
17231            BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val);
17232            return (-1);
17233        }
17234    }
17235
17236    BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");
17237
17238    ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);
17239
17240    bxe_iov_init_dmae(sc);
17241
17242    /* clean the DMAE memory */
17243    sc->dmae_ready = 1;
17244    ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);
17245
17246    ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);
17247
17248    ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);
17249
17250    ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);
17251
17252    ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);
17253
17254    bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
17255    bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
17256    bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
17257    bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);
17258
17259    ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);
17260
17261    /* QM queues pointers table */
17262    ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
17263
17264    /* soft reset pulse */
17265    REG_WR(sc, QM_REG_SOFT_RESET, 1);
17266    REG_WR(sc, QM_REG_SOFT_RESET, 0);
17267
17268    if (CNIC_SUPPORT(sc))
17269        ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);
17270
17271    ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
17272    REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
17273    if (!CHIP_REV_IS_SLOW(sc)) {
17274        /* enable hw interrupt from doorbell Q */
17275        REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
17276    }
17277
17278    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
17279
17280    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
17281    REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);
17282
17283    if (!CHIP_IS_E1(sc)) {
17284        REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
17285    }
17286
17287    if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
17288        if (IS_MF_AFEX(sc)) {
17289            /*
17290             * configure that AFEX and VLAN headers must be
17291             * received in AFEX mode
17292             */
17293            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
17294            REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
17295            REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
17296            REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
17297            REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
17298        } else {
17299            /*
17300             * Bit-map indicating which L2 hdrs may appear
17301             * after the basic Ethernet header
17302             */
17303            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
17304                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17305        }
17306    }
17307
17308    ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
17309    ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
17310    ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
17311    ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);
17312
17313    if (!CHIP_IS_E1x(sc)) {
17314        /* reset VFC memories */
17315        REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17316               VFC_MEMORIES_RST_REG_CAM_RST |
17317               VFC_MEMORIES_RST_REG_RAM_RST);
17318        REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
17319               VFC_MEMORIES_RST_REG_CAM_RST |
17320               VFC_MEMORIES_RST_REG_RAM_RST);
17321
17322        DELAY(20000);
17323    }
17324
17325    ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
17326    ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
17327    ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
17328    ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);
17329
17330    /* sync semi rtc */
17331    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
17332           0x80000000);
17333    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
17334           0x80000000);
17335
17336    ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
17337    ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
17338    ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);
17339
17340    if (!CHIP_IS_E1x(sc)) {
17341        if (IS_MF_AFEX(sc)) {
17342            /*
17343             * configure that AFEX and VLAN headers must be
17344             * sent in AFEX mode
17345             */
17346            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
17347            REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
17348            REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
17349            REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
17350            REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
17351        } else {
17352            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
17353                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17354        }
17355    }
17356
17357    REG_WR(sc, SRC_REG_SOFT_RST, 1);
17358
17359    ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);
17360
17361    if (CNIC_SUPPORT(sc)) {
17362        REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
17363        REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
17364        REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
17365        REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
17366        REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
17367        REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
17368        REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
17369        REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
17370        REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
17371        REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
17372    }
17373    REG_WR(sc, SRC_REG_SOFT_RST, 0);
17374
17375    if (sizeof(union cdu_context) != 1024) {
17376        /* we currently assume that a context is 1024 bytes */
17377        BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
17378              (long)sizeof(union cdu_context));
17379    }
17380
17381    ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
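    /* CDU global params; the 1024 in the low bits corresponds to the cdu_context size checked above */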
17382    val = (4 << 24) + (0 << 12) + 1024;
17383    REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);
17384
17385    ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);
17386
17387    REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
17388    /* enable context validation interrupt from CFC */
17389    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
17390
17391    /* set the thresholds to prevent CFC/CDU race */
17392    REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
17393    ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);
17394
17395    if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
17396        REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
17397    }
17398
17399    ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
17400    ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);
17401
17402    /* Reset PCIE errors for debug */
17403    REG_WR(sc, 0x2814, 0xffffffff);
17404    REG_WR(sc, 0x3820, 0xffffffff);
17405
17406    if (!CHIP_IS_E1x(sc)) {
17407        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
17408               (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
17409                PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
17410        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
17411               (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
17412                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
17413                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
17414        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
17415               (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
17416                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
17417                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
17418    }
17419
17420    ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);
17421
17422    if (!CHIP_IS_E1(sc)) {
        /* in E3 this is done in the per-port section */
17424        if (!CHIP_IS_E3(sc))
17425            REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
17426    }
17427
17428    if (CHIP_IS_E1H(sc)) {
17429        /* not applicable for E2 (and above ...) */
17430        REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
17431    }
17432
17433    if (CHIP_REV_IS_SLOW(sc)) {
17434        DELAY(200000);
17435    }
17436
17437    /* finish CFC init */
17438    val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
17439    if (val != 1) {
17440        BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val);
17441        return (-1);
17442    }
17443    val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
17444    if (val != 1) {
17445        BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val);
17446        return (-1);
17447    }
17448    val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
17449    if (val != 1) {
17450        BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val);
17451        return (-1);
17452    }
17453    REG_WR(sc, CFC_REG_DEBUG0, 0);
17454
17455    if (CHIP_IS_E1(sc)) {
        /* read NIG statistic to see if this is our first bring-up since power up */
17457        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
17458        val = *BXE_SP(sc, wb_data[0]);
17459
17460        /* do internal memory self test */
17461        if ((val == 0) && bxe_int_mem_test(sc)) {
17462            BLOGE(sc, "internal mem self test failed val=0x%x\n", val);
17463            return (-1);
17464        }
17465    }
17466
17467    bxe_setup_fan_failure_detection(sc);
17468
17469    /* clear PXP2 attentions */
17470    REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
17471
17472    bxe_enable_blocks_attention(sc);
17473
17474    if (!CHIP_REV_IS_SLOW(sc)) {
17475        ecore_enable_blocks_parity(sc);
17476    }
17477
17478    if (!BXE_NOMCP(sc)) {
17479        if (CHIP_IS_E1x(sc)) {
17480            bxe_common_init_phy(sc);
17481        }
17482    }
17483
17484    return (0);
17485}
17486
17487/**
17488 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17489 *
17490 * @sc:     driver handle
17491 */
17492static int
17493bxe_init_hw_common_chip(struct bxe_softc *sc)
17494{
17495    int rc = bxe_init_hw_common(sc);
17496
17497    if (rc) {
17498        BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc);
17499        return (rc);
17500    }
17501
    /* In E2 2-PORT mode, the same ext phy is used for the two paths */
17503    if (!BXE_NOMCP(sc)) {
17504        bxe_common_init_phy(sc);
17505    }
17506
17507    return (0);
17508}
17509
17510static int
17511bxe_init_hw_port(struct bxe_softc *sc)
17512{
17513    int port = SC_PORT(sc);
17514    int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
17515    uint32_t low, high;
17516    uint32_t val;
17517
17518    BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);
17519
17520    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
17521
17522    ecore_init_block(sc, BLOCK_MISC, init_phase);
17523    ecore_init_block(sc, BLOCK_PXP, init_phase);
17524    ecore_init_block(sc, BLOCK_PXP2, init_phase);
17525
17526    /*
17527     * Timers bug workaround: disables the pf_master bit in pglue at
17528     * common phase, we need to enable it here before any dmae access are
17529     * attempted. Therefore we manually added the enable-master to the
17530     * port phase (it also happens in the function phase)
17531     */
17532    if (!CHIP_IS_E1x(sc)) {
17533        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
17534    }
17535
17536    ecore_init_block(sc, BLOCK_ATC, init_phase);
17537    ecore_init_block(sc, BLOCK_DMAE, init_phase);
17538    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
17539    ecore_init_block(sc, BLOCK_QM, init_phase);
17540
17541    ecore_init_block(sc, BLOCK_TCM, init_phase);
17542    ecore_init_block(sc, BLOCK_UCM, init_phase);
17543    ecore_init_block(sc, BLOCK_CCM, init_phase);
17544    ecore_init_block(sc, BLOCK_XCM, init_phase);
17545
17546    /* QM cid (connection) count */
17547    ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17548
17549    if (CNIC_SUPPORT(sc)) {
17550        ecore_init_block(sc, BLOCK_TM, init_phase);
17551        REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
17552        REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
17553    }
17554
17555    ecore_init_block(sc, BLOCK_DORQ, init_phase);
17556
17557    ecore_init_block(sc, BLOCK_BRB1, init_phase);
17558
17559    if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
17560        if (IS_MF(sc)) {
17561            low = (BXE_ONE_PORT(sc) ? 160 : 246);
17562        } else if (sc->mtu > 4096) {
17563            if (BXE_ONE_PORT(sc)) {
17564                low = 160;
17565            } else {
17566                val = sc->mtu;
17567                /* (24*1024 + val*4)/256 */
17568                low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
17569            }
17570        } else {
17571            low = (BXE_ONE_PORT(sc) ? 80 : 160);
17572        }
17573        high = (low + 56); /* 14*1024/256 */
17574        REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
17575        REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
17576    }
17577
17578    if (CHIP_IS_MODE_4_PORT(sc)) {
17579        REG_WR(sc, SC_PORT(sc) ?
17580               BRB1_REG_MAC_GUARANTIED_1 :
17581               BRB1_REG_MAC_GUARANTIED_0, 40);
17582    }
17583
17584    ecore_init_block(sc, BLOCK_PRS, init_phase);
17585    if (CHIP_IS_E3B0(sc)) {
17586        if (IS_MF_AFEX(sc)) {
17587            /* configure headers for AFEX mode */
17588            REG_WR(sc, SC_PORT(sc) ?
17589                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17590                   PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
17591            REG_WR(sc, SC_PORT(sc) ?
17592                   PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
17593                   PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
17594            REG_WR(sc, SC_PORT(sc) ?
17595                   PRS_REG_MUST_HAVE_HDRS_PORT_1 :
17596                   PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
17597        } else {
            /*
             * Ovlan headers exist only in multi-function +
             * switch-dependent mode; in switch-independent mode
             * there are no ovlan headers.
             */
17602            REG_WR(sc, SC_PORT(sc) ?
17603                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
17604                   PRS_REG_HDRS_AFTER_BASIC_PORT_0,
17605                   (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17606        }
17607    }
17608
17609    ecore_init_block(sc, BLOCK_TSDM, init_phase);
17610    ecore_init_block(sc, BLOCK_CSDM, init_phase);
17611    ecore_init_block(sc, BLOCK_USDM, init_phase);
17612    ecore_init_block(sc, BLOCK_XSDM, init_phase);
17613
17614    ecore_init_block(sc, BLOCK_TSEM, init_phase);
17615    ecore_init_block(sc, BLOCK_USEM, init_phase);
17616    ecore_init_block(sc, BLOCK_CSEM, init_phase);
17617    ecore_init_block(sc, BLOCK_XSEM, init_phase);
17618
17619    ecore_init_block(sc, BLOCK_UPB, init_phase);
17620    ecore_init_block(sc, BLOCK_XPB, init_phase);
17621
17622    ecore_init_block(sc, BLOCK_PBF, init_phase);
17623
17624    if (CHIP_IS_E1x(sc)) {
17625        /* configure PBF to work without PAUSE mtu 9000 */
17626        REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
17627
17628        /* update threshold */
17629        REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
17630        /* update init credit */
17631        REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17632
17633        /* probe changes */
17634        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
17635        DELAY(50);
17636        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
17637    }
17638
17639    if (CNIC_SUPPORT(sc)) {
17640        ecore_init_block(sc, BLOCK_SRC, init_phase);
17641    }
17642
17643    ecore_init_block(sc, BLOCK_CDU, init_phase);
17644    ecore_init_block(sc, BLOCK_CFC, init_phase);
17645
17646    if (CHIP_IS_E1(sc)) {
17647        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
17648        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
17649    }
17650    ecore_init_block(sc, BLOCK_HC, init_phase);
17651
17652    ecore_init_block(sc, BLOCK_IGU, init_phase);
17653
17654    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
17655    /* init aeu_mask_attn_func_0/1:
17656     *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17657     *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17658     *             bits 4-7 are used for "per vn group attention" */
17659    val = IS_MF(sc) ? 0xF7 : 0x7;
17660    /* Enable DCBX attention for all but E1 */
17661    val |= CHIP_IS_E1(sc) ? 0 : 0x10;
17662    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
17663
17664    ecore_init_block(sc, BLOCK_NIG, init_phase);
17665
17666    if (!CHIP_IS_E1x(sc)) {
17667        /* Bit-map indicating which L2 hdrs may appear after the
17668         * basic Ethernet header
17669         */
17670        if (IS_MF_AFEX(sc)) {
17671            REG_WR(sc, SC_PORT(sc) ?
17672                   NIG_REG_P1_HDRS_AFTER_BASIC :
17673                   NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
17674        } else {
17675            REG_WR(sc, SC_PORT(sc) ?
17676                   NIG_REG_P1_HDRS_AFTER_BASIC :
17677                   NIG_REG_P0_HDRS_AFTER_BASIC,
17678                   IS_MF_SD(sc) ? 7 : 6);
17679        }
17680
17681        if (CHIP_IS_E3(sc)) {
17682            REG_WR(sc, SC_PORT(sc) ?
17683                   NIG_REG_LLH1_MF_MODE :
17684                   NIG_REG_LLH_MF_MODE, IS_MF(sc));
17685        }
17686    }
17687    if (!CHIP_IS_E3(sc)) {
17688        REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
17689    }
17690
17691    if (!CHIP_IS_E1(sc)) {
17692        /* 0x2 disable mf_ov, 0x1 enable */
17693        REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
17694               (IS_MF_SD(sc) ? 0x1 : 0x2));
17695
17696        if (!CHIP_IS_E1x(sc)) {
17697            val = 0;
17698            switch (sc->devinfo.mf_info.mf_mode) {
17699            case MULTI_FUNCTION_SD:
17700                val = 1;
17701                break;
17702            case MULTI_FUNCTION_SI:
17703            case MULTI_FUNCTION_AFEX:
17704                val = 2;
17705                break;
17706            }
17707
17708            REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
17709                        NIG_REG_LLH0_CLS_TYPE), val);
17710        }
17711        REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
17712        REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
17713        REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
17714    }
17715
17716    /* If SPIO5 is set to generate interrupts, enable it for this port */
17717    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
17718    if (val & MISC_SPIO_SPIO5) {
17719        uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
17720                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
17721        val = REG_RD(sc, reg_addr);
17722        val |= AEU_INPUTS_ATTN_BITS_SPIO5;
17723        REG_WR(sc, reg_addr, val);
17724    }
17725
17726    return (0);
17727}
17728
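/*
 * Poll a register until it reads 'expected' or 'poll_count' iterations
 * (FLR_WAIT_INTERVAL usecs apart) elapse; returns the last value read.
 */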
17729static uint32_t
17730bxe_flr_clnup_reg_poll(struct bxe_softc *sc,
17731                       uint32_t         reg,
17732                       uint32_t         expected,
17733                       uint32_t         poll_count)
17734{
17735    uint32_t cur_cnt = poll_count;
17736    uint32_t val;
17737
17738    while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17739        DELAY(FLR_WAIT_INTERVAL);
17740    }
17741
17742    return (val);
17743}
17744
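/*
 * Poll a HW usage counter until it drops to zero; returns 1 (and logs
 * 'msg') if the counter is still non-zero after the poll expires.
 */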
17745static int
17746bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
17747                              uint32_t         reg,
17748                              char             *msg,
17749                              uint32_t         poll_cnt)
17750{
17751    uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
17752
17753    if (val != 0) {
17754        BLOGE(sc, "%s usage count=%d\n", msg, val);
17755        return (1);
17756    }
17757
17758    return (0);
17759}
17760
17761/* Common routines with VF FLR cleanup */
17762static uint32_t
17763bxe_flr_clnup_poll_count(struct bxe_softc *sc)
17764{
17765    /* adjust polling timeout */
17766    if (CHIP_REV_IS_EMUL(sc)) {
17767        return (FLR_POLL_CNT * 2000);
17768    }
17769
17770    if (CHIP_REV_IS_FPGA(sc)) {
17771        return (FLR_POLL_CNT * 120);
17772    }
17773
17774    return (FLR_POLL_CNT);
17775}
17776
17777static int
17778bxe_poll_hw_usage_counters(struct bxe_softc *sc,
17779                           uint32_t         poll_cnt)
17780{
17781    /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17782    if (bxe_flr_clnup_poll_hw_counter(sc,
17783                                      CFC_REG_NUM_LCIDS_INSIDE_PF,
17784                                      "CFC PF usage counter timed out",
17785                                      poll_cnt)) {
17786        return (1);
17787    }
17788
17789    /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17790    if (bxe_flr_clnup_poll_hw_counter(sc,
17791                                      DORQ_REG_PF_USAGE_CNT,
17792                                      "DQ PF usage counter timed out",
17793                                      poll_cnt)) {
17794        return (1);
17795    }
17796
17797    /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17798    if (bxe_flr_clnup_poll_hw_counter(sc,
17799                                      QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc),
17800                                      "QM PF usage counter timed out",
17801                                      poll_cnt)) {
17802        return (1);
17803    }
17804
17805    /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17806    if (bxe_flr_clnup_poll_hw_counter(sc,
17807                                      TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc),
17808                                      "Timers VNIC usage counter timed out",
17809                                      poll_cnt)) {
17810        return (1);
17811    }
17812
17813    if (bxe_flr_clnup_poll_hw_counter(sc,
17814                                      TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc),
17815                                      "Timers NUM_SCANS usage counter timed out",
17816                                      poll_cnt)) {
17817        return (1);
17818    }
17819
    /* Wait for the DMAE PF usage counter to zero */
17821    if (bxe_flr_clnup_poll_hw_counter(sc,
17822                                      dmae_reg_go_c[INIT_DMAE_C(sc)],
17823                                      "DMAE dommand register timed out",
17824                                      poll_cnt)) {
17825        return (1);
17826    }
17827
17828    return (0);
17829}
17830
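/* Helpers to build the SDM operation generator command for the final cleanup */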
17831#define OP_GEN_PARAM(param)                                            \
17832    (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
17833#define OP_GEN_TYPE(type)                                           \
17834    (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
17835#define OP_GEN_AGG_VECT(index)                                             \
17836    (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
17837
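/*
 * Issue the FW "final cleanup" command through the XSDM operation
 * generator and wait for the completion flag in CSTORM memory.
 */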
17838static int
17839bxe_send_final_clnup(struct bxe_softc *sc,
17840                     uint8_t          clnup_func,
17841                     uint32_t         poll_cnt)
17842{
17843    uint32_t op_gen_command = 0;
17844    uint32_t comp_addr = (BAR_CSTRORM_INTMEM +
17845                          CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func));
17846    int ret = 0;
17847
17848    if (REG_RD(sc, comp_addr)) {
17849        BLOGE(sc, "Cleanup complete was not 0 before sending\n");
17850        return (1);
17851    }
17852
17853    op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
17854    op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
17855    op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
17856    op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
17857
17858    BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n");
17859    REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
17860
17861    if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
17862        BLOGE(sc, "FW final cleanup did not succeed\n");
17863        BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n",
17864              (REG_RD(sc, comp_addr)));
17865        bxe_panic(sc, ("FLR cleanup failed\n"));
17866        return (1);
17867    }
17868
    /* Zero the completion for the next FLR */
17870    REG_WR(sc, comp_addr, 0);
17871
17872    return (ret);
17873}
17874
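/*
 * Wait for the PBF to release the transmit buffer credits for queue pN,
 * i.e. for the credit count to return to its initial value (or for an
 * equivalent number of credits to be freed).
 */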
17875static void
17876bxe_pbf_pN_buf_flushed(struct bxe_softc       *sc,
17877                       struct pbf_pN_buf_regs *regs,
17878                       uint32_t               poll_count)
17879{
17880    uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
17881    uint32_t cur_cnt = poll_count;
17882
17883    crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
17884    crd = crd_start = REG_RD(sc, regs->crd);
17885    init_crd = REG_RD(sc, regs->init_crd);
17886
17887    BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
17888    BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
17889    BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
17890
17891    while ((crd != init_crd) &&
17892           ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
17893            (init_crd - crd_start))) {
17894        if (cur_cnt--) {
17895            DELAY(FLR_WAIT_INTERVAL);
17896            crd = REG_RD(sc, regs->crd);
17897            crd_freed = REG_RD(sc, regs->crd_freed);
17898        } else {
17899            BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
17900            BLOGD(sc, DBG_LOAD, "CREDIT[%d]      : c:%x\n", regs->pN, crd);
17901            BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
17902            break;
17903        }
17904    }
17905
17906    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
17907          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17908}
17909
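/*
 * Wait for the PBF command queue for pN to drain, i.e. for the occupied
 * lines to be freed.
 */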
17910static void
17911bxe_pbf_pN_cmd_flushed(struct bxe_softc       *sc,
17912                       struct pbf_pN_cmd_regs *regs,
17913                       uint32_t               poll_count)
17914{
17915    uint32_t occup, to_free, freed, freed_start;
17916    uint32_t cur_cnt = poll_count;
17917
17918    occup = to_free = REG_RD(sc, regs->lines_occup);
17919    freed = freed_start = REG_RD(sc, regs->lines_freed);
17920
17921    BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17922    BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17923
17924    while (occup &&
17925           ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
17926        if (cur_cnt--) {
17927            DELAY(FLR_WAIT_INTERVAL);
17928            occup = REG_RD(sc, regs->lines_occup);
17929            freed = REG_RD(sc, regs->lines_freed);
17930        } else {
17931            BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
17932            BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
17933            BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17934            break;
17935        }
17936    }
17937
17938    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
17939          poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17940}
17941
17942static void
17943bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
17944{
17945    struct pbf_pN_cmd_regs cmd_regs[] = {
17946        {0, (CHIP_IS_E3B0(sc)) ?
17947            PBF_REG_TQ_OCCUPANCY_Q0 :
17948            PBF_REG_P0_TQ_OCCUPANCY,
17949            (CHIP_IS_E3B0(sc)) ?
17950            PBF_REG_TQ_LINES_FREED_CNT_Q0 :
17951            PBF_REG_P0_TQ_LINES_FREED_CNT},
17952        {1, (CHIP_IS_E3B0(sc)) ?
17953            PBF_REG_TQ_OCCUPANCY_Q1 :
17954            PBF_REG_P1_TQ_OCCUPANCY,
17955            (CHIP_IS_E3B0(sc)) ?
17956            PBF_REG_TQ_LINES_FREED_CNT_Q1 :
17957            PBF_REG_P1_TQ_LINES_FREED_CNT},
17958        {4, (CHIP_IS_E3B0(sc)) ?
17959            PBF_REG_TQ_OCCUPANCY_LB_Q :
17960            PBF_REG_P4_TQ_OCCUPANCY,
17961            (CHIP_IS_E3B0(sc)) ?
17962            PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
17963            PBF_REG_P4_TQ_LINES_FREED_CNT}
17964    };
17965
17966    struct pbf_pN_buf_regs buf_regs[] = {
17967        {0, (CHIP_IS_E3B0(sc)) ?
17968            PBF_REG_INIT_CRD_Q0 :
17969            PBF_REG_P0_INIT_CRD ,
17970            (CHIP_IS_E3B0(sc)) ?
17971            PBF_REG_CREDIT_Q0 :
17972            PBF_REG_P0_CREDIT,
17973            (CHIP_IS_E3B0(sc)) ?
17974            PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
17975            PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
17976        {1, (CHIP_IS_E3B0(sc)) ?
17977            PBF_REG_INIT_CRD_Q1 :
17978            PBF_REG_P1_INIT_CRD,
17979            (CHIP_IS_E3B0(sc)) ?
17980            PBF_REG_CREDIT_Q1 :
17981            PBF_REG_P1_CREDIT,
17982            (CHIP_IS_E3B0(sc)) ?
17983            PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
17984            PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
17985        {4, (CHIP_IS_E3B0(sc)) ?
17986            PBF_REG_INIT_CRD_LB_Q :
17987            PBF_REG_P4_INIT_CRD,
17988            (CHIP_IS_E3B0(sc)) ?
17989            PBF_REG_CREDIT_LB_Q :
17990            PBF_REG_P4_CREDIT,
17991            (CHIP_IS_E3B0(sc)) ?
17992            PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
17993            PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
17994    };
17995
17996    int i;
17997
17998    /* Verify the command queues are flushed P0, P1, P4 */
17999    for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
18000        bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
18001    }
18002
18003    /* Verify the transmission buffers are flushed P0, P1, P4 */
18004    for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
18005        bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
18006    }
18007}
18008
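/* Debug helper: log the PF enable/disable status registers after FLR cleanup */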
18009static void
18010bxe_hw_enable_status(struct bxe_softc *sc)
18011{
18012    uint32_t val;
18013
18014    val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
18015    BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
18016
18017    val = REG_RD(sc, PBF_REG_DISABLE_PF);
18018    BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);
18019
18020    val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
18021    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
18022
18023    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
18024    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
18025
18026    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
18027    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
18028
18029    val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
18030    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
18031
18032    val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
18033    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
18034
18035    val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
18036    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
18037}
18038
18039static int
18040bxe_pf_flr_clnup(struct bxe_softc *sc)
18041{
18042    uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);
18043
18044    BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));
18045
18046    /* Re-enable PF target read access */
18047    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
18048
18049    /* Poll HW usage counters */
18050    BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
18051    if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
18052        return (-1);
18053    }
18054
18055    /* Zero the igu 'trailing edge' and 'leading edge' */
18056
18057    /* Send the FW cleanup command */
18058    if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
18059        return (-1);
18060    }
18061
18062    /* ATC cleanup */
18063
18064    /* Verify TX hw is flushed */
18065    bxe_tx_hw_flushed(sc, poll_cnt);
18066
18067    /* Wait 100ms (not adjusted according to platform) */
18068    DELAY(100000);
18069
18070    /* Verify no pending pci transactions */
18071    if (bxe_is_pcie_pending(sc)) {
18072        BLOGE(sc, "PCIE Transactions still pending\n");
18073    }
18074
18075    /* Debug */
18076    bxe_hw_enable_status(sc);
18077
18078    /*
18079     * Master enable - Due to WB DMAE writes performed before this
18080     * register is re-initialized as part of the regular function init
18081     */
18082    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
18083
18084    return (0);
18085}
18086
18087static int
18088bxe_init_hw_func(struct bxe_softc *sc)
18089{
18090    int port = SC_PORT(sc);
18091    int func = SC_FUNC(sc);
18092    int init_phase = PHASE_PF0 + func;
18093    struct ecore_ilt *ilt = sc->ilt;
18094    uint16_t cdu_ilt_start;
18095    uint32_t addr, val;
18096    uint32_t main_mem_base, main_mem_size, main_mem_prty_clr;
18097    int i, main_mem_width, rc;
18098
18099    BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func);
18100
18101    /* FLR cleanup */
18102    if (!CHIP_IS_E1x(sc)) {
18103        rc = bxe_pf_flr_clnup(sc);
18104        if (rc) {
18105            BLOGE(sc, "FLR cleanup failed!\n");
18106            // XXX bxe_fw_dump(sc);
18107            // XXX bxe_idle_chk(sc);
18108            return (rc);
18109        }
18110    }
18111
18112    /* set MSI reconfigure capability */
18113    if (sc->devinfo.int_block == INT_BLOCK_HC) {
18114        addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
18115        val = REG_RD(sc, addr);
18116        val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
18117        REG_WR(sc, addr, val);
18118    }
18119
18120    ecore_init_block(sc, BLOCK_PXP, init_phase);
18121    ecore_init_block(sc, BLOCK_PXP2, init_phase);
18122
18123    ilt = sc->ilt;
18124    cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
18125
18126    for (i = 0; i < L2_ILT_LINES(sc); i++) {
18127        ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
18128        ilt->lines[cdu_ilt_start + i].page_mapping =
18129            sc->context[i].vcxt_dma.paddr;
18130        ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
18131    }
18132    ecore_ilt_init_op(sc, INITOP_SET);
18133
18134    /* Set NIC mode */
18135    REG_WR(sc, PRS_REG_NIC_MODE, 1);
18136    BLOGD(sc, DBG_LOAD, "NIC MODE configured\n");
18137
18138    if (!CHIP_IS_E1x(sc)) {
18139        uint32_t pf_conf = IGU_PF_CONF_FUNC_EN;
18140
18141        /* Turn on a single ISR mode in IGU if driver is going to use
18142         * INT#x or MSI
18143         */
18144        if (sc->interrupt_mode != INTR_MODE_MSIX) {
18145            pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
18146        }
18147
18148        /*
18149         * Timers workaround bug: function init part.
18150         * Need to wait 20msec after initializing ILT,
18151         * needed to make sure there are no requests in
18152         * one of the PXP internal queues with "old" ILT addresses
18153         */
18154        DELAY(20000);
18155
18156        /*
18157         * Master enable - Due to WB DMAE writes performed before this
18158         * register is re-initialized as part of the regular function
18159         * init
18160         */
18161        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
18162        /* Enable the function in IGU */
18163        REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf);
18164    }
18165
18166    sc->dmae_ready = 1;
18167
18168    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
18169
18170    if (!CHIP_IS_E1x(sc))
18171        REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
18172
18173    ecore_init_block(sc, BLOCK_ATC, init_phase);
18174    ecore_init_block(sc, BLOCK_DMAE, init_phase);
18175    ecore_init_block(sc, BLOCK_NIG, init_phase);
18176    ecore_init_block(sc, BLOCK_SRC, init_phase);
18177    ecore_init_block(sc, BLOCK_MISC, init_phase);
18178    ecore_init_block(sc, BLOCK_TCM, init_phase);
18179    ecore_init_block(sc, BLOCK_UCM, init_phase);
18180    ecore_init_block(sc, BLOCK_CCM, init_phase);
18181    ecore_init_block(sc, BLOCK_XCM, init_phase);
18182    ecore_init_block(sc, BLOCK_TSEM, init_phase);
18183    ecore_init_block(sc, BLOCK_USEM, init_phase);
18184    ecore_init_block(sc, BLOCK_CSEM, init_phase);
18185    ecore_init_block(sc, BLOCK_XSEM, init_phase);
18186
18187    if (!CHIP_IS_E1x(sc))
18188        REG_WR(sc, QM_REG_PF_EN, 1);
18189
18190    if (!CHIP_IS_E1x(sc)) {
18191        REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18192        REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18193        REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18194        REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func);
18195    }
18196    ecore_init_block(sc, BLOCK_QM, init_phase);
18197
18198    ecore_init_block(sc, BLOCK_TM, init_phase);
18199    ecore_init_block(sc, BLOCK_DORQ, init_phase);
18200
18201    bxe_iov_init_dq(sc);
18202
18203    ecore_init_block(sc, BLOCK_BRB1, init_phase);
18204    ecore_init_block(sc, BLOCK_PRS, init_phase);
18205    ecore_init_block(sc, BLOCK_TSDM, init_phase);
18206    ecore_init_block(sc, BLOCK_CSDM, init_phase);
18207    ecore_init_block(sc, BLOCK_USDM, init_phase);
18208    ecore_init_block(sc, BLOCK_XSDM, init_phase);
18209    ecore_init_block(sc, BLOCK_UPB, init_phase);
18210    ecore_init_block(sc, BLOCK_XPB, init_phase);
18211    ecore_init_block(sc, BLOCK_PBF, init_phase);
18212    if (!CHIP_IS_E1x(sc))
18213        REG_WR(sc, PBF_REG_DISABLE_PF, 0);
18214
18215    ecore_init_block(sc, BLOCK_CDU, init_phase);
18216
18217    ecore_init_block(sc, BLOCK_CFC, init_phase);
18218
18219    if (!CHIP_IS_E1x(sc))
18220        REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1);
18221
18222    if (IS_MF(sc)) {
18223        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1);
18224        REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc));
18225    }
18226
18227    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
18228
18229    /* HC init per function */
18230    if (sc->devinfo.int_block == INT_BLOCK_HC) {
18231        if (CHIP_IS_E1H(sc)) {
18232            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18233
18234            REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18235            REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18236        }
18237        ecore_init_block(sc, BLOCK_HC, init_phase);
18238
18239    } else {
18240        int num_segs, sb_idx, prod_offset;
18241
18242        REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
18243
18244        if (!CHIP_IS_E1x(sc)) {
18245            REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18246            REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18247        }
18248
18249        ecore_init_block(sc, BLOCK_IGU, init_phase);
18250
18251        if (!CHIP_IS_E1x(sc)) {
18252            int dsb_idx = 0;
18253            /**
18254             * Producer memory:
             * E2 mode: addresses 0-135 map to the mapping memory;
             * 136 - PF0 default prod; 137 - PF1 default prod;
             * 138 - PF2 default prod; 139 - PF3 default prod;
             * 140 - PF0 attn prod;    141 - PF1 attn prod;
             * 142 - PF2 attn prod;    143 - PF3 attn prod;
             * 144-147 reserved.
             *
             * E1.5 mode - in backward compatible mode:
             * for non-default SBs, each even line in the memory
             * holds the U producer and each odd line holds
             * the C producer. The first 128 producers are for
             * NDSBs (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
             * producers are for the DSB of each PF.
             * Each PF has five segments (the order inside each
             * segment is PF0, PF1, PF2, PF3): 128-131 U prods;
             * 132-135 C prods; 136-139 X prods; 140-143 T prods;
             * 144-147 attn prods.
18272             */
18273            /* non-default-status-blocks */
18274            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18275                IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
18276            for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
18277                prod_offset = (sc->igu_base_sb + sb_idx) *
18278                    num_segs;
18279
18280                for (i = 0; i < num_segs; i++) {
18281                    addr = IGU_REG_PROD_CONS_MEMORY +
18282                            (prod_offset + i) * 4;
18283                    REG_WR(sc, addr, 0);
18284                }
18285                /* send consumer update with value 0 */
18286                bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
18287                           USTORM_ID, 0, IGU_INT_NOP, 1);
18288                bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
18289            }
18290
18291            /* default-status-blocks */
18292            num_segs = CHIP_INT_MODE_IS_BC(sc) ?
18293                IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
18294
18295            if (CHIP_IS_MODE_4_PORT(sc))
18296                dsb_idx = SC_FUNC(sc);
18297            else
18298                dsb_idx = SC_VN(sc);
18299
18300            prod_offset = (CHIP_INT_MODE_IS_BC(sc) ?
18301                       IGU_BC_BASE_DSB_PROD + dsb_idx :
18302                       IGU_NORM_BASE_DSB_PROD + dsb_idx);
18303
18304            /*
18305             * igu prods come in chunks of E1HVN_MAX (4) -
18306             * does not matters what is the current chip mode
18307             */
18308            for (i = 0; i < (num_segs * E1HVN_MAX);
18309                 i += E1HVN_MAX) {
18310                addr = IGU_REG_PROD_CONS_MEMORY +
18311                            (prod_offset + i)*4;
18312                REG_WR(sc, addr, 0);
18313            }
18314            /* send consumer update with 0 */
18315            if (CHIP_INT_MODE_IS_BC(sc)) {
18316                bxe_ack_sb(sc, sc->igu_dsb_id,
18317                           USTORM_ID, 0, IGU_INT_NOP, 1);
18318                bxe_ack_sb(sc, sc->igu_dsb_id,
18319                           CSTORM_ID, 0, IGU_INT_NOP, 1);
18320                bxe_ack_sb(sc, sc->igu_dsb_id,
18321                           XSTORM_ID, 0, IGU_INT_NOP, 1);
18322                bxe_ack_sb(sc, sc->igu_dsb_id,
18323                           TSTORM_ID, 0, IGU_INT_NOP, 1);
18324                bxe_ack_sb(sc, sc->igu_dsb_id,
18325                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18326            } else {
18327                bxe_ack_sb(sc, sc->igu_dsb_id,
18328                           USTORM_ID, 0, IGU_INT_NOP, 1);
18329                bxe_ack_sb(sc, sc->igu_dsb_id,
18330                           ATTENTION_ID, 0, IGU_INT_NOP, 1);
18331            }
18332            bxe_igu_clear_sb(sc, sc->igu_dsb_id);
18333
18334            /* !!! these should become driver const once
18335               rf-tool supports split-68 const */
18336            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
18337            REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
18338            REG_WR(sc, IGU_REG_SB_MASK_LSB, 0);
18339            REG_WR(sc, IGU_REG_SB_MASK_MSB, 0);
18340            REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0);
18341            REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0);
18342        }
18343    }
18344
18345    /* Reset PCIE errors for debug */
18346    REG_WR(sc, 0x2114, 0xffffffff);
18347    REG_WR(sc, 0x2120, 0xffffffff);
18348
18349    if (CHIP_IS_E1x(sc)) {
18350        main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
18351        main_mem_base = HC_REG_MAIN_MEMORY +
18352                SC_PORT(sc) * (main_mem_size * 4);
18353        main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
18354        main_mem_width = 8;
18355
18356        val = REG_RD(sc, main_mem_prty_clr);
18357        if (val) {
18358            BLOGD(sc, DBG_LOAD,
18359                  "Parity errors in HC block during function init (0x%x)!\n",
18360                  val);
18361        }
18362
18363        /* Clear "false" parity errors in MSI-X table */
18364        for (i = main_mem_base;
18365             i < main_mem_base + main_mem_size * 4;
18366             i += main_mem_width) {
18367            bxe_read_dmae(sc, i, main_mem_width / 4);
18368            bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data),
18369                           i, main_mem_width / 4);
18370        }
18371        /* Clear HC parity attention */
18372        REG_RD(sc, main_mem_prty_clr);
18373    }
18374
18375#if 1
18376    /* Enable STORMs SP logging */
18377    REG_WR8(sc, BAR_USTRORM_INTMEM +
18378           USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18379    REG_WR8(sc, BAR_TSTRORM_INTMEM +
18380           TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18381    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18382           CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18383    REG_WR8(sc, BAR_XSTRORM_INTMEM +
18384           XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1);
18385#endif
18386
18387    elink_phy_probe(&sc->link_params);
18388
18389    return (0);
18390}
18391
18392static void
18393bxe_link_reset(struct bxe_softc *sc)
18394{
18395    if (!BXE_NOMCP(sc)) {
        bxe_acquire_phy_lock(sc);
        elink_lfa_reset(&sc->link_params, &sc->link_vars);
        bxe_release_phy_lock(sc);
18399    } else {
18400        if (!CHIP_REV_IS_SLOW(sc)) {
18401            BLOGW(sc, "Bootcode is missing - cannot reset link\n");
18402        }
18403    }
18404}
18405
18406static void
18407bxe_reset_port(struct bxe_softc *sc)
18408{
18409    int port = SC_PORT(sc);
18410    uint32_t val;
18411
    ELINK_DEBUG_P0(sc, "bxe_reset_port called\n");
18413    /* reset physical Link */
18414    bxe_link_reset(sc);
18415
18416    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
18417
18418    /* Do not rcv packets to BRB */
18419    REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
18420    /* Do not direct rcv packets that are not for MCP to the BRB */
18421    REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
18422               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
18423
18424    /* Configure AEU */
18425    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
18426
18427    DELAY(100000);
18428
18429    /* Check for BRB port occupancy */
18430    val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
18431    if (val) {
18432        BLOGD(sc, DBG_LOAD,
18433              "BRB1 is not empty, %d blocks are occupied\n", val);
18434    }
18435
18436    /* TODO: Close Doorbell port? */
18437}
18438
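/* Write a single ILT entry (64-bit physical address) using a DMAE write */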
18439static void
18440bxe_ilt_wr(struct bxe_softc *sc,
18441           uint32_t         index,
18442           bus_addr_t       addr)
18443{
18444    int reg;
18445    uint32_t wb_write[2];
18446
18447    if (CHIP_IS_E1(sc)) {
18448        reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
18449    } else {
18450        reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
18451    }
18452
18453    wb_write[0] = ONCHIP_ADDR1(addr);
18454    wb_write[1] = ONCHIP_ADDR2(addr);
18455    REG_WR_DMAE(sc, reg, wb_write, 2);
18456}
18457
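/* Clear (zero) all ILT entries owned by the given function */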
18458static void
18459bxe_clear_func_ilt(struct bxe_softc *sc,
18460                   uint32_t         func)
18461{
18462    uint32_t i, base = FUNC_ILT_BASE(func);
18463    for (i = base; i < base + ILT_PER_FUNC; i++) {
18464        bxe_ilt_wr(sc, i, 0);
18465    }
18466}
18467
18468static void
18469bxe_reset_func(struct bxe_softc *sc)
18470{
18471    struct bxe_fastpath *fp;
18472    int port = SC_PORT(sc);
18473    int func = SC_FUNC(sc);
18474    int i;
18475
18476    /* Disable the function in the FW */
18477    REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
18478    REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
18479    REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
18480    REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
18481
18482    /* FP SBs */
18483    FOR_EACH_ETH_QUEUE(sc, i) {
18484        fp = &sc->fp[i];
18485        REG_WR8(sc, BAR_CSTRORM_INTMEM +
18486                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18487                SB_DISABLED);
18488    }
18489
18490    /* SP SB */
18491    REG_WR8(sc, BAR_CSTRORM_INTMEM +
18492            CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
18493            SB_DISABLED);
18494
18495    for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
18496        REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
18497    }
18498
18499    /* Configure IGU */
18500    if (sc->devinfo.int_block == INT_BLOCK_HC) {
18501        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
18502        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
18503    } else {
18504        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
18505        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
18506    }
18507
18508    if (CNIC_LOADED(sc)) {
18509        /* Disable Timer scan */
18510        REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
18511        /*
18512         * Wait for at least 10ms and up to 2 second for the timers
18513         * scan to complete
18514         */
18515        for (i = 0; i < 200; i++) {
18516            DELAY(10000);
18517            if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
18518                break;
18519        }
18520    }
18521
18522    /* Clear ILT */
18523    bxe_clear_func_ilt(sc, func);
18524
18525    /*
     * Timers bug workaround for E2: if this is vnic-3,
     * we need to set the entire ILT range for the timers.
18528     */
18529    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
18530        struct ilt_client_info ilt_cli;
18531        /* use dummy TM client */
18532        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
18533        ilt_cli.start = 0;
18534        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18535        ilt_cli.client_num = ILT_CLIENT_TM;
18536
18537        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
18538    }
18539
    /* this assumes that reset_port() was called before reset_func() */
18541    if (!CHIP_IS_E1x(sc)) {
18542        bxe_pf_disable(sc);
18543    }
18544
18545    sc->dmae_ready = 0;
18546}
18547
18548static int
18549bxe_gunzip_init(struct bxe_softc *sc)
18550{
18551    return (0);
18552}
18553
18554static void
18555bxe_gunzip_end(struct bxe_softc *sc)
18556{
18557    return;
18558}
18559
18560static int
18561bxe_init_firmware(struct bxe_softc *sc)
18562{
18563    if (CHIP_IS_E1(sc)) {
18564        ecore_init_e1_firmware(sc);
18565        sc->iro_array = e1_iro_arr;
18566    } else if (CHIP_IS_E1H(sc)) {
18567        ecore_init_e1h_firmware(sc);
18568        sc->iro_array = e1h_iro_arr;
18569    } else if (!CHIP_IS_E1x(sc)) {
18570        ecore_init_e2_firmware(sc);
18571        sc->iro_array = e2_iro_arr;
18572    } else {
18573        BLOGE(sc, "Unsupported chip revision\n");
18574        return (-1);
18575    }
18576
18577    return (0);
18578}
18579
18580static void
18581bxe_release_firmware(struct bxe_softc *sc)
18582{
18583    /* Do nothing */
18584    return;
18585}
18586
18587static int
18588ecore_gunzip(struct bxe_softc *sc,
18589             const uint8_t    *zbuf,
18590             int              len)
18591{
18592    /* XXX : Implement... */
18593    BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n");
18594    return (FALSE);
18595}
18596
18597static void
18598ecore_reg_wr_ind(struct bxe_softc *sc,
18599                 uint32_t         addr,
18600                 uint32_t         val)
18601{
18602    bxe_reg_wr_ind(sc, addr, val);
18603}
18604
18605static void
18606ecore_write_dmae_phys_len(struct bxe_softc *sc,
18607                          bus_addr_t       phys_addr,
18608                          uint32_t         addr,
18609                          uint32_t         len)
18610{
18611    bxe_write_dmae_phys_len(sc, phys_addr, addr, len);
18612}
18613
18614void
18615ecore_storm_memset_struct(struct bxe_softc *sc,
18616                          uint32_t         addr,
18617                          size_t           size,
18618                          uint32_t         *data)
18619{
18620    uint8_t i;
18621    for (i = 0; i < size/4; i++) {
18622        REG_WR(sc, addr + (i * 4), data[i]);
18623    }
18624}
18625
18626
18627/*
18628 * character device - ioctl interface definitions
18629 */
18630
18631
18632#include "bxe_dump.h"
18633#include "bxe_ioctl.h"
18634#include <sys/conf.h>
18635
18636static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
18637                struct thread *td);
18638
18639static struct cdevsw bxe_cdevsw = {
18640    .d_version = D_VERSION,
18641    .d_ioctl = bxe_eioctl,
18642    .d_name = "bxecnic",
18643};
18644
18645#define BXE_PATH(sc)    (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
18646
18647
18648#define DUMP_ALL_PRESETS        0x1FFF
18649#define DUMP_MAX_PRESETS        13
18650#define IS_E1_REG(chips)        ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
18651#define IS_E1H_REG(chips)       ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
18652#define IS_E2_REG(chips)        ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
18653#define IS_E3A0_REG(chips)      ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
18654#define IS_E3B0_REG(chips)      ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
18655
18656#define IS_REG_IN_PRESET(presets, idx)  \
18657                ((presets & (1 << (idx-1))) == (1 << (idx-1)))
18658
18659
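/* Number of 32-bit register values in the given preset for the current chip */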
18660static int
18661bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset)
18662{
18663    if (CHIP_IS_E1(sc))
18664        return dump_num_registers[0][preset-1];
18665    else if (CHIP_IS_E1H(sc))
18666        return dump_num_registers[1][preset-1];
18667    else if (CHIP_IS_E2(sc))
18668        return dump_num_registers[2][preset-1];
18669    else if (CHIP_IS_E3A0(sc))
18670        return dump_num_registers[3][preset-1];
18671    else if (CHIP_IS_E3B0(sc))
18672        return dump_num_registers[4][preset-1];
18673    else
18674        return 0;
18675}
18676
18677static int
18678bxe_get_total_regs_len32(struct bxe_softc *sc)
18679{
18680    uint32_t preset_idx;
18681    int regdump_len32 = 0;
18682
18683
18684    /* Calculate the total preset regs length */
18685    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18686        regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx);
18687    }
18688
18689    return regdump_len32;
18690}
18691
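/*
 * Per-chip tables used by bxe_read_pages_regs() to access "paged"
 * registers (page selector values, write addresses and read addresses).
 */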
18692static const uint32_t *
18693__bxe_get_page_addr_ar(struct bxe_softc *sc)
18694{
18695    if (CHIP_IS_E2(sc))
18696        return page_vals_e2;
18697    else if (CHIP_IS_E3(sc))
18698        return page_vals_e3;
18699    else
18700        return NULL;
18701}
18702
18703static uint32_t
18704__bxe_get_page_reg_num(struct bxe_softc *sc)
18705{
18706    if (CHIP_IS_E2(sc))
18707        return PAGE_MODE_VALUES_E2;
18708    else if (CHIP_IS_E3(sc))
18709        return PAGE_MODE_VALUES_E3;
18710    else
18711        return 0;
18712}
18713
18714static const uint32_t *
18715__bxe_get_page_write_ar(struct bxe_softc *sc)
18716{
18717    if (CHIP_IS_E2(sc))
18718        return page_write_regs_e2;
18719    else if (CHIP_IS_E3(sc))
18720        return page_write_regs_e3;
18721    else
18722        return NULL;
18723}
18724
18725static uint32_t
18726__bxe_get_page_write_num(struct bxe_softc *sc)
18727{
18728    if (CHIP_IS_E2(sc))
18729        return PAGE_WRITE_REGS_E2;
18730    else if (CHIP_IS_E3(sc))
18731        return PAGE_WRITE_REGS_E3;
18732    else
18733        return 0;
18734}
18735
18736static const struct reg_addr *
18737__bxe_get_page_read_ar(struct bxe_softc *sc)
18738{
18739    if (CHIP_IS_E2(sc))
18740        return page_read_regs_e2;
18741    else if (CHIP_IS_E3(sc))
18742        return page_read_regs_e3;
18743    else
18744        return NULL;
18745}
18746
18747static uint32_t
18748__bxe_get_page_read_num(struct bxe_softc *sc)
18749{
18750    if (CHIP_IS_E2(sc))
18751        return PAGE_READ_REGS_E2;
18752    else if (CHIP_IS_E3(sc))
18753        return PAGE_READ_REGS_E3;
18754    else
18755        return 0;
18756}
18757
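/* Check whether a register dump entry applies to the current chip */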
18758static bool
18759bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info)
18760{
18761    if (CHIP_IS_E1(sc))
18762        return IS_E1_REG(reg_info->chips);
18763    else if (CHIP_IS_E1H(sc))
18764        return IS_E1H_REG(reg_info->chips);
18765    else if (CHIP_IS_E2(sc))
18766        return IS_E2_REG(reg_info->chips);
18767    else if (CHIP_IS_E3A0(sc))
18768        return IS_E3A0_REG(reg_info->chips);
18769    else if (CHIP_IS_E3B0(sc))
18770        return IS_E3B0_REG(reg_info->chips);
18771    else
18772        return 0;
18773}
18774
18775static bool
18776bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info)
18777{
18778    if (CHIP_IS_E1(sc))
18779        return IS_E1_REG(wreg_info->chips);
18780    else if (CHIP_IS_E1H(sc))
18781        return IS_E1H_REG(wreg_info->chips);
18782    else if (CHIP_IS_E2(sc))
18783        return IS_E2_REG(wreg_info->chips);
18784    else if (CHIP_IS_E3A0(sc))
18785        return IS_E3A0_REG(wreg_info->chips);
18786    else if (CHIP_IS_E3B0(sc))
18787        return IS_E3B0_REG(wreg_info->chips);
18788    else
18789        return 0;
18790}
18791
18792/**
18793 * bxe_read_pages_regs - read "paged" registers
18794 *
 * @sc          device handle
 * @p           output buffer
 * @preset      preset being dumped
18797 *
18798 * Reads "paged" memories: memories that may only be read by first writing to a
18799 * specific address ("write address") and then reading from a specific address
18800 * ("read address"). There may be more than one write address per "page" and
18801 * more than one read address per write address.
18802 */
18803static void
18804bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18805{
18806    uint32_t i, j, k, n;
18807
18808    /* addresses of the paged registers */
18809    const uint32_t *page_addr = __bxe_get_page_addr_ar(sc);
18810    /* number of paged registers */
18811    int num_pages = __bxe_get_page_reg_num(sc);
18812    /* write addresses */
18813    const uint32_t *write_addr = __bxe_get_page_write_ar(sc);
18814    /* number of write addresses */
18815    int write_num = __bxe_get_page_write_num(sc);
18816    /* read addresses info */
18817    const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc);
18818    /* number of read addresses */
18819    int read_num = __bxe_get_page_read_num(sc);
18820    uint32_t addr, size;
18821
18822    for (i = 0; i < num_pages; i++) {
18823        for (j = 0; j < write_num; j++) {
18824            REG_WR(sc, write_addr[j], page_addr[i]);
18825
18826            for (k = 0; k < read_num; k++) {
18827                if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) {
18828                    size = read_addr[k].size;
18829                    for (n = 0; n < size; n++) {
18830                        addr = read_addr[k].addr + n*4;
18831                        *p++ = REG_RD(sc, addr);
18832                    }
18833                }
18834            }
18835        }
18836    }
18837    return;
18838}
18839
18840
18841static int
18842bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset)
18843{
18844    uint32_t i, j, addr;
18845    const struct wreg_addr *wreg_addr_p = NULL;
18846
18847    if (CHIP_IS_E1(sc))
18848        wreg_addr_p = &wreg_addr_e1;
18849    else if (CHIP_IS_E1H(sc))
18850        wreg_addr_p = &wreg_addr_e1h;
18851    else if (CHIP_IS_E2(sc))
18852        wreg_addr_p = &wreg_addr_e2;
18853    else if (CHIP_IS_E3A0(sc))
18854        wreg_addr_p = &wreg_addr_e3;
18855    else if (CHIP_IS_E3B0(sc))
18856        wreg_addr_p = &wreg_addr_e3b0;
18857    else
18858        return (-1);
18859
18860    /* Read the idle_chk registers */
18861    for (i = 0; i < IDLE_REGS_COUNT; i++) {
18862        if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) &&
18863            IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
18864            for (j = 0; j < idle_reg_addrs[i].size; j++)
18865                *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4);
18866        }
18867    }
18868
18869    /* Read the regular registers */
18870    for (i = 0; i < REGS_COUNT; i++) {
18871        if (bxe_is_reg_in_chip(sc, &reg_addrs[i]) &&
18872            IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
18873            for (j = 0; j < reg_addrs[i].size; j++)
18874                *p++ = REG_RD(sc, reg_addrs[i].addr + j*4);
18875        }
18876    }
18877
18878    /* Read the CAM registers */
18879    if (bxe_is_wreg_in_chip(sc, wreg_addr_p) &&
18880        IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
18881        for (i = 0; i < wreg_addr_p->size; i++) {
18882            *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);
18883
            /*
             * In case of a wreg_addr register, read additional
             * registers from the read_regs array.
             */
18887            for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
18888                addr = *(wreg_addr_p->read_regs);
18889                *p++ = REG_RD(sc, addr + j*4);
18890            }
18891        }
18892    }
18893
18894    /* Paged registers are supported in E2 & E3 only */
18895    if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) {
18896        /* Read "paged" registers */
18897        bxe_read_pages_regs(sc, p, preset);
18898    }
18899
18900    return 0;
18901}
18902
18903int
18904bxe_grc_dump(struct bxe_softc *sc)
18905{
18906    int rval = 0;
18907    uint32_t preset_idx;
18908    uint8_t *buf;
18909    uint32_t size;
18910    struct  dump_header *d_hdr;
18911    uint32_t i;
18912    uint32_t reg_val;
18913    uint32_t reg_addr;
18914    uint32_t cmd_offset;
18915    struct ecore_ilt *ilt = SC_ILT(sc);
18916    struct bxe_fastpath *fp;
18917    struct ilt_client_info *ilt_cli;
18918    int grc_dump_size;
18919
18920
18921    if (sc->grcdump_done || sc->grcdump_started)
18922	return (rval);
18923
18924    sc->grcdump_started = 1;
18925    BLOGI(sc, "Started collecting grcdump\n");
18926
18927    grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
18928                sizeof(struct  dump_header);
18929
18930    sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
18931
18932    if (sc->grc_dump == NULL) {
18933        BLOGW(sc, "Unable to allocate memory for grcdump collection\n");
18934        return(ENOMEM);
18935    }
18936
18937
18938
    /* Disable parity attentions while dumping, since reading
     * never-written registers may cause false alarms. We will
     * re-enable parity attentions right after the dump.
     */
18943
18944    /* Disable parity on path 0 */
18945    bxe_pretend_func(sc, 0);
18946
18947    ecore_disable_blocks_parity(sc);
18948
18949    /* Disable parity on path 1 */
18950    bxe_pretend_func(sc, 1);
18951    ecore_disable_blocks_parity(sc);
18952
18953    /* Return to current function */
18954    bxe_pretend_func(sc, SC_ABS_FUNC(sc));
18955
18956    buf = sc->grc_dump;
18957    d_hdr = sc->grc_dump;
18958
18959    d_hdr->header_size = (sizeof(struct  dump_header) >> 2) - 1;
18960    d_hdr->version = BNX2X_DUMP_VERSION;
18961    d_hdr->preset = DUMP_ALL_PRESETS;
18962
18963    if (CHIP_IS_E1(sc)) {
18964        d_hdr->dump_meta_data = DUMP_CHIP_E1;
18965    } else if (CHIP_IS_E1H(sc)) {
18966        d_hdr->dump_meta_data = DUMP_CHIP_E1H;
18967    } else if (CHIP_IS_E2(sc)) {
18968        d_hdr->dump_meta_data = DUMP_CHIP_E2 |
18969                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18970    } else if (CHIP_IS_E3A0(sc)) {
18971        d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
18972                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18973    } else if (CHIP_IS_E3B0(sc)) {
18974        d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
18975                (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0);
18976    }
18977
18978    buf += sizeof(struct  dump_header);
18979
18980    for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
18981
18982        /* Skip presets with IOR */
18983        if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) ||
18984            (preset_idx == 11))
18985            continue;
18986
18987        rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx);
18988
        if (rval)
            break;
18991
18992        size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t));
18993
18994        buf += size;
18995    }
18996
18997    bxe_pretend_func(sc, 0);
18998    ecore_clear_blocks_parity(sc);
18999    ecore_enable_blocks_parity(sc);
19000
19001    bxe_pretend_func(sc, 1);
19002    ecore_clear_blocks_parity(sc);
19003    ecore_enable_blocks_parity(sc);
19004
19005    /* Return to current function */
    bxe_pretend_func(sc, SC_ABS_FUNC(sc));
19009
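    /*
     * With the interface up, log the DMA mappings of the major driver
     * memory regions (firmware stats, status blocks, event queue, slow
     * path, fastpath rings, ILT pages) and dump the DMAE command memory.
     * This is diagnostic output that accompanies the grcdump.
     */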
    if (sc->state == BXE_STATE_OPEN) {
        if (sc->fw_stats_req != NULL) {
            BLOGI(sc, "fw stats req_paddr %#jx data_paddr %#jx vaddr %p size 0x%x\n",
                  (uintmax_t)sc->fw_stats_req_mapping,
                  (uintmax_t)sc->fw_stats_data_mapping,
                  sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
        }
        if (sc->def_sb != NULL) {
            BLOGI(sc, "def_status_block paddr %#jx vaddr %p size 0x%zx\n",
                  (uintmax_t)sc->def_sb_dma.paddr, sc->def_sb,
                  sizeof(struct host_sp_status_block));
        }
        if (sc->eq_dma.vaddr != NULL) {
            BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n",
                  (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
        }
        if (sc->sp_dma.vaddr != NULL) {
            BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n",
                  (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
                  sizeof(struct bxe_slowpath));
        }
        if (sc->spq_dma.vaddr != NULL) {
            BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n",
                  (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
        }
        if (sc->gz_buf_dma.vaddr != NULL) {
            BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n",
                  (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
                  FW_BUF_SIZE);
        }
        for (i = 0; i < sc->num_queues; i++) {
            fp = &sc->fp[i];
            if (fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL &&
                fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL &&
                fp->rx_sge_dma.vaddr != NULL) {
                BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i,
                      (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
                      sizeof(union bxe_host_hc_status_block));
                BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
                      (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
                      (BCM_PAGE_SIZE * TX_BD_NUM_PAGES));
                BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
                      (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
                      (BCM_PAGE_SIZE * RX_BD_NUM_PAGES));
                BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
                      (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
                      (BCM_PAGE_SIZE * RCQ_NUM_PAGES));
                BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i,
                      (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
                      (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES));
            }
        }
        if (ilt != NULL) {
            ilt_cli = &ilt->clients[1];
            if (ilt->lines != NULL) {
                for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
                    BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n",
                          (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
                          ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
                }
            }
        }

        cmd_offset = DMAE_REG_CMD_MEM;
        for (i = 0; i < 224; i++) {
            reg_addr = (cmd_offset + (i * 4));
            reg_val = REG_RD(sc, reg_addr);
            BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n", i,
                  reg_addr, reg_val);
        }
    }
19083
19084    BLOGI(sc, "Collection of grcdump done\n");
19085    sc->grcdump_done = 1;
19086    return(rval);
19087}
19088
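/*
 * Create the per-interface character device (named after the ifnet, e.g.
 * /dev/bxe0) used by the management ioctls in bxe_eioctl(), and allocate
 * the staging buffer used for eeprom read/write requests.
 */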
19089static int
19090bxe_add_cdev(struct bxe_softc *sc)
19091{
19092    sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT);
19093
19094    if (sc->eeprom == NULL) {
19095        BLOGW(sc, "Unable to alloc for eeprom size buffer\n");
19096        return (-1);
19097    }
19098
19099    sc->ioctl_dev = make_dev(&bxe_cdevsw,
19100                            if_getdunit(sc->ifp),
19101                            UID_ROOT,
19102                            GID_WHEEL,
19103                            0600,
19104                            "%s",
19105                            if_name(sc->ifp));
19106
19107    if (sc->ioctl_dev == NULL) {
19108        free(sc->eeprom, M_DEVBUF);
19109        sc->eeprom = NULL;
19110        return (-1);
19111    }
19112
19113    sc->ioctl_dev->si_drv1 = sc;
19114
19115    return (0);
19116}
19117
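/* Tear down the ioctl character device and release the eeprom staging buffer. */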
19118static void
19119bxe_del_cdev(struct bxe_softc *sc)
19120{
19121    if (sc->ioctl_dev != NULL)
19122        destroy_dev(sc->ioctl_dev);
19123
19124    if (sc->eeprom != NULL) {
19125        free(sc->eeprom, M_DEVBUF);
19126        sc->eeprom = NULL;
19127    }
19128    sc->ioctl_dev = NULL;
19129
19130    return;
19131}
19132
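/* NVRAM can only be accessed while the interface is up and running. */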
static bool
bxe_is_nvram_accessible(struct bxe_softc *sc)
{
19136    if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
19137        return FALSE;
19138
19139    return TRUE;
19140}
19141
19142
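/*
 * bxe_wr_eeprom()/bxe_rd_eeprom() are thin wrappers around
 * bxe_nvram_write()/bxe_nvram_read() that reject access while the
 * interface is down.
 */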
19143static int
19144bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
19145{
19146    int rval = 0;
19147
    if (!bxe_is_nvram_accessible(sc)) {
        BLOGW(sc, "Cannot access eeprom when interface is down\n");
        return (-EAGAIN);
    }
    rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len);

    return (rval);
19156}
19157
19158static int
19159bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len)
19160{
19161    int rval = 0;
19162
    if (!bxe_is_nvram_accessible(sc)) {
        BLOGW(sc, "Cannot access eeprom when interface is down\n");
        return (-EAGAIN);
    }
    rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len);

    return (rval);
19170}
19171
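/*
 * Handle the BXE_EEPROM ioctl: copy user data through the sc->eeprom
 * staging buffer and write it to NVRAM (SET), or read NVRAM into the
 * staging buffer and copy it back out to the user (GET).
 */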
19172static int
19173bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom)
19174{
19175    int rval = 0;
19176
19177    switch (eeprom->eeprom_cmd) {
19178
19179    case BXE_EEPROM_CMD_SET_EEPROM:
19180
19181        rval = copyin(eeprom->eeprom_data, sc->eeprom,
19182                       eeprom->eeprom_data_len);
19183
19184        if (rval)
19185            break;
19186
19187        rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
19188                       eeprom->eeprom_data_len);
19189        break;
19190
19191    case BXE_EEPROM_CMD_GET_EEPROM:
19192
19193        rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
19194                       eeprom->eeprom_data_len);
19195
19196        if (rval) {
19197            break;
19198        }
19199
19200        rval = copyout(sc->eeprom, eeprom->eeprom_data,
19201                       eeprom->eeprom_data_len);
19202        break;
19203
    default:
        rval = EINVAL;
        break;
19207    }
19208
19209    if (rval) {
19210        BLOGW(sc, "ioctl cmd %d  failed rval %d\n", eeprom->eeprom_cmd, rval);
19211    }
19212
19213    return (rval);
19214}
19215
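/*
 * Report the current link settings: supported/advertised masks from the
 * link parameters, speed and duplex from the live link state (or UNKNOWN
 * when the link is down), the detected media/port type, the external PHY
 * address from shared memory, and whether autonegotiation is enabled.
 */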
19216static int
19217bxe_get_settings(struct bxe_softc *sc, bxe_dev_setting_t *dev_p)
19218{
19219    uint32_t ext_phy_config;
19220    int port = SC_PORT(sc);
19221    int cfg_idx = bxe_get_link_cfg_idx(sc);
19222
19223    dev_p->supported = sc->port.supported[cfg_idx] |
19224            (sc->port.supported[cfg_idx ^ 1] &
19225            (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE));
19226    dev_p->advertising = sc->port.advertising[cfg_idx];
    if (sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type ==
        ELINK_ETH_PHY_SFP_1G_FIBER) {
        dev_p->supported &= ~(ELINK_SUPPORTED_10000baseT_Full);
        dev_p->advertising &= ~(ADVERTISED_10000baseT_Full);
    }
19232    if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up &&
19233        !(sc->flags & BXE_MF_FUNC_DIS)) {
19234        dev_p->duplex = sc->link_vars.duplex;
19235        if (IS_MF(sc) && !BXE_NOMCP(sc))
19236            dev_p->speed = bxe_get_mf_speed(sc);
19237        else
19238            dev_p->speed = sc->link_vars.line_speed;
19239    } else {
19240        dev_p->duplex = DUPLEX_UNKNOWN;
19241        dev_p->speed = SPEED_UNKNOWN;
19242    }
19243
19244    dev_p->port = bxe_media_detect(sc);
19245
19246    ext_phy_config = SHMEM_RD(sc,
19247                         dev_info.port_hw_config[port].external_phy_config);
    if ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) ==
        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
        dev_p->phy_address = sc->port.phy_addr;
    else if (((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
        ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) !=
            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
        dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
    else
        dev_p->phy_address = 0;

    if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG)
        dev_p->autoneg = AUTONEG_ENABLE;
    else
        dev_p->autoneg = AUTONEG_DISABLE;

    return (0);
19266}
19267
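/*
 * ioctl handler for the management character device created by
 * bxe_add_cdev(). Supported commands: BXE_GRC_DUMP_SIZE, BXE_GRC_DUMP,
 * BXE_DRV_INFO, BXE_DEV_SETTING, BXE_GET_REGS, BXE_RDW_REG,
 * BXE_RDW_PCICFG, BXE_MAC_ADDR and BXE_EEPROM.
 *
 * Illustrative userland sketch of the GRC dump flow (assumes the bxe
 * ioctl structures are visible to the caller, that unit 0 is being
 * queried, and that sc->trigger_grcdump has already been set):
 *
 *     bxe_grcdump_t dump = { 0 };
 *     int fd = open("/dev/bxe0", O_RDWR);
 *     ioctl(fd, BXE_GRC_DUMP_SIZE, &dump);      // fills dump.grcdump_size
 *     dump.grcdump = malloc(dump.grcdump_size);
 *     ioctl(fd, BXE_GRC_DUMP, &dump);           // copies out the dump
 *     close(fd);
 */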
19268static int
19269bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
19270        struct thread *td)
19271{
19272    struct bxe_softc    *sc;
19273    int                 rval = 0;
19274    bxe_grcdump_t       *dump = NULL;
19275    int grc_dump_size;
19276    bxe_drvinfo_t   *drv_infop = NULL;
19277    bxe_dev_setting_t  *dev_p;
19278    bxe_dev_setting_t  dev_set;
19279    bxe_get_regs_t  *reg_p;
19280    bxe_reg_rdw_t *reg_rdw_p;
19281    bxe_pcicfg_rdw_t *cfg_rdw_p;
19282    bxe_perm_mac_addr_t *mac_addr_p;
19283
19284
19285    if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
19286        return ENXIO;
19287
19288    dump = (bxe_grcdump_t *)data;
19289
19290    switch(cmd) {
19291
19292        case BXE_GRC_DUMP_SIZE:
19293            dump->pci_func = sc->pcie_func;
19294            dump->grcdump_size =
19295                (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19296                     sizeof(struct  dump_header);
19297            break;
19298
19299        case BXE_GRC_DUMP:
19300
19301            grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) +
19302                                sizeof(struct  dump_header);
19303            if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
19304                (dump->grcdump_size < grc_dump_size)) {
19305                rval = EINVAL;
19306                break;
19307            }
19308
19309            if((sc->trigger_grcdump) && (!sc->grcdump_done) &&
19310                (!sc->grcdump_started)) {
19311                rval =  bxe_grc_dump(sc);
19312            }
19313
19314            if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
19315                (sc->grc_dump != NULL))  {
19316                dump->grcdump_dwords = grc_dump_size >> 2;
19317                rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
19318                free(sc->grc_dump, M_DEVBUF);
19319                sc->grc_dump = NULL;
19320                sc->grcdump_started = 0;
19321                sc->grcdump_done = 0;
19322            }
19323
19324            break;
19325
19326        case BXE_DRV_INFO:
19327            drv_infop = (bxe_drvinfo_t *)data;
19328            snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe");
19329            snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s",
19330                BXE_DRIVER_VERSION);
19331            snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s",
19332                sc->devinfo.bc_ver_str);
19333            snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH,
19334                "%s", sc->fw_ver_str);
19335            drv_infop->eeprom_dump_len = sc->devinfo.flash_size;
19336            drv_infop->reg_dump_len =
19337                (bxe_get_total_regs_len32(sc) * sizeof(uint32_t))
19338                    + sizeof(struct  dump_header);
19339            snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
19340                sc->pcie_bus, sc->pcie_device, sc->pcie_func);
19341            break;
19342
19343        case BXE_DEV_SETTING:
19344            dev_p = (bxe_dev_setting_t *)data;
19345            bxe_get_settings(sc, &dev_set);
19346            dev_p->supported = dev_set.supported;
19347            dev_p->advertising = dev_set.advertising;
19348            dev_p->speed = dev_set.speed;
19349            dev_p->duplex = dev_set.duplex;
19350            dev_p->port = dev_set.port;
19351            dev_p->phy_address = dev_set.phy_address;
19352            dev_p->autoneg = dev_set.autoneg;
19353
19354            break;
19355
19356        case BXE_GET_REGS:
19357
19358            reg_p = (bxe_get_regs_t *)data;
19359            grc_dump_size = reg_p->reg_buf_len;
19360
19361            if((!sc->grcdump_done) && (!sc->grcdump_started)) {
19362                bxe_grc_dump(sc);
19363            }
19364            if((sc->grcdump_done) && (sc->grcdump_started) &&
19365                (sc->grc_dump != NULL))  {
19366                rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
19367                free(sc->grc_dump, M_DEVBUF);
19368                sc->grc_dump = NULL;
19369                sc->grcdump_started = 0;
19370                sc->grcdump_done = 0;
19371            }
19372
19373            break;
19374
19375        case BXE_RDW_REG:
19376            reg_rdw_p = (bxe_reg_rdw_t *)data;
19377            if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&
19378                (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19379                reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id);
19380
19381            if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) &&
19382                (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19383                REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val);
19384
19385            break;
19386
19387        case BXE_RDW_PCICFG:
19388            cfg_rdw_p = (bxe_pcicfg_rdw_t *)data;
19389            if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) {
19390
19391                cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id,
19392                                         cfg_rdw_p->cfg_width);
19393
19394            } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) {
19395                pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val,
19396                            cfg_rdw_p->cfg_width);
19397            } else {
19398                BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n");
19399            }
19400            break;
19401
19402        case BXE_MAC_ADDR:
19403            mac_addr_p = (bxe_perm_mac_addr_t *)data;
19404            snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s",
19405                sc->mac_addr_str);
19406            break;
19407
19408        case BXE_EEPROM:
19409            rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data);
19410            break;
19411
19412
19413        default:
19414            break;
19415    }
19416
19417    return (rval);
19418}
19419
19420#ifdef DEBUGNET
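/*
 * debugnet (netdump) hooks: report the RX ring count, cluster budget and
 * receive cluster size, transmit kernel-dump frames on the first fastpath
 * queue, and poll all RX queues plus the first TX queue for completions.
 */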
19421static void
19422bxe_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
19423{
19424	struct bxe_softc *sc;
19425
19426	sc = if_getsoftc(ifp);
19427	BXE_CORE_LOCK(sc);
19428	*nrxr = sc->num_queues;
19429	*ncl = DEBUGNET_MAX_IN_FLIGHT;
19430	*clsize = sc->fp[0].mbuf_alloc_size;
19431	BXE_CORE_UNLOCK(sc);
19432}
19433
19434static void
19435bxe_debugnet_event(if_t ifp __unused, enum debugnet_ev event __unused)
19436{
19437}
19438
19439static int
19440bxe_debugnet_transmit(if_t ifp, struct mbuf *m)
19441{
19442	struct bxe_softc *sc;
19443	int error;
19444
19445	sc = if_getsoftc(ifp);
19446	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
19447	    IFF_DRV_RUNNING || !sc->link_vars.link_up)
19448		return (ENOENT);
19449
19450	error = bxe_tx_encap(&sc->fp[0], &m);
19451	if (error != 0 && m != NULL)
19452		m_freem(m);
19453	return (error);
19454}
19455
19456static int
19457bxe_debugnet_poll(if_t ifp, int count)
19458{
19459	struct bxe_softc *sc;
19460	int i;
19461
19462	sc = if_getsoftc(ifp);
19463	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
19464	    !sc->link_vars.link_up)
19465		return (ENOENT);
19466
19467	for (i = 0; i < sc->num_queues; i++)
19468		(void)bxe_rxeof(sc, &sc->fp[i]);
19469	(void)bxe_txeof(sc, &sc->fp[0]);
19470	return (0);
19471}
19472#endif /* DEBUGNET */
19473