bxe.c revision 281955
/*-
 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/bxe/bxe.c 281955 2015-04-24 23:26:44Z hiren $");

#define BXE_DRIVER_VERSION "1.78.78"

#include "bxe.h"
#include "ecore_sp.h"
#include "ecore_init.h"
#include "ecore_init_ops.h"

#include "57710_int_offsets.h"
#include "57711_int_offsets.h"
#include "57712_int_offsets.h"

/*
 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
 * explicitly here for older kernels that don't include this changeset.
 */
#ifndef CTLTYPE_U64
#define CTLTYPE_U64      CTLTYPE_QUAD
#define sysctl_handle_64 sysctl_handle_quad
#endif

/*
 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
 * here as zero(0) for older kernels that don't include this changeset,
 * thereby masking the functionality.
 */
#ifndef CSUM_TCP_IPV6
#define CSUM_TCP_IPV6 0
#define CSUM_UDP_IPV6 0
#endif

/*
 * pci_find_cap was added in r219865. Re-define this as pci_find_extcap
 * for older kernels that don't include this changeset.
 */
#if __FreeBSD_version < 900035
#define pci_find_cap pci_find_extcap
#endif

#define BXE_DEF_SB_ATT_IDX 0x0001
#define BXE_DEF_SB_IDX     0x0002

/*
 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
 * function HW initialization.
 */
#define FLR_WAIT_USEC     10000 /* 10 msecs */
#define FLR_WAIT_INTERVAL 50    /* usecs */
#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
    int pN;
    uint32_t init_crd;
    uint32_t crd;
    uint32_t crd_freed;
};

struct pbf_pN_cmd_regs {
    int pN;
    uint32_t lines_occup;
    uint32_t lines_freed;
};

/*
 * PCI Device ID Table used by bxe_probe().
 */
#define BXE_DEVDESC_MAX 64
static struct bxe_device_type bxe_devs[] = {
    { BRCM_VENDORID, CHIP_NUM_57710, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57710 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57711, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57711 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57711E, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57711E 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57712, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57712 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57712_MF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57712 MF 10GbE" },
#if 0
    { BRCM_VENDORID, CHIP_NUM_57712_VF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57712 VF 10GbE" },
#endif
    { BRCM_VENDORID, CHIP_NUM_57800, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57800 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57800_MF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57800 MF 10GbE" },
#if 0
    { BRCM_VENDORID, CHIP_NUM_57800_VF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57800 VF 10GbE" },
#endif
    { BRCM_VENDORID, CHIP_NUM_57810, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57810 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57810_MF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57810 MF 10GbE" },
#if 0
    { BRCM_VENDORID, CHIP_NUM_57810_VF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57810 VF 10GbE" },
#endif
    { BRCM_VENDORID, CHIP_NUM_57811, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57811 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57811_MF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57811 MF 10GbE" },
#if 0
    { BRCM_VENDORID, CHIP_NUM_57811_VF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57811 VF 10GbE" },
#endif
    { BRCM_VENDORID, CHIP_NUM_57840_4_10, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57840 4x10GbE" },
#if 0
    { BRCM_VENDORID, CHIP_NUM_57840_2_20, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57840 2x20GbE" },
#endif
    { BRCM_VENDORID, CHIP_NUM_57840_MF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57840 MF 10GbE" },
#if 0
    { BRCM_VENDORID, CHIP_NUM_57840_VF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57840 VF 10GbE" },
#endif
    { 0, 0, 0, 0, NULL }
};

MALLOC_DECLARE(M_BXE_ILT);
MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");

/*
 * FreeBSD device entry points.
 */
static int bxe_probe(device_t);
static int bxe_attach(device_t);
static int bxe_detach(device_t);
static int bxe_shutdown(device_t);
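/*
 * For illustration only: a minimal sketch of how a newbus probe routine
 * typically walks a device table like bxe_devs[] above. This is NOT the
 * actual bxe_probe() implementation; the helper name and the
 * bxe_vid/bxe_did/bxe_name field names are assumptions about the
 * bxe_device_type layout in bxe.h, shown to clarify the table's role.
 */
#if 0
static const struct bxe_device_type *
bxe_find_devdesc_example(device_t dev)
{
    const struct bxe_device_type *t;

    for (t = bxe_devs; t->bxe_name != NULL; t++) {
        if ((pci_get_vendor(dev) == t->bxe_vid) &&
            (pci_get_device(dev) == t->bxe_did)) {
            return (t); /* match; caller would set the device description */
        }
    }

    return (NULL); /* not one of ours */
}
#endif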
/*
 * FreeBSD KLD module/device interface event handler method.
 */
static device_method_t bxe_methods[] = {
    /* Device interface (device_if.h) */
    DEVMETHOD(device_probe,     bxe_probe),
    DEVMETHOD(device_attach,    bxe_attach),
    DEVMETHOD(device_detach,    bxe_detach),
    DEVMETHOD(device_shutdown,  bxe_shutdown),
#if 0
    DEVMETHOD(device_suspend,   bxe_suspend),
    DEVMETHOD(device_resume,    bxe_resume),
#endif
    /* Bus interface (bus_if.h) */
    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    KOBJMETHOD_END
};

/*
 * FreeBSD KLD Module data declaration
 */
static driver_t bxe_driver = {
    "bxe",                   /* module name */
    bxe_methods,             /* event handler */
    sizeof(struct bxe_softc) /* extra data */
};

/*
 * FreeBSD dev class is needed to manage dev instances and
 * to associate with a bus type
 */
static devclass_t bxe_devclass;

MODULE_DEPEND(bxe, pci, 1, 1, 1);
MODULE_DEPEND(bxe, ether, 1, 1, 1);
DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);

/* resources needed for unloading a previously loaded device */

#define BXE_PREV_WAIT_NEEDED 1
struct mtx bxe_prev_mtx;
MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
struct bxe_prev_list_node {
    LIST_ENTRY(bxe_prev_list_node) node;
    uint8_t bus;
    uint8_t slot;
    uint8_t path;
    uint8_t aer; /* XXX automatic error recovery */
    uint8_t undi;
};
static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list);

static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* Tunable device values... */

SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");

/* Debug */
unsigned long bxe_debug = 0;
TUNABLE_ULONG("hw.bxe.debug", &bxe_debug);
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, (CTLFLAG_RDTUN),
             &bxe_debug, 0, "Debug logging mode");

/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
static int bxe_interrupt_mode = INTR_MODE_MSIX;
TUNABLE_INT("hw.bxe.interrupt_mode", &bxe_interrupt_mode);
SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");

/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
static int bxe_queue_count = 4;
TUNABLE_INT("hw.bxe.queue_count", &bxe_queue_count);
SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
           &bxe_queue_count, 0, "Multi-Queue queue count");

/* max number of buffers per queue (default RX_BD_USABLE) */
static int bxe_max_rx_bufs = 0;
TUNABLE_INT("hw.bxe.max_rx_bufs", &bxe_max_rx_bufs);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");

/* Host interrupt coalescing RX tick timer (usecs) */
static int bxe_hc_rx_ticks = 25;
TUNABLE_INT("hw.bxe.hc_rx_ticks", &bxe_hc_rx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");

/* Host interrupt coalescing TX tick timer (usecs) */
static int bxe_hc_tx_ticks = 50;
TUNABLE_INT("hw.bxe.hc_tx_ticks", &bxe_hc_tx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");

/* Maximum number of Rx packets to process at a time */
static int bxe_rx_budget = 0xffffffff;
TUNABLE_INT("hw.bxe.rx_budget", &bxe_rx_budget);
SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
           &bxe_rx_budget, 0, "Rx processing budget");

/* Maximum LRO aggregation size */
static int bxe_max_aggregation_size = 0;
TUNABLE_INT("hw.bxe.max_aggregation_size", &bxe_max_aggregation_size);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
           &bxe_max_aggregation_size, 0, "max aggregation size");

/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
static int bxe_mrrs = -1;
TUNABLE_INT("hw.bxe.mrrs", &bxe_mrrs);
SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
           &bxe_mrrs, 0, "PCIe maximum read request size");

/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
static int bxe_autogreeen = 0;
TUNABLE_INT("hw.bxe.autogreeen", &bxe_autogreeen);
SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
           &bxe_autogreeen, 0, "AutoGrEEEn support");

/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
static int bxe_udp_rss = 0;
TUNABLE_INT("hw.bxe.udp_rss", &bxe_udp_rss);
SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
           &bxe_udp_rss, 0, "UDP RSS support");
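/*
 * Example (illustrative only): the hw.bxe.* tunables above are normally set
 * at boot time from /boot/loader.conf, e.g.:
 *
 *   hw.bxe.interrupt_mode=2    # prefer MSI-X
 *   hw.bxe.queue_count=4       # four receive/transmit queue pairs
 *   hw.bxe.debug=0             # debug logging off
 *
 * The values shown are only a sketch; see bxe(4) for the supported ranges
 * and defaults.
 */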
#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */

#define STATS_OFFSET32(stat_name) \
    (offsetof(struct bxe_eth_stats, stat_name) / 4)

#define Q_STATS_OFFSET32(stat_name) \
    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)

static const struct {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
#define STATS_FLAGS_PORT 1
#define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */
#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
    char string[STAT_NAME_LEN];
} bxe_eth_stats_arr[] = {
    { STATS_OFFSET32(total_bytes_received_hi), 8, STATS_FLAGS_BOTH, "rx_bytes" },
    { STATS_OFFSET32(error_bytes_received_hi), 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), 8, STATS_FLAGS_PORT, "rx_crc_errors" },
    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), 8, STATS_FLAGS_PORT, "rx_align_errors" },
    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
    { STATS_OFFSET32(etherstatsoverrsizepkts_hi), 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi), 8, STATS_FLAGS_PORT, "rx_fragments" },
    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, STATS_FLAGS_PORT, "rx_jabbers" },
    { STATS_OFFSET32(no_buff_discard_hi), 8, STATS_FLAGS_BOTH, "rx_discards" },
    { STATS_OFFSET32(mac_filter_discard), 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
    { STATS_OFFSET32(mf_tag_discard), 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
    { STATS_OFFSET32(pfc_frames_received_hi), 8, STATS_FLAGS_PORT, "pfc_frames_received" },
    { STATS_OFFSET32(pfc_frames_sent_hi), 8, STATS_FLAGS_PORT, "pfc_frames_sent" },
    { STATS_OFFSET32(brb_drop_hi), 8, STATS_FLAGS_PORT, "rx_brb_discard" },
    { STATS_OFFSET32(brb_truncate_hi), 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
    { STATS_OFFSET32(pause_frames_received_hi), 8, STATS_FLAGS_PORT, "rx_pause_frames" },
    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
    { STATS_OFFSET32(nig_timer_max), 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
    { STATS_OFFSET32(total_bytes_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_bytes" },
    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, STATS_FLAGS_PORT, "tx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), 8, STATS_FLAGS_PORT, "tx_mac_errors" },
    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), 8, STATS_FLAGS_PORT, "tx_single_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), 8, STATS_FLAGS_PORT, "tx_deferred" },
    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), 8, STATS_FLAGS_PORT, "tx_late_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), 8, STATS_FLAGS_PORT, "tx_total_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
    { STATS_OFFSET32(etherstatspktsover1522octets_hi), 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
    { STATS_OFFSET32(pause_frames_sent_hi), 8, STATS_FLAGS_PORT, "tx_pause_frames" },
    { STATS_OFFSET32(total_tpa_aggregations_hi), 8, STATS_FLAGS_FUNC, "tpa_aggregations" },
    { STATS_OFFSET32(total_tpa_aggregated_frames_hi), 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames" },
    { STATS_OFFSET32(total_tpa_bytes_hi), 8, STATS_FLAGS_FUNC, "tpa_bytes" },
#if 0
    { STATS_OFFSET32(recoverable_error), 4, STATS_FLAGS_FUNC, "recoverable_errors" },
    { STATS_OFFSET32(unrecoverable_error), 4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
#endif
    { STATS_OFFSET32(eee_tx_lpi), 4, STATS_FLAGS_PORT, "eee_tx_lpi" },
    { STATS_OFFSET32(rx_calls), 4, STATS_FLAGS_FUNC, "rx_calls" },
    { STATS_OFFSET32(rx_pkts), 4, STATS_FLAGS_FUNC, "rx_pkts" },
    { STATS_OFFSET32(rx_tpa_pkts), 4, STATS_FLAGS_FUNC, "rx_tpa_pkts" },
    { STATS_OFFSET32(rx_soft_errors), 4, STATS_FLAGS_FUNC, "rx_soft_errors" },
    { STATS_OFFSET32(rx_hw_csum_errors), 4, STATS_FLAGS_FUNC, "rx_hw_csum_errors" },
"rx_ofld_frames_csum_ip"}, 492 { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp), 493 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"}, 494 { STATS_OFFSET32(rx_budget_reached), 495 4, STATS_FLAGS_FUNC, "rx_budget_reached"}, 496 { STATS_OFFSET32(tx_pkts), 497 4, STATS_FLAGS_FUNC, "tx_pkts"}, 498 { STATS_OFFSET32(tx_soft_errors), 499 4, STATS_FLAGS_FUNC, "tx_soft_errors"}, 500 { STATS_OFFSET32(tx_ofld_frames_csum_ip), 501 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"}, 502 { STATS_OFFSET32(tx_ofld_frames_csum_tcp), 503 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"}, 504 { STATS_OFFSET32(tx_ofld_frames_csum_udp), 505 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"}, 506 { STATS_OFFSET32(tx_ofld_frames_lso), 507 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"}, 508 { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 509 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"}, 510 { STATS_OFFSET32(tx_encap_failures), 511 4, STATS_FLAGS_FUNC, "tx_encap_failures"}, 512 { STATS_OFFSET32(tx_hw_queue_full), 513 4, STATS_FLAGS_FUNC, "tx_hw_queue_full"}, 514 { STATS_OFFSET32(tx_hw_max_queue_depth), 515 4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"}, 516 { STATS_OFFSET32(tx_dma_mapping_failure), 517 4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"}, 518 { STATS_OFFSET32(tx_max_drbr_queue_depth), 519 4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"}, 520 { STATS_OFFSET32(tx_window_violation_std), 521 4, STATS_FLAGS_FUNC, "tx_window_violation_std"}, 522 { STATS_OFFSET32(tx_window_violation_tso), 523 4, STATS_FLAGS_FUNC, "tx_window_violation_tso"}, 524#if 0 525 { STATS_OFFSET32(tx_unsupported_tso_request_ipv6), 526 4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_ipv6"}, 527 { STATS_OFFSET32(tx_unsupported_tso_request_not_tcp), 528 4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_not_tcp"}, 529#endif 530 { STATS_OFFSET32(tx_chain_lost_mbuf), 531 4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"}, 532 { STATS_OFFSET32(tx_frames_deferred), 533 4, STATS_FLAGS_FUNC, "tx_frames_deferred"}, 534 { STATS_OFFSET32(tx_queue_xoff), 535 4, STATS_FLAGS_FUNC, "tx_queue_xoff"}, 536 { STATS_OFFSET32(mbuf_defrag_attempts), 537 4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"}, 538 { STATS_OFFSET32(mbuf_defrag_failures), 539 4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"}, 540 { STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 541 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"}, 542 { STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 543 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"}, 544 { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 545 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"}, 546 { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 547 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"}, 548 { STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 549 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"}, 550 { STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 551 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"}, 552 { STATS_OFFSET32(mbuf_alloc_tx), 553 4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"}, 554 { STATS_OFFSET32(mbuf_alloc_rx), 555 4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"}, 556 { STATS_OFFSET32(mbuf_alloc_sge), 557 4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"}, 558 { STATS_OFFSET32(mbuf_alloc_tpa), 559 4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"} 560}; 561 562static const struct { 563 uint32_t offset; 564 uint32_t size; 565 char string[STAT_NAME_LEN]; 566} bxe_eth_q_stats_arr[] = { 567 { Q_STATS_OFFSET32(total_bytes_received_hi), 568 8, "rx_bytes" }, 569 { Q_STATS_OFFSET32(total_unicast_packets_received_hi), 570 8, "rx_ucast_packets" }, 571 { Q_STATS_OFFSET32(total_multicast_packets_received_hi), 572 8, 
"rx_mcast_packets" }, 573 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), 574 8, "rx_bcast_packets" }, 575 { Q_STATS_OFFSET32(no_buff_discard_hi), 576 8, "rx_discards" }, 577 { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 578 8, "tx_bytes" }, 579 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), 580 8, "tx_ucast_packets" }, 581 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), 582 8, "tx_mcast_packets" }, 583 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), 584 8, "tx_bcast_packets" }, 585 { Q_STATS_OFFSET32(total_tpa_aggregations_hi), 586 8, "tpa_aggregations" }, 587 { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi), 588 8, "tpa_aggregated_frames"}, 589 { Q_STATS_OFFSET32(total_tpa_bytes_hi), 590 8, "tpa_bytes"}, 591 { Q_STATS_OFFSET32(rx_calls), 592 4, "rx_calls"}, 593 { Q_STATS_OFFSET32(rx_pkts), 594 4, "rx_pkts"}, 595 { Q_STATS_OFFSET32(rx_tpa_pkts), 596 4, "rx_tpa_pkts"}, 597 { Q_STATS_OFFSET32(rx_soft_errors), 598 4, "rx_soft_errors"}, 599 { Q_STATS_OFFSET32(rx_hw_csum_errors), 600 4, "rx_hw_csum_errors"}, 601 { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip), 602 4, "rx_ofld_frames_csum_ip"}, 603 { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp), 604 4, "rx_ofld_frames_csum_tcp_udp"}, 605 { Q_STATS_OFFSET32(rx_budget_reached), 606 4, "rx_budget_reached"}, 607 { Q_STATS_OFFSET32(tx_pkts), 608 4, "tx_pkts"}, 609 { Q_STATS_OFFSET32(tx_soft_errors), 610 4, "tx_soft_errors"}, 611 { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip), 612 4, "tx_ofld_frames_csum_ip"}, 613 { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp), 614 4, "tx_ofld_frames_csum_tcp"}, 615 { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp), 616 4, "tx_ofld_frames_csum_udp"}, 617 { Q_STATS_OFFSET32(tx_ofld_frames_lso), 618 4, "tx_ofld_frames_lso"}, 619 { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 620 4, "tx_ofld_frames_lso_hdr_splits"}, 621 { Q_STATS_OFFSET32(tx_encap_failures), 622 4, "tx_encap_failures"}, 623 { Q_STATS_OFFSET32(tx_hw_queue_full), 624 4, "tx_hw_queue_full"}, 625 { Q_STATS_OFFSET32(tx_hw_max_queue_depth), 626 4, "tx_hw_max_queue_depth"}, 627 { Q_STATS_OFFSET32(tx_dma_mapping_failure), 628 4, "tx_dma_mapping_failure"}, 629 { Q_STATS_OFFSET32(tx_max_drbr_queue_depth), 630 4, "tx_max_drbr_queue_depth"}, 631 { Q_STATS_OFFSET32(tx_window_violation_std), 632 4, "tx_window_violation_std"}, 633 { Q_STATS_OFFSET32(tx_window_violation_tso), 634 4, "tx_window_violation_tso"}, 635#if 0 636 { Q_STATS_OFFSET32(tx_unsupported_tso_request_ipv6), 637 4, "tx_unsupported_tso_request_ipv6"}, 638 { Q_STATS_OFFSET32(tx_unsupported_tso_request_not_tcp), 639 4, "tx_unsupported_tso_request_not_tcp"}, 640#endif 641 { Q_STATS_OFFSET32(tx_chain_lost_mbuf), 642 4, "tx_chain_lost_mbuf"}, 643 { Q_STATS_OFFSET32(tx_frames_deferred), 644 4, "tx_frames_deferred"}, 645 { Q_STATS_OFFSET32(tx_queue_xoff), 646 4, "tx_queue_xoff"}, 647 { Q_STATS_OFFSET32(mbuf_defrag_attempts), 648 4, "mbuf_defrag_attempts"}, 649 { Q_STATS_OFFSET32(mbuf_defrag_failures), 650 4, "mbuf_defrag_failures"}, 651 { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 652 4, "mbuf_rx_bd_alloc_failed"}, 653 { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 654 4, "mbuf_rx_bd_mapping_failed"}, 655 { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 656 4, "mbuf_rx_tpa_alloc_failed"}, 657 { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 658 4, "mbuf_rx_tpa_mapping_failed"}, 659 { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 660 4, "mbuf_rx_sge_alloc_failed"}, 661 { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 662 4, "mbuf_rx_sge_mapping_failed"}, 663 { 
    { Q_STATS_OFFSET32(mbuf_alloc_tx), 4, "mbuf_alloc_tx" },
    { Q_STATS_OFFSET32(mbuf_alloc_rx), 4, "mbuf_alloc_rx" },
    { Q_STATS_OFFSET32(mbuf_alloc_sge), 4, "mbuf_alloc_sge" },
    { Q_STATS_OFFSET32(mbuf_alloc_tpa), 4, "mbuf_alloc_tpa" }
};

#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)


static void bxe_cmng_fns_init(struct bxe_softc *sc, uint8_t read_cfg, uint8_t cmng_type);
static int  bxe_get_cmng_fns_mode(struct bxe_softc *sc);
static void storm_memset_cmng(struct bxe_softc *sc, struct cmng_init *cmng, uint8_t port);
static void bxe_set_reset_global(struct bxe_softc *sc);
static void bxe_set_reset_in_progress(struct bxe_softc *sc);
static uint8_t bxe_reset_is_done(struct bxe_softc *sc, int engine);
static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc, uint8_t *global, uint8_t print);
static void bxe_int_disable(struct bxe_softc *sc);
static int  bxe_release_leader_lock(struct bxe_softc *sc);
static void bxe_pf_disable(struct bxe_softc *sc);
static void bxe_free_fp_buffers(struct bxe_softc *sc);
static inline void bxe_update_rx_prod(struct bxe_softc *sc, struct bxe_fastpath *fp,
                                      uint16_t rx_bd_prod, uint16_t rx_cq_prod,
                                      uint16_t rx_sge_prod);
static void bxe_link_report_locked(struct bxe_softc *sc);
static void bxe_link_report(struct bxe_softc *sc);
static void bxe_link_status_update(struct bxe_softc *sc);
static void bxe_periodic_callout_func(void *xsc);
static void bxe_periodic_start(struct bxe_softc *sc);
static void bxe_periodic_stop(struct bxe_softc *sc);
static int  bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, uint16_t prev_index, uint16_t index);
static int  bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, int queue);
static int  bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, uint16_t index);
static uint8_t bxe_txeof(struct bxe_softc *sc, struct bxe_fastpath *fp);
static void bxe_task_fp(struct bxe_fastpath *fp);
static __noinline void bxe_dump_mbuf(struct bxe_softc *sc, struct mbuf *m, uint8_t contents);
static int  bxe_alloc_mem(struct bxe_softc *sc);
static void bxe_free_mem(struct bxe_softc *sc);
static int  bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
static void bxe_free_fw_stats_mem(struct bxe_softc *sc);
static int  bxe_interrupt_attach(struct bxe_softc *sc);
static void bxe_interrupt_detach(struct bxe_softc *sc);
static void bxe_set_rx_mode(struct bxe_softc *sc);
static int  bxe_init_locked(struct bxe_softc *sc);
static int  bxe_stop_locked(struct bxe_softc *sc);
static __noinline int bxe_nic_load(struct bxe_softc *sc, int load_mode);
static __noinline int bxe_nic_unload(struct bxe_softc *sc, uint32_t unload_mode, uint8_t keep_link);

static void bxe_handle_sp_tq(void *context, int pending);
static void bxe_handle_rx_mode_tq(void *context, int pending);
static void bxe_handle_fp_tq(void *context, int pending);
/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */
uint32_t
calc_crc32(uint8_t  *crc32_packet,
           uint32_t crc32_length,
           uint32_t crc32_seed,
           uint8_t  complement)
{
    uint32_t byte         = 0;
    uint32_t bit          = 0;
    uint8_t  msb          = 0;
    uint32_t temp         = 0;
    uint32_t shft         = 0;
    uint8_t  current_byte = 0;
    uint32_t crc32_result = crc32_seed;
    const uint32_t CRC32_POLY = 0x1edc6f41;

    if ((crc32_packet == NULL) ||
        (crc32_length == 0) ||
        ((crc32_length % 8) != 0))
    {
        return (crc32_result);
    }

    for (byte = 0; byte < crc32_length; byte = byte + 1)
    {
        current_byte = crc32_packet[byte];
        for (bit = 0; bit < 8; bit = bit + 1)
        {
            /* msb = crc32_result[31]; */
            msb = (uint8_t)(crc32_result >> 31);

            crc32_result = crc32_result << 1;

            /* if (msb != current_byte[bit]) */
            if (msb != (0x1 & (current_byte >> bit)))
            {
                crc32_result = crc32_result ^ CRC32_POLY;
                /* crc32_result[0] = 1 */
                crc32_result |= 1;
            }
        }
    }

    /* Last step is to:
     * 1. "mirror" every bit
     * 2. swap the 4 bytes
     * 3. complement each bit
     */

    /* Mirror */
    temp = crc32_result;
    shft = sizeof(crc32_result) * 8 - 1;

    for (crc32_result >>= 1; crc32_result; crc32_result >>= 1)
    {
        temp <<= 1;
        temp |= crc32_result & 1;
        shft--;
    }

    /* temp[31-bit] = crc32_result[bit] */
    temp <<= shft;

    /* Swap */
    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
    {
        uint32_t t0, t1, t2, t3;
        t0 = (0x000000ff & (temp >> 24));
        t1 = (0x0000ff00 & (temp >> 8));
        t2 = (0x00ff0000 & (temp << 8));
        t3 = (0xff000000 & (temp << 24));
        crc32_result = t0 | t1 | t2 | t3;
    }

    /* Complement */
    if (complement)
    {
        crc32_result = ~crc32_result;
    }

    return (crc32_result);
}

int
bxe_test_bit(int nr,
             volatile unsigned long *addr)
{
    return ((atomic_load_acq_long(addr) & (1 << nr)) != 0);
}

void
bxe_set_bit(unsigned int nr,
            volatile unsigned long *addr)
{
    atomic_set_acq_long(addr, (1 << nr));
}

void
bxe_clear_bit(int nr,
              volatile unsigned long *addr)
{
    atomic_clear_acq_long(addr, (1 << nr));
}

int
bxe_test_and_set_bit(int nr,
                     volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;
    return (x & nr);
}

int
bxe_test_and_clear_bit(int nr,
                       volatile unsigned long *addr)
{
    unsigned long x;
    nr = (1 << nr);
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0);
    // if (x & nr) bit_was_set; else bit_was_not_set;
    return (x & nr);
}

int
bxe_cmpxchg(volatile int *addr,
            int old,
            int new)
{
    int x;
    do {
        x = *addr;
    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
    return (x);
}
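/*
 * For illustration only: a hedged sketch of calling calc_crc32() above.
 * The buffer length must be a multiple of 8 bytes or the function returns
 * the seed unchanged. The seed and complement values below are example
 * choices (the polynomial 0x1edc6f41 is the CRC32C/Castagnoli polynomial),
 * not the values used by other callers in this driver.
 */
#if 0
static void
calc_crc32_example(void)
{
    uint8_t buf[8] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };
    uint32_t crc;

    /* seed with all ones and complement the result */
    crc = calc_crc32(buf, sizeof(buf), 0xffffffff, 1);
    (void)crc;
}
#endif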
/*
 * Get DMA memory from the OS.
 *
 * Validates that the OS has provided DMA buffers in response to a
 * bus_dmamap_load call and saves the physical address of those buffers.
 * When this callback is used the OS returns 0 from the mapping function
 * (bus_dmamap_load), so on error we zero the physical address and segment
 * count here to pass the failure back to the caller.
 *
 * Returns:
 *   Nothing.
 */
static void
bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct bxe_dma *dma = arg;

    if (error) {
        dma->paddr = 0;
        dma->nseg  = 0;
        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
    } else {
        dma->paddr = segs->ds_addr;
        dma->nseg  = nseg;
#if 0
        BLOGD(dma->sc, DBG_LOAD,
              "DMA alloc '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
              dma->msg, dma->vaddr, (void *)dma->paddr,
              dma->nseg, dma->size);
#endif
    }
}

/*
 * Allocate a block of memory and map it for DMA. No partial completions
 * are allowed; any resources acquired are released if the full allocation
 * cannot be completed.
 *
 * Returns:
 *   0 = Success, !0 = Failure
 */
int
bxe_dma_alloc(struct bxe_softc *sc,
              bus_size_t size,
              struct bxe_dma *dma,
              const char *msg)
{
    int rc;

    if (dma->size > 0) {
        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
              (unsigned long)dma->size);
        return (1);
    }

    memset(dma, 0, sizeof(*dma)); /* sanity */
    dma->sc   = sc;
    dma->size = size;
    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);

    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                            BCM_PAGE_SIZE,      /* alignment */
                            0,                  /* boundary limit */
                            BUS_SPACE_MAXADDR,  /* restricted low */
                            BUS_SPACE_MAXADDR,  /* restricted hi */
                            NULL,               /* addr filter() */
                            NULL,               /* addr filter() arg */
                            size,               /* max map size */
                            1,                  /* num discontinuous */
                            size,               /* max seg size */
                            BUS_DMA_ALLOCNOW,   /* flags */
                            NULL,               /* lock() */
                            NULL,               /* lock() arg */
                            &dma->tag);         /* returned dma tag */
    if (rc != 0) {
        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamem_alloc(dma->tag,
                          (void **)&dma->vaddr,
                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
                          &dma->map);
    if (rc != 0) {
        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    rc = bus_dmamap_load(dma->tag,
                         dma->map,
                         dma->vaddr,
                         size,
                         bxe_dma_map_addr, /* BLOGD in here */
                         dma,
                         BUS_DMA_NOWAIT);
    if (rc != 0) {
        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    return (0);
}

void
bxe_dma_free(struct bxe_softc *sc,
             struct bxe_dma *dma)
{
    if (dma->size > 0) {
#if 0
        BLOGD(sc, DBG_LOAD,
              "DMA free '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
              dma->msg, dma->vaddr, (void *)dma->paddr,
              dma->nseg, dma->size);
#endif

        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));

        bus_dmamap_sync(dma->tag, dma->map,
                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
        bus_dmamap_unload(dma->tag, dma->map);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
    }

    memset(dma, 0, sizeof(*dma));
}
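/*
 * For illustration only: a hedged sketch of the bxe_dma_alloc()/
 * bxe_dma_free() pairing above. The "example_block" name and the 4KB size
 * are made up for this example; real callers in this file allocate status
 * blocks, queue rings, and similar shared-memory structures.
 */
#if 0
static int
bxe_dma_example(struct bxe_softc *sc)
{
    struct bxe_dma example_block;

    memset(&example_block, 0, sizeof(example_block));
    if (bxe_dma_alloc(sc, 4096, &example_block, "example") != 0) {
        return (1); /* tag/alloc/load failed; nothing left to clean up */
    }

    /* ... use example_block.vaddr (KVA) and example_block.paddr (bus) ... */

    bxe_dma_free(sc, &example_block);
    return (0);
}
#endif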
/*
 * These indirect read and write routines are only used during init.
 * The locking is handled by the MCP.
 */

void
bxe_reg_wr_ind(struct bxe_softc *sc,
               uint32_t addr,
               uint32_t val)
{
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
}

uint32_t
bxe_reg_rd_ind(struct bxe_softc *sc,
               uint32_t addr)
{
    uint32_t val;

    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);

    return (val);
}

#if 0
void bxe_dp_dmae(struct bxe_softc *sc, struct dmae_command *dmae, int msglvl)
{
    uint32_t src_type = dmae->opcode & DMAE_COMMAND_SRC;

    switch (dmae->opcode & DMAE_COMMAND_DST) {
    case DMAE_CMD_DST_PCI:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%08x], len [%d*4], dst [%x:%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    case DMAE_CMD_DST_GRC:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->dst_addr_lo >> 2,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%08x], len [%d*4], dst [%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->dst_addr_lo >> 2,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    default:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
               "comp_addr [%x:%08x] comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
               "comp_addr [%x:%08x] comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    }

}
#endif
static int
bxe_acquire_hw_lock(struct bxe_softc *sc,
                    uint32_t resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;
    int cnt;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is not already taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        BLOGE(sc, "resource in use (status 0x%x bit 0x%x)\n",
              lock_status, resource_bit);
        return (-1);
    }

    /* try every 5ms for 5 seconds */
    for (cnt = 0; cnt < 1000; cnt++) {
        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
        lock_status = REG_RD(sc, hw_lock_control_reg);
        if (lock_status & resource_bit) {
            return (0);
        }
        DELAY(5000);
    }

    BLOGE(sc, "Resource lock timeout!\n");
    return (-1);
}

static int
bxe_release_hw_lock(struct bxe_softc *sc,
                    uint32_t resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);
        return (-1);
    }

    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is currently taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (!(lock_status & resource_bit)) {
        BLOGE(sc, "resource not in use (status 0x%x bit 0x%x)\n",
              lock_status, resource_bit);
        return (-1);
    }

    REG_WR(sc, hw_lock_control_reg, resource_bit);
    return (0);
}
/*
 * The per-pf misc lock must be acquired before the per-port mcp lock.
 * Otherwise, had we done things the other way around, if two pfs from the
 * same port were to attempt to access nvram at the same time, we could run
 * into a scenario such as:
 * pf A takes the port lock.
 * pf B succeeds in taking the same lock since they are from the same port.
 * pf A takes the per pf misc lock. Performs eeprom access.
 * pf A finishes. Unlocks the per pf misc lock.
 * pf B takes the lock and proceeds to perform its own access.
 * pf A unlocks the per port lock, while pf B is still working (!).
 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
 * access corrupted by pf B).
 */
static int
bxe_acquire_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* acquire HW lock: protect against other PFs in PF Direct Assignment */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* request access to nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
            break;
        }

        DELAY(5);
    }

    if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
        BLOGE(sc, "Cannot get access to nvram interface\n");
        return (-1);
    }

    return (0);
}

static int
bxe_release_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* relinquish nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
            break;
        }

        DELAY(5);
    }

    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
        BLOGE(sc, "Cannot free access to nvram interface\n");
        return (-1);
    }

    /* release HW lock: protect against other PFs in PF Direct Assignment */
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    return (0);
}

static void
bxe_enable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* enable both bits, even on read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void
bxe_disable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* disable both bits, even after read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
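/*
 * For illustration only: a hedged sketch of the canonical NVRAM access
 * bracketing built from the helpers above -- take the arbitration lock,
 * enable the interface, perform the accesses, then tear down in reverse
 * order. bxe_nvram_read()/bxe_nvram_write() below implement exactly this
 * pattern around their dword loops.
 */
#if 0
static int
bxe_nvram_access_example(struct bxe_softc *sc)
{
    int rc;

    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }
    bxe_enable_nvram_access(sc);

    /* ... dword reads/writes against the MCP NVRAM command registers ... */

    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);
    return (0);
}
#endif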
static int
bxe_nvram_read_dword(struct bxe_softc *sc,
                     uint32_t offset,
                     uint32_t *ret_val,
                     uint32_t cmd_flags)
{
    int count, i, rc;
    uint32_t val;

    /* build the command word */
    cmd_flags |= MCPR_NVM_COMMAND_DOIT;

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* address of the NVRAM to read from */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue a read command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    *ret_val = 0;
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);

        if (val & MCPR_NVM_COMMAND_DONE) {
            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
            /* we read nvram data in cpu order
             * but ethtool sees it as an array of bytes
             * converting to big-endian will do the work
             */
            *ret_val = htobe32(val);
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram read timeout expired\n");
    }

    return (rc);
}

static int
bxe_nvram_read(struct bxe_softc *sc,
               uint32_t offset,
               uint8_t *ret_buf,
               int buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    int rc;

    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
              "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    /* read the first word(s) */
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);

        /* advance to the next dword */
        offset   += sizeof(uint32_t);
        ret_buf  += sizeof(uint32_t);
        buf_size -= sizeof(uint32_t);
        cmd_flags = 0;
    }

    if (rc == 0) {
        cmd_flags |= MCPR_NVM_COMMAND_LAST;
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

static int
bxe_nvram_write_dword(struct bxe_softc *sc,
                      uint32_t offset,
                      uint32_t val,
                      uint32_t cmd_flags)
{
    int count, i, rc;

    /* build the command word */
    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* write the data */
    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);

    /* address of the NVRAM to write to */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue the write command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
        if (val & MCPR_NVM_COMMAND_DONE) {
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram write timeout expired\n");
    }

    return (rc);
}

#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
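/*
 * Worked example (illustrative): BYTE_OFFSET() converts a byte address into
 * a bit shift within its containing dword. For offset 0x6, (offset & 0x03)
 * is 2, so BYTE_OFFSET(0x6) = 8 * 2 = 16 -- the byte lives in bits 23:16 of
 * the dword at aligned offset 0x4. bxe_nvram_write1() below relies on this
 * to splice a single byte into a read-modify-write of the aligned dword.
 */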
static int
bxe_nvram_write1(struct bxe_softc *sc,
                 uint32_t offset,
                 uint8_t *data_buf,
                 int buf_size)
{
    uint32_t cmd_flags;
    uint32_t align_offset;
    uint32_t val;
    int rc;

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
              "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
    align_offset = (offset & ~0x03);
    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);

    if (rc == 0) {
        val &= ~(0xff << BYTE_OFFSET(offset));
        val |= (*data_buf << BYTE_OFFSET(offset));

        /* nvram data is returned as an array of bytes
         * convert it back to cpu order
         */
        val = be32toh(val);

        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

static int
bxe_nvram_write(struct bxe_softc *sc,
                uint32_t offset,
                uint8_t *data_buf,
                int buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    uint32_t written_so_far;
    int rc;

    if (buf_size == 1) {
        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
    }

    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if (buf_size == 0) {
        return (0); /* nothing to do */
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
              "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    written_so_far = 0;
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((written_so_far < buf_size) && (rc == 0)) {
        if (written_so_far == (buf_size - sizeof(uint32_t))) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
        }

        memcpy(&val, data_buf, 4);

        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);

        /* advance to the next dword */
        offset         += sizeof(uint32_t);
        data_buf       += sizeof(uint32_t);
        written_so_far += sizeof(uint32_t);
        cmd_flags = 0;
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}

/* copy command into DMAE command memory and set DMAE command Go */
void
bxe_post_dmae(struct bxe_softc *sc,
              struct dmae_command *dmae,
              int idx)
{
    uint32_t cmd_offset;
    int i;

    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx));
    for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) {
        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
    }

    REG_WR(sc, dmae_reg_go_c[idx], 1);
}

uint32_t
bxe_dmae_opcode_add_comp(uint32_t opcode,
                         uint8_t comp_type)
{
    return (opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
                      DMAE_COMMAND_C_TYPE_ENABLE));
}
uint32_t
bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
{
    return (opcode & ~DMAE_COMMAND_SRC_RESET);
}

uint32_t
bxe_dmae_opcode(struct bxe_softc *sc,
                uint8_t src_type,
                uint8_t dst_type,
                uint8_t with_comp,
                uint8_t comp_type)
{
    uint32_t opcode = 0;

    opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
               (dst_type << DMAE_COMMAND_DST_SHIFT));

    opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET);

    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);

    opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) |
               (SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT));

    opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif

    if (with_comp) {
        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
    }

    return (opcode);
}

static void
bxe_prep_dmae_with_comp(struct bxe_softc *sc,
                        struct dmae_command *dmae,
                        uint8_t src_type,
                        uint8_t dst_type)
{
    memset(dmae, 0, sizeof(struct dmae_command));

    /* set the opcode */
    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
                                   TRUE, DMAE_COMP_PCI);

    /* fill in the completion parameters */
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_val     = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
static int
bxe_issue_dmae_with_comp(struct bxe_softc *sc,
                         struct dmae_command *dmae)
{
    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;

    BXE_DMAE_LOCK(sc);

    /* reset completion */
    *wb_comp = 0;

    /* post the command on the channel used for initializations */
    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));

    /* wait for completion */
    DELAY(5);

    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
        if (!timeout ||
            (sc->recovery_state != BXE_RECOVERY_DONE &&
             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
            BLOGE(sc, "DMAE timeout!\n");
            BXE_DMAE_UNLOCK(sc);
            return (DMAE_TIMEOUT);
        }

        timeout--;
        DELAY(50);
    }

    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
        BLOGE(sc, "DMAE PCI error!\n");
        BXE_DMAE_UNLOCK(sc);
        return (DMAE_PCI_ERROR);
    }

    BXE_DMAE_UNLOCK(sc);
    return (0);
}
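/*
 * For illustration only: a hedged sketch of how the DMAE helpers above
 * compose. bxe_prep_dmae_with_comp() builds the opcode via bxe_dmae_opcode()
 * with a PCI completion, the caller fills in the addresses and dword length,
 * and bxe_issue_dmae_with_comp() posts the command and polls the write-back
 * completion word. bxe_read_dmae()/bxe_write_dmae() below are the real
 * users of this pattern; the helper name here is made up.
 */
#if 0
static int
bxe_dmae_grc_to_pci_example(struct bxe_softc *sc, uint32_t grc_addr)
{
    struct dmae_command dmae;

    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

    dmae.src_addr_lo = (grc_addr >> 2); /* GRC addresses have dword resolution */
    dmae.src_addr_hi = 0;
    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
    dmae.len         = 4; /* dwords */

    return (bxe_issue_dmae_with_comp(sc, &dmae));
}
#endif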
void
bxe_read_dmae(struct bxe_softc *sc,
              uint32_t src_addr,
              uint32_t len32)
{
    struct dmae_command dmae;
    uint32_t *data;
    int i, rc;

    DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32));

    if (!sc->dmae_ready) {
        data = BXE_SP(sc, wb_data[0]);

        for (i = 0; i < len32; i++) {
            data[i] = (CHIP_IS_E1(sc)) ?
                          bxe_reg_rd_ind(sc, (src_addr + (i * 4))) :
                          REG_RD(sc, (src_addr + (i * 4)));
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

    /* fill in addresses and len */
    dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */
    dmae.src_addr_hi = 0;
    dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data));
    dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data));
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}

void
bxe_write_dmae(struct bxe_softc *sc,
               bus_addr_t dma_addr,
               uint32_t dst_addr,
               uint32_t len32)
{
    struct dmae_command dmae;
    int rc;

    if (!sc->dmae_ready) {
        DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32));

        if (CHIP_IS_E1(sc)) {
            ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        } else {
            ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32);
        }

        return;
    }

    /* set opcode and fixed command fields */
    bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

    /* fill in addresses and len */
    dmae.src_addr_lo = U64_LO(dma_addr);
    dmae.src_addr_hi = U64_HI(dma_addr);
    dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */
    dmae.dst_addr_hi = 0;
    dmae.len         = len32;

    /* issue the command and wait for completion */
    if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) {
        bxe_panic(sc, ("DMAE failed (%d)\n", rc));
    }
}

void
bxe_write_dmae_phys_len(struct bxe_softc *sc,
                        bus_addr_t phys_addr,
                        uint32_t addr,
                        uint32_t len)
{
    int dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
    int offset = 0;

    while (len > dmae_wr_max) {
        bxe_write_dmae(sc,
                       (phys_addr + offset), /* src DMA address */
                       (addr + offset),      /* dst GRC address */
                       dmae_wr_max);
        offset += (dmae_wr_max * 4);
        len -= dmae_wr_max;
    }

    bxe_write_dmae(sc,
                   (phys_addr + offset), /* src DMA address */
                   (addr + offset),      /* dst GRC address */
                   len);
}

void
bxe_set_ctx_validation(struct bxe_softc *sc,
                       struct eth_context *cxt,
                       uint32_t cid)
{
    /* ustorm cxt validation */
    cxt->ustorm_ag_context.cdu_usage =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
    /* xcontext validation */
    cxt->xstorm_ag_context.cdu_reserved =
        CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid),
                               CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}
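/*
 * Worked example (illustrative) for the chunking loop in
 * bxe_write_dmae_phys_len() above: "len" counts dwords while "offset"
 * advances in bytes, hence offset += dmae_wr_max * 4. With a hypothetical
 * dmae_wr_max of 0x400 dwords and len = 0x500, the loop issues one
 * 0x400-dword DMAE (advancing offset by 0x1000 bytes) and the tail write
 * covers the remaining 0x100 dwords. The actual DMAE_LEN32_WR_MAX() value
 * is chip-dependent.
 */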
static void
bxe_storm_memset_hc_timeout(struct bxe_softc *sc,
                            uint8_t port,
                            uint8_t fw_sb_id,
                            uint8_t sb_index,
                            uint8_t ticks)
{
    uint32_t addr =
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index));

    REG_WR8(sc, addr, ticks);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d ticks %d\n",
          port, fw_sb_id, sb_index, ticks);
}

static void
bxe_storm_memset_hc_disable(struct bxe_softc *sc,
                            uint8_t port,
                            uint16_t fw_sb_id,
                            uint8_t sb_index,
                            uint8_t disable)
{
    uint32_t enable_flag =
        (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
    uint32_t addr =
        (BAR_CSTRORM_INTMEM +
         CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index));
    uint8_t flags;

    /* clear and set */
    flags = REG_RD8(sc, addr);
    flags &= ~HC_INDEX_DATA_HC_ENABLED;
    flags |= enable_flag;
    REG_WR8(sc, addr, flags);

    BLOGD(sc, DBG_LOAD,
          "port %d fw_sb_id %d sb_index %d disable %d\n",
          port, fw_sb_id, sb_index, disable);
}

void
bxe_update_coalesce_sb_index(struct bxe_softc *sc,
                             uint8_t fw_sb_id,
                             uint8_t sb_index,
                             uint8_t disable,
                             uint16_t usec)
{
    int port = SC_PORT(sc);
    uint8_t ticks = (usec / 4); /* XXX ??? */

    bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks);

    disable = (disable) ? 1 : ((usec) ? 0 : 1);
    bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable);
}

void
elink_cb_udelay(struct bxe_softc *sc,
                uint32_t usecs)
{
    DELAY(usecs);
}

uint32_t
elink_cb_reg_read(struct bxe_softc *sc,
                  uint32_t reg_addr)
{
    return (REG_RD(sc, reg_addr));
}

void
elink_cb_reg_write(struct bxe_softc *sc,
                   uint32_t reg_addr,
                   uint32_t val)
{
    REG_WR(sc, reg_addr, val);
}

void
elink_cb_reg_wb_write(struct bxe_softc *sc,
                      uint32_t offset,
                      uint32_t *wb_write,
                      uint16_t len)
{
    REG_WR_DMAE(sc, offset, wb_write, len);
}

void
elink_cb_reg_wb_read(struct bxe_softc *sc,
                     uint32_t offset,
                     uint32_t *wb_write,
                     uint16_t len)
{
    REG_RD_DMAE(sc, offset, wb_write, len);
}

uint8_t
elink_cb_path_id(struct bxe_softc *sc)
{
    return (SC_PATH(sc));
}
1929{ 1930 /* XXX */ 1931#if 0 1932 //va_list ap; 1933 va_start(ap, elink_log_id); 1934 _XXX_(sc, lm_log_id, ap); 1935 va_end(ap); 1936#endif 1937 BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id); 1938} 1939 1940static int 1941bxe_set_spio(struct bxe_softc *sc, 1942 int spio, 1943 uint32_t mode) 1944{ 1945 uint32_t spio_reg; 1946 1947 /* Only 2 SPIOs are configurable */ 1948 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { 1949 BLOGE(sc, "Invalid SPIO 0x%x\n", spio); 1950 return (-1); 1951 } 1952 1953 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 1954 1955 /* read SPIO and mask except the float bits */ 1956 spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT); 1957 1958 switch (mode) { 1959 case MISC_SPIO_OUTPUT_LOW: 1960 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio); 1961 /* clear FLOAT and set CLR */ 1962 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 1963 spio_reg |= (spio << MISC_SPIO_CLR_POS); 1964 break; 1965 1966 case MISC_SPIO_OUTPUT_HIGH: 1967 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio); 1968 /* clear FLOAT and set SET */ 1969 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 1970 spio_reg |= (spio << MISC_SPIO_SET_POS); 1971 break; 1972 1973 case MISC_SPIO_INPUT_HI_Z: 1974 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio); 1975 /* set FLOAT */ 1976 spio_reg |= (spio << MISC_SPIO_FLOAT_POS); 1977 break; 1978 1979 default: 1980 break; 1981 } 1982 1983 REG_WR(sc, MISC_REG_SPIO, spio_reg); 1984 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); 1985 1986 return (0); 1987} 1988 1989static int 1990bxe_gpio_read(struct bxe_softc *sc, 1991 int gpio_num, 1992 uint8_t port) 1993{ 1994 /* The GPIO should be swapped if swap register is set and active */ 1995 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 1996 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 1997 int gpio_shift = (gpio_num + 1998 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 1999 uint32_t gpio_mask = (1 << gpio_shift); 2000 uint32_t gpio_reg; 2001 2002 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2003 BLOGE(sc, "Invalid GPIO %d\n", gpio_num); 2004 return (-1); 2005 } 2006 2007 /* read GPIO value */ 2008 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 2009 2010 /* get the requested pin value */ 2011 return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0; 2012} 2013 2014static int 2015bxe_gpio_write(struct bxe_softc *sc, 2016 int gpio_num, 2017 uint32_t mode, 2018 uint8_t port) 2019{ 2020 /* The GPIO should be swapped if swap register is set and active */ 2021 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 2022 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 2023 int gpio_shift = (gpio_num + 2024 (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 2025 uint32_t gpio_mask = (1 << gpio_shift); 2026 uint32_t gpio_reg; 2027 2028 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2029 BLOGE(sc, "Invalid GPIO %d\n", gpio_num); 2030 return (-1); 2031 } 2032 2033 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2034 2035 /* read GPIO and mask except the float bits */ 2036 gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); 2037 2038 switch (mode) { 2039 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2040 BLOGD(sc, DBG_PHY, 2041 "Set GPIO %d (shift %d) -> output low\n", 2042 gpio_num, gpio_shift); 2043 /* clear FLOAT and set CLR */ 2044 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2045 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); 2046 break; 2047 2048 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2049 BLOGD(sc, DBG_PHY, 2050 "Set GPIO %d (shift %d) -> output high\n", 2051 gpio_num, gpio_shift); 2052 /* clear FLOAT and set SET */ 2053 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2054 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); 2055 break; 2056 2057 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2058 BLOGD(sc, DBG_PHY, 2059 "Set GPIO %d (shift %d) -> input\n", 2060 gpio_num, gpio_shift); 2061 /* set FLOAT */ 2062 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); 2063 break; 2064 2065 default: 2066 break; 2067 } 2068 2069 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 2070 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2071 2072 return (0); 2073} 2074 2075static int 2076bxe_gpio_mult_write(struct bxe_softc *sc, 2077 uint8_t pins, 2078 uint32_t mode) 2079{ 2080 uint32_t gpio_reg; 2081 2082 /* any port swapping should be handled by caller */ 2083 2084 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2085 2086 /* read GPIO and mask except the float bits */ 2087 gpio_reg = REG_RD(sc, MISC_REG_GPIO); 2088 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2089 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); 2090 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); 2091 2092 switch (mode) { 2093 case MISC_REGISTERS_GPIO_OUTPUT_LOW: 2094 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins); 2095 /* set CLR */ 2096 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); 2097 break; 2098 2099 case MISC_REGISTERS_GPIO_OUTPUT_HIGH: 2100 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins); 2101 /* set SET */ 2102 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); 2103 break; 2104 2105 case MISC_REGISTERS_GPIO_INPUT_HI_Z: 2106 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins); 2107 /* set FLOAT */ 2108 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); 2109 break; 2110 2111 default: 2112 BLOGE(sc, "Invalid GPIO mode assignment %d\n", mode); 2113 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2114 return (-1); 2115 } 2116 2117 REG_WR(sc, MISC_REG_GPIO, gpio_reg); 2118 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2119 2120 return (0); 2121} 2122 2123static int 2124bxe_gpio_int_write(struct bxe_softc *sc, 2125 int gpio_num, 2126 uint32_t mode, 2127 uint8_t port) 2128{ 2129 /* The GPIO should be swapped if swap register is set and active */ 2130 int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && 2131 REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); 2132 int gpio_shift = (gpio_num + 2133 (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); 2134 uint32_t gpio_mask = (1 << gpio_shift); 2135 uint32_t gpio_reg; 2136 2137 if (gpio_num > MISC_REGISTERS_GPIO_3) { 2138 BLOGE(sc, "Invalid GPIO %d\n", gpio_num); 2139 return (-1); 2140 } 2141 2142 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2143 2144 /* read GPIO int */ 2145 gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT); 2146 2147 switch (mode) { 2148 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: 2149 BLOGD(sc, DBG_PHY, 2150 "Clear GPIO INT %d (shift %d) -> output low\n", 2151 gpio_num, gpio_shift); 2152 /* clear SET and set CLR */ 2153 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2154 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2155 break; 2156 2157 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: 2158 BLOGD(sc, DBG_PHY, 2159 "Set GPIO INT %d (shift %d) -> output high\n", 2160 gpio_num, gpio_shift); 2161 /* clear CLR and set SET */ 2162 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); 2163 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); 2164 break; 2165 2166 default: 2167 break; 2168 } 2169 2170 REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg); 2171 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); 2172 2173 return (0); 2174} 2175 2176uint32_t 2177elink_cb_gpio_read(struct bxe_softc *sc, 2178 uint16_t gpio_num, 2179 uint8_t port) 2180{ 2181 return (bxe_gpio_read(sc, gpio_num, port)); 2182} 2183 2184uint8_t 2185elink_cb_gpio_write(struct bxe_softc *sc, 2186 uint16_t gpio_num, 2187 uint8_t mode, /* 0=low 1=high */ 2188 uint8_t port) 2189{ 2190 return (bxe_gpio_write(sc, gpio_num, mode, port)); 2191} 2192 2193uint8_t 2194elink_cb_gpio_mult_write(struct bxe_softc *sc, 2195 uint8_t pins, 2196 uint8_t mode) /* 0=low 1=high */ 2197{ 2198 return (bxe_gpio_mult_write(sc, pins, mode)); 2199} 2200 2201uint8_t 2202elink_cb_gpio_int_write(struct bxe_softc *sc, 2203 uint16_t gpio_num, 2204 uint8_t mode, /* 0=low 1=high */ 2205 uint8_t port) 2206{ 2207 return (bxe_gpio_int_write(sc, gpio_num, mode, port)); 2208} 2209 2210void 2211elink_cb_notify_link_changed(struct bxe_softc *sc) 2212{ 2213 REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 + 2214 (SC_FUNC(sc) * sizeof(uint32_t))), 1); 2215} 2216 2217/* send the MCP a request, block until there is a reply */ 2218uint32_t 2219elink_cb_fw_command(struct bxe_softc *sc, 2220 uint32_t command, 2221 uint32_t param) 2222{ 2223 int mb_idx = SC_FW_MB_IDX(sc); 2224 uint32_t seq; 2225 uint32_t rc = 0; 2226 uint32_t cnt = 1; 2227 uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10; 2228 2229 BXE_FWMB_LOCK(sc); 2230 2231 seq = ++sc->fw_seq; 2232 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param); 2233 SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq)); 2234 2235 BLOGD(sc, DBG_PHY, 2236 "wrote command 0x%08x to FW MB param 0x%08x\n", 2237 (command | seq), param); 2238 2239 /* Let the FW do its magic. Give it up to 5 seconds... */ 2240 do { 2241 DELAY(delay * 1000); 2242 rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header); 2243 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); 2244 2245 BLOGD(sc, DBG_PHY, 2246 "[after %d ms] read 0x%x seq 0x%x from FW MB\n", 2247 cnt*delay, rc, seq); 2248 2249 /* is this a reply to our command? */ 2250 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { 2251 rc &= FW_MSG_CODE_MASK; 2252 } else { 2253 /* Ruh-roh! 
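The MCP never echoed our sequence number back within the timeout, so report the command as failed.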
*/ 2254 BLOGE(sc, "FW failed to respond!\n"); 2255 // XXX bxe_fw_dump(sc); 2256 rc = 0; 2257 } 2258 2259 BXE_FWMB_UNLOCK(sc); 2260 return (rc); 2261} 2262 2263static uint32_t 2264bxe_fw_command(struct bxe_softc *sc, 2265 uint32_t command, 2266 uint32_t param) 2267{ 2268 return (elink_cb_fw_command(sc, command, param)); 2269} 2270 2271static void 2272__storm_memset_dma_mapping(struct bxe_softc *sc, 2273 uint32_t addr, 2274 bus_addr_t mapping) 2275{ 2276 REG_WR(sc, addr, U64_LO(mapping)); 2277 REG_WR(sc, (addr + 4), U64_HI(mapping)); 2278} 2279 2280static void 2281storm_memset_spq_addr(struct bxe_softc *sc, 2282 bus_addr_t mapping, 2283 uint16_t abs_fid) 2284{ 2285 uint32_t addr = (XSEM_REG_FAST_MEMORY + 2286 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid)); 2287 __storm_memset_dma_mapping(sc, addr, mapping); 2288} 2289 2290static void 2291storm_memset_vf_to_pf(struct bxe_softc *sc, 2292 uint16_t abs_fid, 2293 uint16_t pf_id) 2294{ 2295 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2296 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2297 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2298 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); 2299} 2300 2301static void 2302storm_memset_func_en(struct bxe_softc *sc, 2303 uint16_t abs_fid, 2304 uint8_t enable) 2305{ 2306 REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2307 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2308 REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2309 REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable); 2310} 2311 2312static void 2313storm_memset_eq_data(struct bxe_softc *sc, 2314 struct event_ring_data *eq_data, 2315 uint16_t pfid) 2316{ 2317 uint32_t addr; 2318 size_t size; 2319 2320 addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid)); 2321 size = sizeof(struct event_ring_data); 2322 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data); 2323} 2324 2325static void 2326storm_memset_eq_prod(struct bxe_softc *sc, 2327 uint16_t eq_prod, 2328 uint16_t pfid) 2329{ 2330 uint32_t addr = (BAR_CSTRORM_INTMEM + 2331 CSTORM_EVENT_RING_PROD_OFFSET(pfid)); 2332 REG_WR16(sc, addr, eq_prod); 2333} 2334 2335/* 2336 * Post a slowpath command. 2337 * 2338 * A slowpath command is used to propagate a configuration change through 2339 * the controller in a controlled manner, allowing each STORM processor and 2340 * other H/W blocks to phase in the change. The commands sent on the 2341 * slowpath are referred to as ramrods. Depending on the ramrod used the 2342 * completion of the ramrod will occur in different ways. Here's a 2343 * breakdown of ramrods and how they complete: 2344 * 2345 * RAMROD_CMD_ID_ETH_PORT_SETUP 2346 * Used to setup the leading connection on a port. Completes on the 2347 * Receive Completion Queue (RCQ) of that port (typically fp[0]). 2348 * 2349 * RAMROD_CMD_ID_ETH_CLIENT_SETUP 2350 * Used to setup an additional connection on a port. Completes on the 2351 * RCQ of the multi-queue/RSS connection being initialized. 2352 * 2353 * RAMROD_CMD_ID_ETH_STAT_QUERY 2354 * Used to force the storm processors to update the statistics database 2355 * in host memory. This ramrod is sent on the leading connection CID and 2356 * completes as an index increment of the CSTORM on the default status 2357 * block. 
2358 * 2359 * RAMROD_CMD_ID_ETH_UPDATE 2360 * Used to update the state of the leading connection, usually to update 2361 * the RSS indirection table. Completes on the RCQ of the leading 2362 * connection. (Not currently used under FreeBSD until OS support becomes 2363 * available.) 2364 * 2365 * RAMROD_CMD_ID_ETH_HALT 2366 * Used when tearing down a connection prior to driver unload. Completes 2367 * on the RCQ of the multi-queue/RSS connection being torn down. Don't 2368 * use this on the leading connection. 2369 * 2370 * RAMROD_CMD_ID_ETH_SET_MAC 2371 * Sets the Unicast/Broadcast/Multicast used by the port. Completes on 2372 * the RCQ of the leading connection. 2373 * 2374 * RAMROD_CMD_ID_ETH_CFC_DEL 2375 * Used when tearing down a connection prior to driver unload. Completes 2376 * on the RCQ of the leading connection (since the current connection 2377 * has been completely removed from controller memory). 2378 * 2379 * RAMROD_CMD_ID_ETH_PORT_DEL 2380 * Used to tear down the leading connection prior to driver unload, 2381 * typically fp[0]. Completes as an index increment of the CSTORM on the 2382 * default status block. 2383 * 2384 * RAMROD_CMD_ID_ETH_FORWARD_SETUP 2385 * Used for connection offload. Completes on the RCQ of the multi-queue 2386 * RSS connection that is being offloaded. (Not currently used under 2387 * FreeBSD.) 2388 * 2389 * There can only be one command pending per function. 2390 * 2391 * Returns: 2392 * 0 = Success, !0 = Failure. 2393 */
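/*
 * Illustrative sketch only (not driver code): a caller posting a
 * contextless ramrod through bxe_sp_post() below splits the DMA address
 * of its prepared ramrod data into 32-bit halves. "rdata_addr" is a
 * hypothetical bus address of such a buffer.
 */
#if 0
    if (bxe_sp_post(sc, RAMROD_CMD_ID_ETH_SET_MAC, 0 /* cid */,
                    U64_HI(rdata_addr), U64_LO(rdata_addr),
                    NONE_CONNECTION_TYPE) != 0) {
        BLOGE(sc, "SET_MAC ramrod post failed (ring full)\n");
    }
#endif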
2394 2395/* must be called under the spq lock */ 2396static inline 2397struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc) 2398{ 2399 struct eth_spe *next_spe = sc->spq_prod_bd; 2400 2401 if (sc->spq_prod_bd == sc->spq_last_bd) { 2402 /* wrap back to the first eth_spq */ 2403 sc->spq_prod_bd = sc->spq; 2404 sc->spq_prod_idx = 0; 2405 } else { 2406 sc->spq_prod_bd++; 2407 sc->spq_prod_idx++; 2408 } 2409 2410 return (next_spe); 2411} 2412 2413/* must be called under the spq lock */ 2414static inline 2415void bxe_sp_prod_update(struct bxe_softc *sc) 2416{ 2417 int func = SC_FUNC(sc); 2418 2419 /* 2420 * Make sure that BD data is updated before writing the producer. 2421 * BD data is written to the memory, the producer is read from the 2422 * memory, thus we need a full memory barrier to ensure the ordering. 2423 */ 2424 mb(); 2425 2426 REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)), 2427 sc->spq_prod_idx); 2428 2429 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 2430 BUS_SPACE_BARRIER_WRITE); 2431} 2432 2433/** 2434 * bxe_is_contextless_ramrod - check if the current command ends on EQ 2435 * 2436 * @cmd: command to check 2437 * @cmd_type: command type 2438 */ 2439static inline 2440int bxe_is_contextless_ramrod(int cmd, 2441 int cmd_type) 2442{ 2443 if ((cmd_type == NONE_CONNECTION_TYPE) || 2444 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || 2445 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || 2446 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || 2447 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || 2448 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || 2449 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) { 2450 return (TRUE); 2451 } else { 2452 return (FALSE); 2453 } 2454} 2455 2456/** 2457 * bxe_sp_post - place a single command on an SP ring 2458 * 2459 * @sc: driver handle 2460 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) 2461 * @cid: SW CID the command is related to 2462 * @data_hi: command private data address (high 32 bits) 2463 * @data_lo: command private data address (low 32 bits) 2464 * @cmd_type: command type (e.g. NONE, ETH) 2465 * 2466 * SP data is handled as if it's always an address pair, thus data fields are 2467 * not swapped to little endian in upper functions. Instead this function swaps 2468 * data as if it's two uint32 fields. 2469 */ 2470int 2471bxe_sp_post(struct bxe_softc *sc, 2472 int command, 2473 int cid, 2474 uint32_t data_hi, 2475 uint32_t data_lo, 2476 int cmd_type) 2477{ 2478 struct eth_spe *spe; 2479 uint16_t type; 2480 int common; 2481 2482 common = bxe_is_contextless_ramrod(command, cmd_type); 2483 2484 BXE_SP_LOCK(sc); 2485 2486 if (common) { 2487 if (!atomic_load_acq_long(&sc->eq_spq_left)) { 2488 BLOGE(sc, "EQ ring is full!\n"); 2489 BXE_SP_UNLOCK(sc); 2490 return (-1); 2491 } 2492 } else { 2493 if (!atomic_load_acq_long(&sc->cq_spq_left)) { 2494 BLOGE(sc, "SPQ ring is full!\n"); 2495 BXE_SP_UNLOCK(sc); 2496 return (-1); 2497 } 2498 } 2499 2500 spe = bxe_sp_get_next(sc); 2501 2502 /* CID needs port number to be encoded in it */ 2503 spe->hdr.conn_and_cmd_data = 2504 htole32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid)); 2505 2506 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; 2507 2508 /* TBD: Check if it works for VFs */ 2509 type |= ((SC_FUNC(sc) << SPE_HDR_FUNCTION_ID_SHIFT) & 2510 SPE_HDR_FUNCTION_ID); 2511 2512 spe->hdr.type = htole16(type); 2513 2514 spe->data.update_data_addr.hi = htole32(data_hi); 2515 spe->data.update_data_addr.lo = htole32(data_lo); 2516 2517 /* 2518 * It's ok if the actual decrement is issued towards the memory 2519 * somewhere between the lock and unlock. Thus no more explicit 2520 * memory barrier is needed. 2521 */ 2522 if (common) { 2523 atomic_subtract_acq_long(&sc->eq_spq_left, 1); 2524 } else { 2525 atomic_subtract_acq_long(&sc->cq_spq_left, 1); 2526 } 2527 2528 BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr); 2529 BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n", 2530 BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata)); 2531 BLOGD(sc, DBG_SP, 2532 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n", 2533 sc->spq_prod_idx, 2534 (uint32_t)U64_HI(sc->spq_dma.paddr), 2535 (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq), 2536 command, 2537 common, 2538 HW_CID(sc, cid), 2539 data_hi, 2540 data_lo, 2541 type, 2542 atomic_load_acq_long(&sc->cq_spq_left), 2543 atomic_load_acq_long(&sc->eq_spq_left)); 2544 2545 bxe_sp_prod_update(sc); 2546 2547 BXE_SP_UNLOCK(sc); 2548 return (0); 2549} 2550 
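/*
 * Note: the SPQ/EQ credit consumed in bxe_sp_post() is returned when the
 * ramrod completes; for RCQ-completed ramrods this is the
 * atomic_add_acq_long() of cq_spq_left in bxe_sp_event() further below.
 */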
2551/** 2552 * bxe_debug_print_ind_table - prints the indirection table configuration. 2553 * 2554 * @sc: driver handle 2555 * @p: pointer to rss configuration 2556 */ 2557#if 0 2558static void 2559bxe_debug_print_ind_table(struct bxe_softc *sc, 2560 struct ecore_config_rss_params *p) 2561{ 2562 int i; 2563 2564 BLOGD(sc, DBG_LOAD, "Setting indirection table to:\n"); 2565 BLOGD(sc, DBG_LOAD, " 0x0000: "); 2566 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { 2567 BLOGD(sc, DBG_LOAD, "0x%02x ", p->ind_table[i]); 2568 2569 /* Print 4 bytes in a line */ 2570 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) && 2571 (((i + 1) & 0x3) == 0)) { 2572 BLOGD(sc, DBG_LOAD, "\n"); 2573 BLOGD(sc, DBG_LOAD, "0x%04x: ", i + 1); 2574 } 2575 } 2576 2577 BLOGD(sc, DBG_LOAD, "\n"); 2578} 2579#endif 2580 2581/* 2582 * FreeBSD Device probe function. 2583 * 2584 * Compares the device found to the driver's list of supported devices and 2585 * reports back to the BSD loader whether this is the right driver for the device. 2586 * This is the driver entry function called from the "kldload" command. 2587 * 2588 * Returns: 2589 * BUS_PROBE_DEFAULT on success, positive value on failure. 2590 */ 2591static int 2592bxe_probe(device_t dev) 2593{ 2594 struct bxe_softc *sc; 2595 struct bxe_device_type *t; 2596 char *descbuf; 2597 uint16_t did, sdid, svid, vid; 2598 2599 /* Find our device structure */ 2600 sc = device_get_softc(dev); 2601 sc->dev = dev; 2602 t = bxe_devs; 2603 2604 /* Get the data for the device to be probed. */ 2605 vid = pci_get_vendor(dev); 2606 did = pci_get_device(dev); 2607 svid = pci_get_subvendor(dev); 2608 sdid = pci_get_subdevice(dev); 2609 2610 BLOGD(sc, DBG_LOAD, 2611 "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, " 2612 "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid); 2613 2614 /* Look through the list of known devices for a match. */ 2615 while (t->bxe_name != NULL) { 2616 if ((vid == t->bxe_vid) && (did == t->bxe_did) && 2617 ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) && 2618 ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) { 2619 descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT); 2620 if (descbuf == NULL) 2621 return (ENOMEM); 2622 2623 /* Print out the device identity. 
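For a hypothetical B1-stepping part this renders as "<name> (B1) BXE v:<version>"; the stepping letter comes from the high nibble of the PCI revision ID and the digit from the low nibble.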
*/ 2624 snprintf(descbuf, BXE_DEVDESC_MAX, 2625 "%s (%c%d) BXE v:%s\n", t->bxe_name, 2626 (((pci_read_config(dev, PCIR_REVID, 4) & 2627 0xf0) >> 4) + 'A'), 2628 (pci_read_config(dev, PCIR_REVID, 4) & 0xf), 2629 BXE_DRIVER_VERSION); 2630 2631 device_set_desc_copy(dev, descbuf); 2632 free(descbuf, M_TEMP); 2633 return (BUS_PROBE_DEFAULT); 2634 } 2635 t++; 2636 } 2637 2638 return (ENXIO); 2639} 2640 2641static void 2642bxe_init_mutexes(struct bxe_softc *sc) 2643{ 2644#ifdef BXE_CORE_LOCK_SX 2645 snprintf(sc->core_sx_name, sizeof(sc->core_sx_name), 2646 "bxe%d_core_lock", sc->unit); 2647 sx_init(&sc->core_sx, sc->core_sx_name); 2648#else 2649 snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name), 2650 "bxe%d_core_lock", sc->unit); 2651 mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF); 2652#endif 2653 2654 snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name), 2655 "bxe%d_sp_lock", sc->unit); 2656 mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF); 2657 2658 snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name), 2659 "bxe%d_dmae_lock", sc->unit); 2660 mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF); 2661 2662 snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name), 2663 "bxe%d_phy_lock", sc->unit); 2664 mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF); 2665 2666 snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name), 2667 "bxe%d_fwmb_lock", sc->unit); 2668 mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF); 2669 2670 snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name), 2671 "bxe%d_print_lock", sc->unit); 2672 mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF); 2673 2674 snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name), 2675 "bxe%d_stats_lock", sc->unit); 2676 mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF); 2677 2678 snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name), 2679 "bxe%d_mcast_lock", sc->unit); 2680 mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF); 2681} 2682 2683static void 2684bxe_release_mutexes(struct bxe_softc *sc) 2685{ 2686#ifdef BXE_CORE_LOCK_SX 2687 sx_destroy(&sc->core_sx); 2688#else 2689 if (mtx_initialized(&sc->core_mtx)) { 2690 mtx_destroy(&sc->core_mtx); 2691 } 2692#endif 2693 2694 if (mtx_initialized(&sc->sp_mtx)) { 2695 mtx_destroy(&sc->sp_mtx); 2696 } 2697 2698 if (mtx_initialized(&sc->dmae_mtx)) { 2699 mtx_destroy(&sc->dmae_mtx); 2700 } 2701 2702 if (mtx_initialized(&sc->port.phy_mtx)) { 2703 mtx_destroy(&sc->port.phy_mtx); 2704 } 2705 2706 if (mtx_initialized(&sc->fwmb_mtx)) { 2707 mtx_destroy(&sc->fwmb_mtx); 2708 } 2709 2710 if (mtx_initialized(&sc->print_mtx)) { 2711 mtx_destroy(&sc->print_mtx); 2712 } 2713 2714 if (mtx_initialized(&sc->stats_mtx)) { 2715 mtx_destroy(&sc->stats_mtx); 2716 } 2717 2718 if (mtx_initialized(&sc->mcast_mtx)) { 2719 mtx_destroy(&sc->mcast_mtx); 2720 } 2721} 2722 2723static void 2724bxe_tx_disable(struct bxe_softc* sc) 2725{ 2726 struct ifnet *ifp = sc->ifnet; 2727 2728 /* tell the stack the driver is stopped and TX queue is full */ 2729 if (ifp != NULL) { 2730 ifp->if_drv_flags = 0; 2731 } 2732} 2733 2734static void 2735bxe_drv_pulse(struct bxe_softc *sc) 2736{ 2737 SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb, 2738 sc->fw_drv_pulse_wr_seq); 2739} 2740 2741static inline uint16_t 2742bxe_tx_avail(struct bxe_softc *sc, 2743 struct bxe_fastpath *fp) 2744{ 2745 int16_t used; 2746 uint16_t prod; 2747 uint16_t cons; 2748 2749 prod = fp->tx_bd_prod; 2750 cons = fp->tx_bd_cons; 2751 2752 used = SUB_S16(prod, cons); 2753 2754#if 0 2755 
KASSERT((used >= 0), ("used tx bds < 0")); 2756 KASSERT((used <= sc->tx_ring_size), ("used tx bds > tx_ring_size")); 2757 KASSERT(((sc->tx_ring_size - used) <= MAX_TX_AVAIL), 2758 ("invalid number of tx bds used")); 2759#endif 2760 2761 return (int16_t)(sc->tx_ring_size) - used; 2762} 2763 2764static inline int 2765bxe_tx_queue_has_work(struct bxe_fastpath *fp) 2766{ 2767 uint16_t hw_cons; 2768 2769 mb(); /* status block fields can change */ 2770 hw_cons = le16toh(*fp->tx_cons_sb); 2771 return (hw_cons != fp->tx_pkt_cons); 2772} 2773 2774static inline uint8_t 2775bxe_has_tx_work(struct bxe_fastpath *fp) 2776{ 2777 /* expand this for multi-cos if ever supported */ 2778 return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE; 2779} 2780 2781static inline int 2782bxe_has_rx_work(struct bxe_fastpath *fp) 2783{ 2784 uint16_t rx_cq_cons_sb; 2785 2786 mb(); /* status block fields can change */ 2787 rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb); 2788 if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX) 2789 rx_cq_cons_sb++; 2790 return (fp->rx_cq_cons != rx_cq_cons_sb); 2791} 2792 2793static void 2794bxe_sp_event(struct bxe_softc *sc, 2795 struct bxe_fastpath *fp, 2796 union eth_rx_cqe *rr_cqe) 2797{ 2798 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); 2799 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); 2800 enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX; 2801 struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj; 2802 2803 BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n", 2804 fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type); 2805 2806#if 0 2807 /* 2808 * If cid is within VF range, replace the slowpath object with the 2809 * one corresponding to this VF 2810 */ 2811 if ((cid >= BXE_FIRST_VF_CID) && (cid < BXE_FIRST_VF_CID + BXE_VF_CIDS)) { 2812 bxe_iov_set_queue_sp_obj(sc, cid, &q_obj); 2813 } 2814#endif 2815 2816 switch (command) { 2817 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): 2818 BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid); 2819 drv_cmd = ECORE_Q_CMD_UPDATE; 2820 break; 2821 2822 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): 2823 BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid); 2824 drv_cmd = ECORE_Q_CMD_SETUP; 2825 break; 2826 2827 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): 2828 BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); 2829 drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY; 2830 break; 2831 2832 case (RAMROD_CMD_ID_ETH_HALT): 2833 BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid); 2834 drv_cmd = ECORE_Q_CMD_HALT; 2835 break; 2836 2837 case (RAMROD_CMD_ID_ETH_TERMINATE): 2838 BLOGD(sc, DBG_SP, "got MULTI[%d] terminate ramrod\n", cid); 2839 drv_cmd = ECORE_Q_CMD_TERMINATE; 2840 break; 2841 2842 case (RAMROD_CMD_ID_ETH_EMPTY): 2843 BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid); 2844 drv_cmd = ECORE_Q_CMD_EMPTY; 2845 break; 2846 2847 default: 2848 BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n", 2849 command, fp->index); 2850 return; 2851 } 2852 2853 if ((drv_cmd != ECORE_Q_CMD_MAX) && 2854 q_obj->complete_cmd(sc, q_obj, drv_cmd)) { 2855 /* 2856 * q_obj->complete_cmd() failure means that this was 2857 * an unexpected completion. 2858 * 2859 * In this case we don't want to increase the sc->spq_left 2860 * because apparently we haven't sent this command in the first 2861 * place. 
2862 */ 2863 // bxe_panic(sc, ("Unexpected SP completion\n")); 2864 return; 2865 } 2866 2867#if 0 2868 /* SRIOV: reschedule any 'in_progress' operations */ 2869 bxe_iov_sp_event(sc, cid, TRUE); 2870#endif 2871 2872 atomic_add_acq_long(&sc->cq_spq_left, 1); 2873 2874 BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n", 2875 atomic_load_acq_long(&sc->cq_spq_left)); 2876 2877#if 0 2878 if ((drv_cmd == ECORE_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) && 2879 (!!bxe_test_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state))) { 2880 /* 2881 * If Queue update ramrod is completed for last Queue in AFEX VIF set 2882 * flow, then ACK MCP at the end. Mark pending ACK to MCP bit to 2883 * prevent case that both bits are cleared. At the end of load/unload 2884 * driver checks that sp_state is cleared and this order prevents 2885 * races. 2886 */ 2887 bxe_set_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK, &sc->sp_state); 2888 wmb(); 2889 bxe_clear_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state); 2890 2891 /* schedule the sp task as MCP ack is required */ 2892 bxe_schedule_sp_task(sc); 2893 } 2894#endif 2895} 2896 2897/* 2898 * The current mbuf is part of an aggregation. Move the mbuf into the TPA 2899 * aggregation queue, put an empty mbuf back onto the receive chain, and mark 2900 * the current aggregation queue as in-progress. 2901 */ 2902static void 2903bxe_tpa_start(struct bxe_softc *sc, 2904 struct bxe_fastpath *fp, 2905 uint16_t queue, 2906 uint16_t cons, 2907 uint16_t prod, 2908 struct eth_fast_path_rx_cqe *cqe) 2909{ 2910 struct bxe_sw_rx_bd tmp_bd; 2911 struct bxe_sw_rx_bd *rx_buf; 2912 struct eth_rx_bd *rx_bd; 2913 int max_agg_queues; 2914 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; 2915 uint16_t index; 2916 2917 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START " 2918 "cons=%d prod=%d\n", 2919 fp->index, queue, cons, prod); 2920 2921 max_agg_queues = MAX_AGG_QS(sc); 2922 2923 KASSERT((queue < max_agg_queues), 2924 ("fp[%02d] invalid aggr queue (%d >= %d)!", 2925 fp->index, queue, max_agg_queues)); 2926 2927 KASSERT((tpa_info->state == BXE_TPA_STATE_STOP), 2928 ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!", 2929 fp->index, queue)); 2930 2931 /* copy the existing mbuf and mapping from the TPA pool */ 2932 tmp_bd = tpa_info->bd; 2933 2934 if (tmp_bd.m == NULL) { 2935 BLOGE(sc, "fp[%02d].tpa[%02d] mbuf not allocated!\n", 2936 fp->index, queue); 2937 /* XXX Error handling? */ 2938 return; 2939 } 2940 2941 /* change the TPA queue to the start state */ 2942 tpa_info->state = BXE_TPA_STATE_START; 2943 tpa_info->placement_offset = cqe->placement_offset; 2944 tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags); 2945 tpa_info->vlan_tag = le16toh(cqe->vlan_tag); 2946 tpa_info->len_on_bd = le16toh(cqe->len_on_bd); 2947 2948 fp->rx_tpa_queue_used |= (1 << queue); 2949 2950 /* 2951 * If all the buffer descriptors are filled with mbufs then fill in 2952 * the current consumer index with a new BD. Else if a maximum Rx 2953 * buffer limit is imposed then fill in the next producer index. 2954 */ 2955 index = (sc->max_rx_bufs != RX_BD_USABLE) ? 
2956 prod : cons; 2957 2958 /* move the received mbuf and mapping to TPA pool */ 2959 tpa_info->bd = fp->rx_mbuf_chain[cons]; 2960 2961 /* release any existing RX BD mbuf mappings */ 2962 if (cons != index) { 2963 rx_buf = &fp->rx_mbuf_chain[cons]; 2964 2965 if (rx_buf->m_map != NULL) { 2966 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 2967 BUS_DMASYNC_POSTREAD); 2968 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 2969 } 2970 2971 /* 2972 * We get here when the maximum number of rx buffers is less than 2973 * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL 2974 * it out here without concern of a memory leak. 2975 */ 2976 fp->rx_mbuf_chain[cons].m = NULL; 2977 } 2978 2979 /* update the Rx SW BD with the mbuf info from the TPA pool */ 2980 fp->rx_mbuf_chain[index] = tmp_bd; 2981 2982 /* update the Rx BD with the empty mbuf phys address from the TPA pool */ 2983 rx_bd = &fp->rx_chain[index]; 2984 rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr)); 2985 rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr)); 2986} 2987 2988/* 2989 * When a TPA aggregation is completed, loop through the individual mbufs 2990 * of the aggregation, combining them into a single mbuf which will be sent 2991 * up the stack. Refill all freed SGEs with mbufs as we go along. 2992 */ 2993static int 2994bxe_fill_frag_mbuf(struct bxe_softc *sc, 2995 struct bxe_fastpath *fp, 2996 struct bxe_sw_tpa_info *tpa_info, 2997 uint16_t queue, 2998 uint16_t pages, 2999 struct mbuf *m, 3000 struct eth_end_agg_rx_cqe *cqe, 3001 uint16_t cqe_idx) 3002{ 3003 struct mbuf *m_frag; 3004 uint32_t frag_len, frag_size, i; 3005 uint16_t sge_idx; 3006 int rc = 0; 3007 int j; 3008 3009 frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd; 3010 3011 BLOGD(sc, DBG_LRO, 3012 "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n", 3013 fp->index, queue, tpa_info->len_on_bd, frag_size, pages); 3014 3015 /* make sure the aggregated frame is not too big to handle */ 3016 if (pages > 8 * PAGES_PER_SGE) { 3017 BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! " 3018 "pkt_len=%d len_on_bd=%d frag_size=%d\n", 3019 fp->index, cqe_idx, pages, le16toh(cqe->pkt_len), 3020 tpa_info->len_on_bd, frag_size); 3021 bxe_panic(sc, ("sge page count error\n")); 3022 return (EINVAL); 3023 } 3024 3025 /* 3026 * Scan through the scatter gather list pulling individual mbufs into a 3027 * single mbuf for the host stack. 3028 */ 3029 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { 3030 sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j])); 3031 3032 /* 3033 * Firmware gives the indices of the SGE as if the ring is an array 3034 * (meaning that the "next" element will consume 2 indices). 3035 */ 3036 frag_len = min(frag_size, (uint32_t)(SGE_PAGES)); 3037 3038 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d " 3039 "sge_idx=%d frag_size=%d frag_len=%d\n", 3040 fp->index, queue, i, j, sge_idx, frag_size, frag_len); 3041 3042 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; 3043 3044 /* allocate a new mbuf for the SGE */ 3045 rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); 3046 if (rc) { 3047 /* Leave all remaining SGEs in the ring! 
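Their mbufs stay in rx_sge_mbuf_chain so nothing is leaked; the caller drops the partially assembled frame and counts it as a soft error.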
*/ 3048 return (rc); 3049 } 3050 3051 /* update the fragment length */ 3052 m_frag->m_len = frag_len; 3053 3054 /* concatenate the fragment to the head mbuf */ 3055 m_cat(m, m_frag); 3056 fp->eth_q_stats.mbuf_alloc_sge--; 3057 3058 /* update the TPA mbuf size and remaining fragment size */ 3059 m->m_pkthdr.len += frag_len; 3060 frag_size -= frag_len; 3061 } 3062 3063 BLOGD(sc, DBG_LRO, 3064 "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n", 3065 fp->index, queue, frag_size); 3066 3067 return (rc); 3068} 3069 3070static inline void 3071bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp) 3072{ 3073 int i, j; 3074 3075 for (i = 1; i <= RX_SGE_NUM_PAGES; i++) { 3076 int idx = RX_SGE_TOTAL_PER_PAGE * i - 1; 3077 3078 for (j = 0; j < 2; j++) { 3079 BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); 3080 idx--; 3081 } 3082 } 3083} 3084 3085static inline void 3086bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp) 3087{ 3088 /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */ 3089 memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask)); 3090 3091 /* 3092 * Clear the two last indices in the page to 1. These are the indices that 3093 * correspond to the "next" element, hence will never be indicated and 3094 * should be removed from the calculations. 3095 */ 3096 bxe_clear_sge_mask_next_elems(fp); 3097} 3098 3099static inline void 3100bxe_update_last_max_sge(struct bxe_fastpath *fp, 3101 uint16_t idx) 3102{ 3103 uint16_t last_max = fp->last_max_sge; 3104 3105 if (SUB_S16(idx, last_max) > 0) { 3106 fp->last_max_sge = idx; 3107 } 3108} 3109 3110static inline void 3111bxe_update_sge_prod(struct bxe_softc *sc, 3112 struct bxe_fastpath *fp, 3113 uint16_t sge_len, 3114 struct eth_end_agg_rx_cqe *cqe) 3115{ 3116 uint16_t last_max, last_elem, first_elem; 3117 uint16_t delta = 0; 3118 uint16_t i; 3119 3120 if (!sge_len) { 3121 return; 3122 } 3123 3124 /* first mark all used pages */ 3125 for (i = 0; i < sge_len; i++) { 3126 BIT_VEC64_CLEAR_BIT(fp->sge_mask, 3127 RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[i]))); 3128 } 3129 3130 BLOGD(sc, DBG_LRO, 3131 "fp[%02d] fp_cqe->sgl[%d] = %d\n", 3132 fp->index, sge_len - 1, 3133 le16toh(cqe->sgl_or_raw_data.sgl[sge_len - 1])); 3134 3135 /* assume that the last SGE index is the biggest */ 3136 bxe_update_last_max_sge(fp, 3137 le16toh(cqe->sgl_or_raw_data.sgl[sge_len - 1])); 3138 3139 last_max = RX_SGE(fp->last_max_sge); 3140 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; 3141 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; 3142 3143 /* if ring is not full */ 3144 if (last_elem + 1 != first_elem) { 3145 last_elem++; 3146 } 3147 3148 /* now update the prod */ 3149 for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) { 3150 if (__predict_true(fp->sge_mask[i])) { 3151 break; 3152 } 3153 3154 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; 3155 delta += BIT_VEC64_ELEM_SZ; 3156 } 3157 3158 if (delta > 0) { 3159 fp->rx_sge_prod += delta; 3160 /* clear page-end entries */ 3161 bxe_clear_sge_mask_next_elems(fp); 3162 } 3163 3164 BLOGD(sc, DBG_LRO, 3165 "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n", 3166 fp->index, fp->last_max_sge, fp->rx_sge_prod); 3167} 3168 3169/* 3170 * The aggregation on the current TPA queue has completed. Pull the individual 3171 * mbuf fragments together into a single mbuf, perform all necessary checksum 3172 * calculations, and send the resulting mbuf to the stack. 
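 * The IP and TCP/UDP checksums of an aggregated frame were already
 * validated by the firmware, so the mbuf is simply marked
 * CSUM_DATA_VALID/CSUM_PSEUDO_HDR rather than recomputed.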
3173 */ 3174static void 3175bxe_tpa_stop(struct bxe_softc *sc, 3176 struct bxe_fastpath *fp, 3177 struct bxe_sw_tpa_info *tpa_info, 3178 uint16_t queue, 3179 uint16_t pages, 3180 struct eth_end_agg_rx_cqe *cqe, 3181 uint16_t cqe_idx) 3182{ 3183 struct ifnet *ifp = sc->ifnet; 3184 struct mbuf *m; 3185 int rc = 0; 3186 3187 BLOGD(sc, DBG_LRO, 3188 "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n", 3189 fp->index, queue, tpa_info->placement_offset, 3190 le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag); 3191 3192 m = tpa_info->bd.m; 3193 3194 /* allocate a replacement before modifying existing mbuf */ 3195 rc = bxe_alloc_rx_tpa_mbuf(fp, queue); 3196 if (rc) { 3197 /* drop the frame and log an error */ 3198 fp->eth_q_stats.rx_soft_errors++; 3199 goto bxe_tpa_stop_exit; 3200 } 3201 3202 /* we have a replacement, fixup the current mbuf */ 3203 m_adj(m, tpa_info->placement_offset); 3204 m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd; 3205 3206 /* mark the checksums valid (taken care of by the firmware) */ 3207 fp->eth_q_stats.rx_ofld_frames_csum_ip++; 3208 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; 3209 m->m_pkthdr.csum_data = 0xffff; 3210 m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | 3211 CSUM_IP_VALID | 3212 CSUM_DATA_VALID | 3213 CSUM_PSEUDO_HDR); 3214 3215 /* aggregate all of the SGEs into a single mbuf */ 3216 rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx); 3217 if (rc) { 3218 /* drop the packet and log an error */ 3219 fp->eth_q_stats.rx_soft_errors++; 3220 m_freem(m); 3221 } else { 3222 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) { 3223 m->m_pkthdr.ether_vtag = tpa_info->vlan_tag; 3224 m->m_flags |= M_VLANTAG; 3225 } 3226 3227 /* assign packet to this interface */ 3228 m->m_pkthdr.rcvif = ifp; 3229 3230#if __FreeBSD_version >= 800000 3231 /* specify what RSS queue was used for this flow */ 3232 m->m_pkthdr.flowid = fp->index; 3233 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); 3234#endif 3235 3236 ifp->if_ipackets++; 3237 fp->eth_q_stats.rx_tpa_pkts++; 3238 3239 /* pass the frame to the stack */ 3240 (*ifp->if_input)(ifp, m); 3241 } 3242 3243 /* we passed an mbuf up the stack or dropped the frame */ 3244 fp->eth_q_stats.mbuf_alloc_tpa--; 3245 3246bxe_tpa_stop_exit: 3247 3248 fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP; 3249 fp->rx_tpa_queue_used &= ~(1 << queue); 3250} 3251 3252static uint8_t 3253bxe_rxeof(struct bxe_softc *sc, 3254 struct bxe_fastpath *fp) 3255{ 3256 struct ifnet *ifp = sc->ifnet; 3257 uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; 3258 uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; 3259 int rx_pkts = 0; 3260 int rc = 0; 3261 3262 BXE_FP_RX_LOCK(fp); 3263 3264 /* CQ "next element" is of the size of the regular element */ 3265 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); 3266 if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) { 3267 hw_cq_cons++; 3268 } 3269 3270 bd_cons = fp->rx_bd_cons; 3271 bd_prod = fp->rx_bd_prod; 3272 bd_prod_fw = bd_prod; 3273 sw_cq_cons = fp->rx_cq_cons; 3274 sw_cq_prod = fp->rx_cq_prod; 3275 3276 /* 3277 * Memory barrier necessary as speculative reads of the rx 3278 * buffer can be ahead of the index in the status block 3279 */ 3280 rmb(); 3281 3282 BLOGD(sc, DBG_RX, 3283 "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n", 3284 fp->index, hw_cq_cons, sw_cq_cons); 3285 3286 while (sw_cq_cons != hw_cq_cons) { 3287 struct bxe_sw_rx_bd *rx_buf = NULL; 3288 union eth_rx_cqe *cqe; 3289 struct eth_fast_path_rx_cqe *cqe_fp; 3290 uint8_t cqe_fp_flags; 3291 enum eth_rx_cqe_type cqe_fp_type; 
3292 uint16_t len, pad; 3293 struct mbuf *m = NULL; 3294 3295 comp_ring_cons = RCQ(sw_cq_cons); 3296 bd_prod = RX_BD(bd_prod); 3297 bd_cons = RX_BD(bd_cons); 3298 3299 cqe = &fp->rcq_chain[comp_ring_cons]; 3300 cqe_fp = &cqe->fast_path_cqe; 3301 cqe_fp_flags = cqe_fp->type_error_flags; 3302 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; 3303 3304 BLOGD(sc, DBG_RX, 3305 "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d " 3306 "BD prod=%d cons=%d CQE type=0x%x err=0x%x " 3307 "status=0x%x rss_hash=0x%x vlan=0x%x len=%u\n", 3308 fp->index, 3309 hw_cq_cons, 3310 sw_cq_cons, 3311 bd_prod, 3312 bd_cons, 3313 CQE_TYPE(cqe_fp_flags), 3314 cqe_fp_flags, 3315 cqe_fp->status_flags, 3316 le32toh(cqe_fp->rss_hash_result), 3317 le16toh(cqe_fp->vlan_tag), 3318 le16toh(cqe_fp->pkt_len_or_gro_seg_len)); 3319 3320 /* is this a slowpath msg? */ 3321 if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) { 3322 bxe_sp_event(sc, fp, cqe); 3323 goto next_cqe; 3324 } 3325 3326 rx_buf = &fp->rx_mbuf_chain[bd_cons]; 3327 3328 if (!CQE_TYPE_FAST(cqe_fp_type)) { 3329 struct bxe_sw_tpa_info *tpa_info; 3330 uint16_t frag_size, pages; 3331 uint8_t queue; 3332 3333#if 0 3334 /* sanity check */ 3335 if (!fp->tpa_enable && 3336 (CQE_TYPE_START(cqe_fp_type) || CQE_TYPE_STOP(cqe_fp_type))) { 3337 BLOGE(sc, "START/STOP packet while !tpa_enable type (0x%x)\n", 3338 CQE_TYPE(cqe_fp_type)); 3339 } 3340#endif 3341 3342 if (CQE_TYPE_START(cqe_fp_type)) { 3343 bxe_tpa_start(sc, fp, cqe_fp->queue_index, 3344 bd_cons, bd_prod, cqe_fp); 3345 m = NULL; /* packet not ready yet */ 3346 goto next_rx; 3347 } 3348 3349 KASSERT(CQE_TYPE_STOP(cqe_fp_type), 3350 ("CQE type is not STOP! (0x%x)\n", cqe_fp_type)); 3351 3352 queue = cqe->end_agg_cqe.queue_index; 3353 tpa_info = &fp->rx_tpa_info[queue]; 3354 3355 BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n", 3356 fp->index, queue); 3357 3358 frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) - 3359 tpa_info->len_on_bd); 3360 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; 3361 3362 bxe_tpa_stop(sc, fp, tpa_info, queue, pages, 3363 &cqe->end_agg_cqe, comp_ring_cons); 3364 3365 bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe); 3366 3367 goto next_cqe; 3368 } 3369 3370 /* non TPA */ 3371 3372 /* is this an error packet? */ 3373 if (__predict_false(cqe_fp_flags & 3374 ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) { 3375 BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons); 3376 fp->eth_q_stats.rx_soft_errors++; 3377 goto next_rx; 3378 } 3379 3380 len = le16toh(cqe_fp->pkt_len_or_gro_seg_len); 3381 pad = cqe_fp->placement_offset; 3382 3383 m = rx_buf->m; 3384 3385 if (__predict_false(m == NULL)) { 3386 BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n", 3387 bd_cons, fp->index); 3388 goto next_rx; 3389 } 3390 3391 /* XXX double copy if packet length under a threshold */ 3392 3393 /* 3394 * If all the buffer descriptors are filled with mbufs then fill in 3395 * the current consumer index with a new BD. Else if a maximum Rx 3396 * buffer limit is imposed then fill in the next producer index. 3397 */ 3398 rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons, 3399 (sc->max_rx_bufs != RX_BD_USABLE) ? 
3400 bd_prod : bd_cons; 3401 if (rc != 0) { 3402 3403 /* we simply reuse the received mbuf and don't post it to the stack */ 3404 m = NULL; 3405 3406 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", 3407 fp->index, rc); 3408 fp->eth_q_stats.rx_soft_errors++; 3409 3410 if (sc->max_rx_bufs != RX_BD_USABLE) { 3411 /* copy this consumer index to the producer index */ 3412 memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf, 3413 sizeof(struct bxe_sw_rx_bd)); 3414 memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd)); 3415 } 3416 3417 goto next_rx; 3418 } 3419 3420 /* current mbuf was detached from the bd */ 3421 fp->eth_q_stats.mbuf_alloc_rx--; 3422 3423 /* we allocated a replacement mbuf, fixup the current one */ 3424 m_adj(m, pad); 3425 m->m_pkthdr.len = m->m_len = len; 3426 3427 /* assign packet to this interface */ 3428 m->m_pkthdr.rcvif = ifp; 3429 3430 /* assume no hardware checksum has completed */ 3431 m->m_pkthdr.csum_flags = 0; 3432 3433 /* validate checksum if offload enabled */ 3434 if (ifp->if_capenable & IFCAP_RXCSUM) { 3435 /* check for a valid IP frame */ 3436 if (!(cqe->fast_path_cqe.status_flags & 3437 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) { 3438 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 3439 if (__predict_false(cqe_fp_flags & 3440 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) { 3441 fp->eth_q_stats.rx_hw_csum_errors++; 3442 } else { 3443 fp->eth_q_stats.rx_ofld_frames_csum_ip++; 3444 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 3445 } 3446 } 3447 3448 /* check for a valid TCP/UDP frame */ 3449 if (!(cqe->fast_path_cqe.status_flags & 3450 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) { 3451 if (__predict_false(cqe_fp_flags & 3452 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) { 3453 fp->eth_q_stats.rx_hw_csum_errors++; 3454 } else { 3455 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; 3456 m->m_pkthdr.csum_data = 0xFFFF; 3457 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | 3458 CSUM_PSEUDO_HDR); 3459 } 3460 } 3461 } 3462 3463 /* if there is a VLAN tag then flag that info */ 3464 if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_VLAN) { 3465 m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag; 3466 m->m_flags |= M_VLANTAG; 3467 } 3468 3469#if __FreeBSD_version >= 800000 3470 /* specify what RSS queue was used for this flow */ 3471 m->m_pkthdr.flowid = fp->index; 3472 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); 3473#endif 3474 3475next_rx: 3476 3477 bd_cons = RX_BD_NEXT(bd_cons); 3478 bd_prod = RX_BD_NEXT(bd_prod); 3479 bd_prod_fw = RX_BD_NEXT(bd_prod_fw); 3480 3481 /* pass the frame to the stack */ 3482 if (__predict_true(m != NULL)) { 3483 ifp->if_ipackets++; 3484 rx_pkts++; 3485 (*ifp->if_input)(ifp, m); 3486 } 3487 3488next_cqe: 3489 3490 sw_cq_prod = RCQ_NEXT(sw_cq_prod); 3491 sw_cq_cons = RCQ_NEXT(sw_cq_cons); 3492 3493 /* limit spinning on the queue */ 3494 if (rc != 0) 3495 break; 3496 3497 if (rx_pkts == sc->rx_budget) { 3498 fp->eth_q_stats.rx_budget_reached++; 3499 break; 3500 } 3501 } /* while work to do */ 3502 3503 fp->rx_bd_cons = bd_cons; 3504 fp->rx_bd_prod = bd_prod_fw; 3505 fp->rx_cq_cons = sw_cq_cons; 3506 fp->rx_cq_prod = sw_cq_prod; 3507 3508 /* Update producers */ 3509 bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod); 3510 3511 fp->eth_q_stats.rx_pkts += rx_pkts; 3512 fp->eth_q_stats.rx_calls++; 3513 3514 BXE_FP_RX_UNLOCK(fp); 3515 3516 return (sw_cq_cons != hw_cq_cons); 3517} 3518 3519static uint16_t 3520bxe_free_tx_pkt(struct bxe_softc *sc, 3521 struct bxe_fastpath *fp, 3522 uint16_t idx) 3523{ 3524 struct bxe_sw_tx_bd *tx_buf = 
&fp->tx_mbuf_chain[idx]; 3525 struct eth_tx_start_bd *tx_start_bd; 3526 uint16_t bd_idx = TX_BD(tx_buf->first_bd); 3527 uint16_t new_cons; 3528 int nbd; 3529 3530 /* unmap the mbuf from non-paged memory */ 3531 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 3532 3533 tx_start_bd = &fp->tx_chain[bd_idx].start_bd; 3534 nbd = le16toh(tx_start_bd->nbd) - 1; 3535 3536#if 0 3537 if ((nbd - 1) > (MAX_MBUF_FRAGS + 2)) { 3538 bxe_panic(sc, ("BAD nbd!\n")); 3539 } 3540#endif 3541 3542 new_cons = (tx_buf->first_bd + nbd); 3543 3544#if 0 3545 struct eth_tx_bd *tx_data_bd; 3546 3547 /* 3548 * The following code doesn't do anything but is left here 3549 * for clarity on what the new value of new_cons skipped. 3550 */ 3551 3552 /* get the next bd */ 3553 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3554 3555 /* skip the parse bd */ 3556 --nbd; 3557 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3558 3559 /* skip the TSO split header bd since they have no mapping */ 3560 if (tx_buf->flags & BXE_TSO_SPLIT_BD) { 3561 --nbd; 3562 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3563 } 3564 3565 /* now free frags */ 3566 while (nbd > 0) { 3567 tx_data_bd = &fp->tx_chain[bd_idx].reg_bd; 3568 if (--nbd) { 3569 bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); 3570 } 3571 } 3572#endif 3573 3574 /* free the mbuf */ 3575 if (__predict_true(tx_buf->m != NULL)) { 3576 m_freem(tx_buf->m); 3577 fp->eth_q_stats.mbuf_alloc_tx--; 3578 } else { 3579 fp->eth_q_stats.tx_chain_lost_mbuf++; 3580 } 3581 3582 tx_buf->m = NULL; 3583 tx_buf->first_bd = 0; 3584 3585 return (new_cons); 3586} 3587 3588/* transmit timeout watchdog */ 3589static int 3590bxe_watchdog(struct bxe_softc *sc, 3591 struct bxe_fastpath *fp) 3592{ 3593 BXE_FP_TX_LOCK(fp); 3594 3595 if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) { 3596 BXE_FP_TX_UNLOCK(fp); 3597 return (0); 3598 } 3599 3600 BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index); 3601 3602 BXE_FP_TX_UNLOCK(fp); 3603 3604 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT); 3605 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task); 3606 3607 return (-1); 3608} 3609 3610/* processes transmit completions */ 3611static uint8_t 3612bxe_txeof(struct bxe_softc *sc, 3613 struct bxe_fastpath *fp) 3614{ 3615 struct ifnet *ifp = sc->ifnet; 3616 uint16_t bd_cons, hw_cons, sw_cons, pkt_cons; 3617 uint16_t tx_bd_avail; 3618 3619 BXE_FP_TX_LOCK_ASSERT(fp); 3620 3621 bd_cons = fp->tx_bd_cons; 3622 hw_cons = le16toh(*fp->tx_cons_sb); 3623 sw_cons = fp->tx_pkt_cons; 3624 3625 while (sw_cons != hw_cons) { 3626 pkt_cons = TX_BD(sw_cons); 3627 3628 BLOGD(sc, DBG_TX, 3629 "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n", 3630 fp->index, hw_cons, sw_cons, pkt_cons); 3631 3632 bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons); 3633 3634 sw_cons++; 3635 } 3636 3637 fp->tx_pkt_cons = sw_cons; 3638 fp->tx_bd_cons = bd_cons; 3639 3640 BLOGD(sc, DBG_TX, 3641 "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n", 3642 fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod); 3643 3644 mb(); 3645 3646 tx_bd_avail = bxe_tx_avail(sc, fp); 3647 3648 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 3649 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 3650 } else { 3651 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3652 } 3653 3654 if (fp->tx_pkt_prod != fp->tx_pkt_cons) { 3655 /* reset the watchdog timer if there are pending transmits */ 3656 fp->watchdog_timer = BXE_TX_TIMEOUT; 3657 return (TRUE); 3658 } else { 3659 /* clear watchdog when there are no pending transmits */ 3660 fp->watchdog_timer = 0; 3661 return (FALSE); 3662 } 3663} 3664 3665static void 
3666bxe_drain_tx_queues(struct bxe_softc *sc) 3667{ 3668 struct bxe_fastpath *fp; 3669 int i, count; 3670 3671 /* wait until all TX fastpath tasks have completed */ 3672 for (i = 0; i < sc->num_queues; i++) { 3673 fp = &sc->fp[i]; 3674 3675 count = 1000; 3676 3677 while (bxe_has_tx_work(fp)) { 3678 3679 BXE_FP_TX_LOCK(fp); 3680 bxe_txeof(sc, fp); 3681 BXE_FP_TX_UNLOCK(fp); 3682 3683 if (count == 0) { 3684 BLOGE(sc, "Timeout waiting for fp[%d] " 3685 "transmits to complete!\n", i); 3686 bxe_panic(sc, ("tx drain failure\n")); 3687 return; 3688 } 3689 3690 count--; 3691 DELAY(1000); 3692 rmb(); 3693 } 3694 } 3695 3696 return; 3697} 3698 3699static int 3700bxe_del_all_macs(struct bxe_softc *sc, 3701 struct ecore_vlan_mac_obj *mac_obj, 3702 int mac_type, 3703 uint8_t wait_for_comp) 3704{ 3705 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 3706 int rc; 3707 3708 /* wait for completion of requested */ 3709 if (wait_for_comp) { 3710 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3711 } 3712 3713 /* Set the mac type of addresses we want to clear */ 3714 bxe_set_bit(mac_type, &vlan_mac_flags); 3715 3716 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); 3717 if (rc < 0) { 3718 BLOGE(sc, "Failed to delete MACs (%d)\n", rc); 3719 } 3720 3721 return (rc); 3722} 3723 3724static int 3725bxe_fill_accept_flags(struct bxe_softc *sc, 3726 uint32_t rx_mode, 3727 unsigned long *rx_accept_flags, 3728 unsigned long *tx_accept_flags) 3729{ 3730 /* Clear the flags first */ 3731 *rx_accept_flags = 0; 3732 *tx_accept_flags = 0; 3733 3734 switch (rx_mode) { 3735 case BXE_RX_MODE_NONE: 3736 /* 3737 * 'drop all' supersedes any accept flags that may have been 3738 * passed to the function. 3739 */ 3740 break; 3741 3742 case BXE_RX_MODE_NORMAL: 3743 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3744 bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags); 3745 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3746 3747 /* internal switching mode */ 3748 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3749 bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags); 3750 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3751 3752 break; 3753 3754 case BXE_RX_MODE_ALLMULTI: 3755 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3756 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); 3757 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3758 3759 /* internal switching mode */ 3760 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3761 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); 3762 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3763 3764 break; 3765 3766 case BXE_RX_MODE_PROMISC: 3767 /* 3768 * According to definition of SI mode, iface in promisc mode 3769 * should receive matched and unmatched (in resolution of port) 3770 * unicast packets. 
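 * Hence ECORE_ACCEPT_UNMATCHED is set below on the Rx side in addition
 * to the normal unicast/multicast/broadcast accept bits.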
3771 */ 3772 bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags); 3773 bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); 3774 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); 3775 bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); 3776 3777 /* internal switching mode */ 3778 bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); 3779 bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); 3780 3781 if (IS_MF_SI(sc)) { 3782 bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags); 3783 } else { 3784 bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); 3785 } 3786 3787 break; 3788 3789 default: 3790 BLOGE(sc, "Unknown rx_mode (%d)\n", rx_mode); 3791 return (-1); 3792 } 3793 3794 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ 3795 if (rx_mode != BXE_RX_MODE_NONE) { 3796 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags); 3797 bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags); 3798 } 3799 3800 return (0); 3801} 3802 3803static int 3804bxe_set_q_rx_mode(struct bxe_softc *sc, 3805 uint8_t cl_id, 3806 unsigned long rx_mode_flags, 3807 unsigned long rx_accept_flags, 3808 unsigned long tx_accept_flags, 3809 unsigned long ramrod_flags) 3810{ 3811 struct ecore_rx_mode_ramrod_params ramrod_param; 3812 int rc; 3813 3814 memset(&ramrod_param, 0, sizeof(ramrod_param)); 3815 3816 /* Prepare ramrod parameters */ 3817 ramrod_param.cid = 0; 3818 ramrod_param.cl_id = cl_id; 3819 ramrod_param.rx_mode_obj = &sc->rx_mode_obj; 3820 ramrod_param.func_id = SC_FUNC(sc); 3821 3822 ramrod_param.pstate = &sc->sp_state; 3823 ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING; 3824 3825 ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata); 3826 ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata); 3827 3828 bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 3829 3830 ramrod_param.ramrod_flags = ramrod_flags; 3831 ramrod_param.rx_mode_flags = rx_mode_flags; 3832 3833 ramrod_param.rx_accept_flags = rx_accept_flags; 3834 ramrod_param.tx_accept_flags = tx_accept_flags; 3835 3836 rc = ecore_config_rx_mode(sc, &ramrod_param); 3837 if (rc < 0) { 3838 BLOGE(sc, "Set rx_mode %d failed\n", sc->rx_mode); 3839 return (rc); 3840 } 3841 3842 return (0); 3843} 3844 3845static int 3846bxe_set_storm_rx_mode(struct bxe_softc *sc) 3847{ 3848 unsigned long rx_mode_flags = 0, ramrod_flags = 0; 3849 unsigned long rx_accept_flags = 0, tx_accept_flags = 0; 3850 int rc; 3851 3852 rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, 3853 &tx_accept_flags); 3854 if (rc) { 3855 return (rc); 3856 } 3857 3858 bxe_set_bit(RAMROD_RX, &ramrod_flags); 3859 bxe_set_bit(RAMROD_TX, &ramrod_flags); 3860 3861 /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */ 3862 return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, 3863 rx_accept_flags, tx_accept_flags, 3864 ramrod_flags)); 3865} 3866 3867/* returns the "mcp load_code" according to global load_count array */ 3868static int 3869bxe_nic_load_no_mcp(struct bxe_softc *sc) 3870{ 3871 int path = SC_PATH(sc); 3872 int port = SC_PORT(sc); 3873 3874 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", 3875 path, load_count[path][0], load_count[path][1], 3876 load_count[path][2]); 3877 load_count[path][0]++; 3878 load_count[path][1 + port]++; 3879 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", 3880 path, load_count[path][0], load_count[path][1], 3881 load_count[path][2]); 3882 if (load_count[path][0] == 1) { 3883 return (FW_MSG_CODE_DRV_LOAD_COMMON); 3884 } else if (load_count[path][1 + port] == 1) { 3885 return 
(FW_MSG_CODE_DRV_LOAD_PORT);
3886 } else {
3887 return (FW_MSG_CODE_DRV_LOAD_FUNCTION);
3888 }
3889}
3890
3891/* returns the "mcp load_code" according to global load_count array */
3892static int
3893bxe_nic_unload_no_mcp(struct bxe_softc *sc)
3894{
3895 int port = SC_PORT(sc);
3896 int path = SC_PATH(sc);
3897
3898 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
3899 path, load_count[path][0], load_count[path][1],
3900 load_count[path][2]);
3901 load_count[path][0]--;
3902 load_count[path][1 + port]--;
3903 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
3904 path, load_count[path][0], load_count[path][1],
3905 load_count[path][2]);
3906 if (load_count[path][0] == 0) {
3907 return (FW_MSG_CODE_DRV_UNLOAD_COMMON);
3908 } else if (load_count[path][1 + port] == 0) {
3909 return (FW_MSG_CODE_DRV_UNLOAD_PORT);
3910 } else {
3911 return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION);
3912 }
3913}
3914
3915/* request unload mode from the MCP: COMMON, PORT or FUNCTION */
3916static uint32_t
3917bxe_send_unload_req(struct bxe_softc *sc,
3918 int unload_mode)
3919{
3920 uint32_t reset_code = 0;
3921#if 0
3922 int port = SC_PORT(sc);
3923 int path = SC_PATH(sc);
3924#endif
3925
3926 /* Select the UNLOAD request mode */
3927 if (unload_mode == UNLOAD_NORMAL) {
3928 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3929 }
3930#if 0
3931 else if (sc->flags & BXE_NO_WOL_FLAG) {
3932 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
3933 } else if (sc->wol) {
3934 uint32_t emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
3935 uint8_t *mac_addr = sc->dev->dev_addr;
3936 uint32_t val;
3937 uint16_t pmc;
3938
3939 /*
3940 * The mac address is written to entries 1-4 to
3941 * preserve entry 0 which is used by the PMF
3942 */
3943 uint8_t entry = (SC_VN(sc) + 1)*8;
3944
3945 val = (mac_addr[0] << 8) | mac_addr[1];
3946 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry, val);
3947
3948 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
3949 (mac_addr[4] << 8) | mac_addr[5];
3950 EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
3951
3952 /* Enable the PME and clear the status */
3953 pmc = pci_read_config(sc->dev,
3954 (sc->devinfo.pcie_pm_cap_reg +
3955 PCIR_POWER_STATUS),
3956 2);
3957 pmc |= PCIM_PSTAT_PMEENABLE | PCIM_PSTAT_PME;
3958 pci_write_config(sc->dev,
3959 (sc->devinfo.pcie_pm_cap_reg +
3960 PCIR_POWER_STATUS),
3961 pmc, 4);
3962
3963 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
3964 }
3965#endif
3966 else {
3967 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
3968 }
3969
3970 /* Send the request to the MCP */
3971 if (!BXE_NOMCP(sc)) {
3972 reset_code = bxe_fw_command(sc, reset_code, 0);
3973 } else {
3974 reset_code = bxe_nic_unload_no_mcp(sc);
3975 }
3976
3977 return (reset_code);
3978}
3979
3980/* send UNLOAD_DONE command to the MCP */
3981static void
3982bxe_send_unload_done(struct bxe_softc *sc,
3983 uint8_t keep_link)
3984{
3985 uint32_t reset_param =
3986 keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
3987
3988 /* Report UNLOAD_DONE to MCP */
3989 if (!BXE_NOMCP(sc)) {
3990 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
3991 }
3992}
3993
3994static int
3995bxe_func_wait_started(struct bxe_softc *sc)
3996{
3997 int tout = 50;
3998
3999 if (!sc->port.pmf) {
4000 return (0);
4001 }
4002
4003 /*
4004 * (assumption: No Attention from MCP at this stage)
4005 * PMF probably in the middle of TX disable/enable transaction
4006 * 1. Sync ISR for default SB
4007 * 2. Sync SP queue - this guarantees that attention handling has started
4008 * 3.
Wait until the TX disable/enable transaction completes
4009 *
4010 * Steps 1+2 guarantee that if a DCBX attention was scheduled, it has
4011 * already changed the pending bit of the transaction from STARTED-->TX_STOPPED;
4012 * if we have already received completion for the transaction the state is TX_STOPPED.
4013 * State will return to STARTED after completion of the TX_STOPPED-->STARTED
4014 * transaction.
4015 */
4016
4017 /* XXX make sure default SB ISR is done */
4018 /* need a way to synchronize an irq (intr_mtx?) */
4019
4020 /* XXX flush any work queues */
4021
4022 while (ecore_func_get_state(sc, &sc->func_obj) !=
4023 ECORE_F_STATE_STARTED && tout--) {
4024 DELAY(20000);
4025 }
4026
4027 if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
4028 /*
4029 * Failed to complete the transaction in a "good way".
4030 * Force both transactions with the CLR bit.
4031 */
4032 struct ecore_func_state_params func_params = { NULL };
4033
4034 BLOGE(sc, "Unexpected function state! "
4035 "Forcing STARTED-->TX_STOPPED-->STARTED\n");
4036
4037 func_params.f_obj = &sc->func_obj;
4038 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
4039
4040 /* STARTED-->TX_STOPPED */
4041 func_params.cmd = ECORE_F_CMD_TX_STOP;
4042 ecore_func_state_change(sc, &func_params);
4043
4044 /* TX_STOPPED-->STARTED */
4045 func_params.cmd = ECORE_F_CMD_TX_START;
4046 return (ecore_func_state_change(sc, &func_params));
4047 }
4048
4049 return (0);
4050}
4051
4052static int
4053bxe_stop_queue(struct bxe_softc *sc,
4054 int index)
4055{
4056 struct bxe_fastpath *fp = &sc->fp[index];
4057 struct ecore_queue_state_params q_params = { NULL };
4058 int rc;
4059
4060 BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
4061
4062 q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
4063 /* We want to wait for completion in this context */
4064 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
4065
4066 /* Stop the primary connection: */
4067
4068 /* ...halt the connection */
4069 q_params.cmd = ECORE_Q_CMD_HALT;
4070 rc = ecore_queue_state_change(sc, &q_params);
4071 if (rc) {
4072 return (rc);
4073 }
4074
4075 /* ...terminate the connection */
4076 q_params.cmd = ECORE_Q_CMD_TERMINATE;
4077 memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate));
4078 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
4079 rc = ecore_queue_state_change(sc, &q_params);
4080 if (rc) {
4081 return (rc);
4082 }
4083
4084 /* ...delete cfc entry */
4085 q_params.cmd = ECORE_Q_CMD_CFC_DEL;
4086 memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del));
4087 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
4088 return (ecore_queue_state_change(sc, &q_params));
4089}
4090
4091/* wait for the outstanding SP commands */
4092static inline uint8_t
4093bxe_wait_sp_comp(struct bxe_softc *sc,
4094 unsigned long mask)
4095{
4096 unsigned long tmp;
4097 int tout = 5000; /* wait for 5 secs tops */
4098
4099 while (tout--) {
4100 mb();
4101 if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
4102 return (TRUE);
4103 }
4104
4105 DELAY(1000);
4106 }
4107
4108 mb();
4109
4110 tmp = atomic_load_acq_long(&sc->sp_state);
4111 if (tmp & mask) {
4112 BLOGE(sc, "Filtering completion timed out: "
4113 "sp_state 0x%lx, mask 0x%lx\n",
4114 tmp, mask);
4115 return (FALSE);
4116 }
4117
4118 return (TRUE); /* the masked bits cleared right at the timeout boundary */
4119}
4120
4121static int
4122bxe_func_stop(struct bxe_softc *sc)
4123{
4124 struct ecore_func_state_params func_params = { NULL };
4125 int rc;
4126
4127 /* prepare parameters for function state transitions */
4128
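 /* RAMROD_COMP_WAIT makes the state change synchronous:
  * ecore_func_state_change() will not return until the FUNC_STOP
  * ramrod completes. */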
bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
4129 func_params.f_obj = &sc->func_obj;
4130 func_params.cmd = ECORE_F_CMD_STOP;
4131
4132 /*
4133 * Try to stop the function the 'good way'. If it fails (in case
4134 * of a parity error during bxe_chip_cleanup()) and we are
4135 * not in a debug mode, perform a state transaction in order to
4136 * enable further HW_RESET transaction.
4137 */
4138 rc = ecore_func_state_change(sc, &func_params);
4139 if (rc) {
4140 BLOGE(sc, "FUNC_STOP ramrod failed. "
4141 "Running a dry transaction\n");
4142 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
4143 return (ecore_func_state_change(sc, &func_params));
4144 }
4145
4146 return (0);
4147}
4148
4149static int
4150bxe_reset_hw(struct bxe_softc *sc,
4151 uint32_t load_code)
4152{
4153 struct ecore_func_state_params func_params = { NULL };
4154
4155 /* Prepare parameters for function state transitions */
4156 bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
4157
4158 func_params.f_obj = &sc->func_obj;
4159 func_params.cmd = ECORE_F_CMD_HW_RESET;
4160
4161 func_params.params.hw_init.load_phase = load_code;
4162
4163 return (ecore_func_state_change(sc, &func_params));
4164}
4165
4166static void
4167bxe_int_disable_sync(struct bxe_softc *sc,
4168 int disable_hw)
4169{
4170 if (disable_hw) {
4171 /* prevent the HW from sending interrupts */
4172 bxe_int_disable(sc);
4173 }
4174
4175 /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
4176 /* make sure all ISRs are done */
4177
4178 /* XXX make sure sp_task is not running */
4179 /* cancel and flush work queues */
4180}
4181
4182static void
4183bxe_chip_cleanup(struct bxe_softc *sc,
4184 uint32_t unload_mode,
4185 uint8_t keep_link)
4186{
4187 int port = SC_PORT(sc);
4188 struct ecore_mcast_ramrod_params rparam = { NULL };
4189 uint32_t reset_code;
4190 int i, rc = 0;
4191
4192 bxe_drain_tx_queues(sc);
4193
4194 /* give HW time to discard old tx messages */
4195 DELAY(1000);
4196
4197 /* Clean all ETH MACs */
4198 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4199 if (rc < 0) {
4200 BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
4201 }
4202
4203 /* Clean up UC list */
4204 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4205 if (rc < 0) {
4206 BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
4207 }
4208
4209 /* Disable LLH */
4210 if (!CHIP_IS_E1(sc)) {
4211 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
4212 }
4213
4214 /* Set "drop all" to stop Rx */
4215
4216 /*
4217 * We need to take the BXE_MCAST_LOCK() here in order to prevent
4218 * a race between the completion code and this code.
4219 */
4220 BXE_MCAST_LOCK(sc);
4221
4222 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4223 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4224 } else {
4225 bxe_set_storm_rx_mode(sc);
4226 }
4227
4228 /* Clean up multicast configuration */
4229 rparam.mcast_obj = &sc->mcast_obj;
4230 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4231 if (rc < 0) {
4232 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4233 }
4234
4235 BXE_MCAST_UNLOCK(sc);
4236
4237 // XXX bxe_iov_chip_cleanup(sc);
4238
4239 /*
4240 * Send the UNLOAD_REQUEST to the MCP. This will return whether
4241 * this function should perform FUNCTION, PORT, or COMMON HW
4242 * reset.
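 * With no MCP, the same decision comes from the load_count bookkeeping:
 * the last function on the path gets UNLOAD_COMMON, the last one on a
 * port gets UNLOAD_PORT, and any other function gets UNLOAD_FUNCTION
 * (see bxe_nic_unload_no_mcp() above).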
4243 */
4244 reset_code = bxe_send_unload_req(sc, unload_mode);
4245
4246 /*
4247 * (assumption: No Attention from MCP at this stage)
4248 * PMF probably in the middle of TX disable/enable transaction
4249 */
4250 rc = bxe_func_wait_started(sc);
4251 if (rc) {
4252 BLOGE(sc, "bxe_func_wait_started failed\n");
4253 }
4254
4255 /*
4256 * Close multi and leading connections
4257 * Completions for ramrods are collected in a synchronous way
4258 */
4259 for (i = 0; i < sc->num_queues; i++) {
4260 if (bxe_stop_queue(sc, i)) {
4261 goto unload_error;
4262 }
4263 }
4264
4265 /*
4266 * If the SP settings have not completed by now, something has gone
4267 * very wrong.
4268 */
4269 if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
4270 BLOGE(sc, "Common slow path ramrods got stuck!\n");
4271 }
4272
4273unload_error:
4274
4275 rc = bxe_func_stop(sc);
4276 if (rc) {
4277 BLOGE(sc, "Function stop failed!\n");
4278 }
4279
4280 /* disable HW interrupts */
4281 bxe_int_disable_sync(sc, TRUE);
4282
4283 /* detach interrupts */
4284 bxe_interrupt_detach(sc);
4285
4286 /* Reset the chip */
4287 rc = bxe_reset_hw(sc, reset_code);
4288 if (rc) {
4289 BLOGE(sc, "Hardware reset failed\n");
4290 }
4291
4292 /* Report UNLOAD_DONE to MCP */
4293 bxe_send_unload_done(sc, keep_link);
4294}
4295
4296static void
4297bxe_disable_close_the_gate(struct bxe_softc *sc)
4298{
4299 uint32_t val;
4300 int port = SC_PORT(sc);
4301
4302 BLOGD(sc, DBG_LOAD,
4303 "Disabling 'close the gates'\n");
4304
4305 if (CHIP_IS_E1(sc)) {
4306 uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4307 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4308 val = REG_RD(sc, addr);
4309 val &= ~(0x300);
4310 REG_WR(sc, addr, val);
4311 } else {
4312 val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
4313 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
4314 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
4315 REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val);
4316 }
4317}
4318
4319/*
4320 * Cleans the objects that have internal lists, without sending
4321 * ramrods. Should be run when interrupts are disabled.
4322 */
4323static void
4324bxe_squeeze_objects(struct bxe_softc *sc)
4325{
4326 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
4327 struct ecore_mcast_ramrod_params rparam = { NULL };
4328 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4329 int rc;
4330
4331 /* Cleanup MACs' object first... */
4332
4333 /* Wait for completion of the requested command */
4334 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
4335 /* Perform a dry cleanup */
4336 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
4337
4338 /* Clean ETH primary MAC */
4339 bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
4340 rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4341 &ramrod_flags);
4342 if (rc != 0) {
4343 BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
4344 }
4345
4346 /* Cleanup UC list */
4347 vlan_mac_flags = 0;
4348 bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
4349 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4350 &ramrod_flags);
4351 if (rc != 0) {
4352 BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
4353 }
4354
4355 /* Now clean mcast object... */
4356
4357 rparam.mcast_obj = &sc->mcast_obj;
4358 bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
4359
4360 /* Add a DEL command...
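 * (because RAMROD_DRV_CLR_ONLY is set above, this only empties the
 * driver-side multicast list; no ramrod goes out to the chip)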
*/
4361 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
4362 if (rc < 0) {
4363 BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
4364 }
4365
4366 /* now wait until all pending commands are cleared */
4367
4368 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4369 while (rc != 0) {
4370 if (rc < 0) {
4371 BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
4372 return;
4373 }
4374
4375 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
4376 }
4377}
4378
4379/* stop the controller */
4380static __noinline int
4381bxe_nic_unload(struct bxe_softc *sc,
4382 uint32_t unload_mode,
4383 uint8_t keep_link)
4384{
4385 uint8_t global = FALSE;
4386 uint32_t val;
4387
4388 BXE_CORE_LOCK_ASSERT(sc);
4389
4390 BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");
4391
4392 /* mark driver as unloaded in shmem2 */
4393 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
4394 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
4395 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
4396 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
4397 }
4398
4399 if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4400 (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4401 /*
4402 * We can get here if the driver has been unloaded
4403 * during parity error recovery and is either waiting for a
4404 * leader to complete or for other functions to unload and
4405 * then ifconfig down has been issued. In this case we want to
4406 * unload and let the other functions complete the recovery
4407 * process.
4408 */
4409 sc->recovery_state = BXE_RECOVERY_DONE;
4410 sc->is_leader = 0;
4411 bxe_release_leader_lock(sc);
4412 mb();
4413
4414 BLOGD(sc, DBG_LOAD, "Releasing leadership...\n");
4415 BLOGE(sc, "Can't unload in closed or error state\n");
4416 return (-1);
4417 }
4418
4419 /*
4420 * Nothing to do during unload if the previous bxe_nic_load()
4421 * did not complete successfully - all resources were already released.
4422 */
4423 if ((sc->state == BXE_STATE_CLOSED) ||
4424 (sc->state == BXE_STATE_ERROR)) {
4425 return (0);
4426 }
4427
4428 sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4429 mb();
4430
4431 /* stop tx */
4432 bxe_tx_disable(sc);
4433
4434 sc->rx_mode = BXE_RX_MODE_NONE;
4435 /* XXX set rx mode ??? */
4436
4437 if (IS_PF(sc)) {
4438 /* set ALWAYS_ALIVE bit in shmem */
4439 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4440
4441 bxe_drv_pulse(sc);
4442
4443 bxe_stats_handle(sc, STATS_EVENT_STOP);
4444 bxe_save_statistics(sc);
4445 }
4446
4447 /* wait till consumers catch up with producers in all queues */
4448 bxe_drain_tx_queues(sc);
4449
4450 /* if VF, indicate to the PF that this function is going down (the PF
4451 * will delete the sp elements and clear the initializations)
4452 */
4453 if (IS_VF(sc)) {
4454 ; /* bxe_vfpf_close_vf(sc); */
4455 } else if (unload_mode != UNLOAD_RECOVERY) {
4456 /* if this is a normal/close unload need to clean up chip */
4457 bxe_chip_cleanup(sc, unload_mode, keep_link);
4458 } else {
4459 /* Send the UNLOAD_REQUEST to the MCP */
4460 bxe_send_unload_req(sc, unload_mode);
4461
4462 /*
4463 * Prevent transactions to the host from the functions on the
4464 * engine that does not reset the global blocks in case of a
4465 * global attention, once the global blocks are reset and the
4466 * gates are opened (that is, the engine whose leader will
4467 * perform the recovery last).
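 * Concretely, bxe_pf_disable() below stops this function's DMA toward
 * the host so in-flight transactions cannot be corrupted while the
 * recovery leader resets the shared blocks.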
4468 */ 4469 if (!CHIP_IS_E1x(sc)) { 4470 bxe_pf_disable(sc); 4471 } 4472 4473 /* disable HW interrupts */ 4474 bxe_int_disable_sync(sc, TRUE); 4475 4476 /* detach interrupts */ 4477 bxe_interrupt_detach(sc); 4478 4479 /* Report UNLOAD_DONE to MCP */ 4480 bxe_send_unload_done(sc, FALSE); 4481 } 4482 4483 /* 4484 * At this stage no more interrupts will arrive so we may safely clean 4485 * the queue'able objects here in case they failed to get cleaned so far. 4486 */ 4487 if (IS_PF(sc)) { 4488 bxe_squeeze_objects(sc); 4489 } 4490 4491 /* There should be no more pending SP commands at this stage */ 4492 sc->sp_state = 0; 4493 4494 sc->port.pmf = 0; 4495 4496 bxe_free_fp_buffers(sc); 4497 4498 if (IS_PF(sc)) { 4499 bxe_free_mem(sc); 4500 } 4501 4502 bxe_free_fw_stats_mem(sc); 4503 4504 sc->state = BXE_STATE_CLOSED; 4505 4506 /* 4507 * Check if there are pending parity attentions. If there are - set 4508 * RECOVERY_IN_PROGRESS. 4509 */ 4510 if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) { 4511 bxe_set_reset_in_progress(sc); 4512 4513 /* Set RESET_IS_GLOBAL if needed */ 4514 if (global) { 4515 bxe_set_reset_global(sc); 4516 } 4517 } 4518 4519 /* 4520 * The last driver must disable a "close the gate" if there is no 4521 * parity attention or "process kill" pending. 4522 */ 4523 if (IS_PF(sc) && !bxe_clear_pf_load(sc) && 4524 bxe_reset_is_done(sc, SC_PATH(sc))) { 4525 bxe_disable_close_the_gate(sc); 4526 } 4527 4528 BLOGD(sc, DBG_LOAD, "Ended NIC unload\n"); 4529 4530 return (0); 4531} 4532 4533/* 4534 * Called by the OS to set various media options (i.e. link, speed, etc.) when 4535 * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...". 4536 */ 4537static int 4538bxe_ifmedia_update(struct ifnet *ifp) 4539{ 4540 struct bxe_softc *sc = (struct bxe_softc *)ifp->if_softc; 4541 struct ifmedia *ifm; 4542 4543 ifm = &sc->ifmedia; 4544 4545 /* We only support Ethernet media type. */ 4546 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { 4547 return (EINVAL); 4548 } 4549 4550 switch (IFM_SUBTYPE(ifm->ifm_media)) { 4551 case IFM_AUTO: 4552 break; 4553 case IFM_10G_CX4: 4554 case IFM_10G_SR: 4555 case IFM_10G_T: 4556 case IFM_10G_TWINAX: 4557 default: 4558 /* We don't support changing the media type. */ 4559 BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n", 4560 IFM_SUBTYPE(ifm->ifm_media)); 4561 return (EINVAL); 4562 } 4563 4564 return (0); 4565} 4566 4567/* 4568 * Called by the OS to get the current media status (i.e. link, speed, etc.). 4569 */ 4570static void 4571bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) 4572{ 4573 struct bxe_softc *sc = ifp->if_softc; 4574 4575 /* Report link down if the driver isn't running. */ 4576 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 4577 ifmr->ifm_active |= IFM_NONE; 4578 return; 4579 } 4580 4581 /* Setup the default interface info. 
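 * For example, a 10GbE SR port with link up ends up reporting
 * IFM_ETHER | IFM_10G_SR | IFM_FDX as the active media with
 * (IFM_AVALID | IFM_ACTIVE) status.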
*/ 4582 ifmr->ifm_status = IFM_AVALID; 4583 ifmr->ifm_active = IFM_ETHER; 4584 4585 if (sc->link_vars.link_up) { 4586 ifmr->ifm_status |= IFM_ACTIVE; 4587 } else { 4588 ifmr->ifm_active |= IFM_NONE; 4589 return; 4590 } 4591 4592 ifmr->ifm_active |= sc->media; 4593 4594 if (sc->link_vars.duplex == DUPLEX_FULL) { 4595 ifmr->ifm_active |= IFM_FDX; 4596 } else { 4597 ifmr->ifm_active |= IFM_HDX; 4598 } 4599} 4600 4601static int 4602bxe_ioctl_nvram(struct bxe_softc *sc, 4603 uint32_t priv_op, 4604 struct ifreq *ifr) 4605{ 4606 struct bxe_nvram_data nvdata_base; 4607 struct bxe_nvram_data *nvdata; 4608 int len; 4609 int error = 0; 4610 4611 copyin(ifr->ifr_data, &nvdata_base, sizeof(nvdata_base)); 4612 4613 len = (sizeof(struct bxe_nvram_data) + 4614 nvdata_base.len - 4615 sizeof(uint32_t)); 4616 4617 if (len > sizeof(struct bxe_nvram_data)) { 4618 if ((nvdata = (struct bxe_nvram_data *) 4619 malloc(len, M_DEVBUF, 4620 (M_NOWAIT | M_ZERO))) == NULL) { 4621 BLOGE(sc, "BXE_IOC_RD_NVRAM malloc failed\n"); 4622 return (1); 4623 } 4624 memcpy(nvdata, &nvdata_base, sizeof(struct bxe_nvram_data)); 4625 } else { 4626 nvdata = &nvdata_base; 4627 } 4628 4629 if (priv_op == BXE_IOC_RD_NVRAM) { 4630 BLOGD(sc, DBG_IOCTL, "IOC_RD_NVRAM 0x%x %d\n", 4631 nvdata->offset, nvdata->len); 4632 error = bxe_nvram_read(sc, 4633 nvdata->offset, 4634 (uint8_t *)nvdata->value, 4635 nvdata->len); 4636 copyout(nvdata, ifr->ifr_data, len); 4637 } else { /* BXE_IOC_WR_NVRAM */ 4638 BLOGD(sc, DBG_IOCTL, "IOC_WR_NVRAM 0x%x %d\n", 4639 nvdata->offset, nvdata->len); 4640 copyin(ifr->ifr_data, nvdata, len); 4641 error = bxe_nvram_write(sc, 4642 nvdata->offset, 4643 (uint8_t *)nvdata->value, 4644 nvdata->len); 4645 } 4646 4647 if (len > sizeof(struct bxe_nvram_data)) { 4648 free(nvdata, M_DEVBUF); 4649 } 4650 4651 return (error); 4652} 4653 4654static int 4655bxe_ioctl_stats_show(struct bxe_softc *sc, 4656 uint32_t priv_op, 4657 struct ifreq *ifr) 4658{ 4659 const size_t str_size = (BXE_NUM_ETH_STATS * STAT_NAME_LEN); 4660 const size_t stats_size = (BXE_NUM_ETH_STATS * sizeof(uint64_t)); 4661 caddr_t p_tmp; 4662 uint32_t *offset; 4663 int i; 4664 4665 switch (priv_op) 4666 { 4667 case BXE_IOC_STATS_SHOW_NUM: 4668 memset(ifr->ifr_data, 0, sizeof(union bxe_stats_show_data)); 4669 ((union bxe_stats_show_data *)ifr->ifr_data)->desc.num = 4670 BXE_NUM_ETH_STATS; 4671 ((union bxe_stats_show_data *)ifr->ifr_data)->desc.len = 4672 STAT_NAME_LEN; 4673 return (0); 4674 4675 case BXE_IOC_STATS_SHOW_STR: 4676 memset(ifr->ifr_data, 0, str_size); 4677 p_tmp = ifr->ifr_data; 4678 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 4679 strcpy(p_tmp, bxe_eth_stats_arr[i].string); 4680 p_tmp += STAT_NAME_LEN; 4681 } 4682 return (0); 4683 4684 case BXE_IOC_STATS_SHOW_CNT: 4685 memset(ifr->ifr_data, 0, stats_size); 4686 p_tmp = ifr->ifr_data; 4687 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 4688 offset = ((uint32_t *)&sc->eth_stats + 4689 bxe_eth_stats_arr[i].offset); 4690 switch (bxe_eth_stats_arr[i].size) { 4691 case 4: 4692 *((uint64_t *)p_tmp) = (uint64_t)*offset; 4693 break; 4694 case 8: 4695 *((uint64_t *)p_tmp) = HILO_U64(*offset, *(offset + 1)); 4696 break; 4697 default: 4698 *((uint64_t *)p_tmp) = 0; 4699 } 4700 p_tmp += sizeof(uint64_t); 4701 } 4702 return (0); 4703 4704 default: 4705 return (-1); 4706 } 4707} 4708 4709static void 4710bxe_handle_chip_tq(void *context, 4711 int pending) 4712{ 4713 struct bxe_softc *sc = (struct bxe_softc *)context; 4714 long work = atomic_load_acq_long(&sc->chip_tq_flags); 4715 4716 switch (work) 4717 { 4718 case 
CHIP_TQ_START:
4719 if ((sc->ifnet->if_flags & IFF_UP) &&
4720 !(sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) {
4721 /* start the interface */
4722 BLOGD(sc, DBG_LOAD, "Starting the interface...\n");
4723 BXE_CORE_LOCK(sc);
4724 bxe_init_locked(sc);
4725 BXE_CORE_UNLOCK(sc);
4726 }
4727 break;
4728
4729 case CHIP_TQ_STOP:
4730 if (!(sc->ifnet->if_flags & IFF_UP) &&
4731 (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) {
4732 /* bring down the interface */
4733 BLOGD(sc, DBG_LOAD, "Stopping the interface...\n");
4734 bxe_periodic_stop(sc);
4735 BXE_CORE_LOCK(sc);
4736 bxe_stop_locked(sc);
4737 BXE_CORE_UNLOCK(sc);
4738 }
4739 break;
4740
4741 case CHIP_TQ_REINIT:
4742 if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) {
4743 /* restart the interface */
4744 BLOGD(sc, DBG_LOAD, "Restarting the interface...\n");
4745 bxe_periodic_stop(sc);
4746 BXE_CORE_LOCK(sc);
4747 bxe_stop_locked(sc);
4748 bxe_init_locked(sc);
4749 BXE_CORE_UNLOCK(sc);
4750 }
4751 break;
4752
4753 default:
4754 break;
4755 }
4756}
4757
4758/*
4759 * Handles any IOCTL calls from the operating system.
4760 *
4761 * Returns:
4762 * 0 = Success, >0 Failure
4763 */
4764static int
4765bxe_ioctl(struct ifnet *ifp,
4766 u_long command,
4767 caddr_t data)
4768{
4769 struct bxe_softc *sc = ifp->if_softc;
4770 struct ifreq *ifr = (struct ifreq *)data;
4771 struct bxe_nvram_data *nvdata;
4772 uint32_t priv_op;
4773 int mask = 0;
4774 int reinit = 0;
4775 int error = 0;
4776
4777 int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4778 int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4779
4780 switch (command)
4781 {
4782 case SIOCSIFMTU:
4783 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
4784 ifr->ifr_mtu);
4785
4786 if (sc->mtu == ifr->ifr_mtu) {
4787 /* nothing to change */
4788 break;
4789 }
4790
4791 if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4792 BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4793 ifr->ifr_mtu, mtu_min, mtu_max);
4794 error = EINVAL;
4795 break;
4796 }
4797
4798 atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4799 (unsigned long)ifr->ifr_mtu);
4800 atomic_store_rel_long((volatile unsigned long *)&ifp->if_mtu,
4801 (unsigned long)ifr->ifr_mtu);
4802
4803 reinit = 1;
4804 break;
4805
4806 case SIOCSIFFLAGS:
4807 /* toggle the interface state up or down */
4808 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");
4809
4810 /* check if the interface is up */
4811 if (ifp->if_flags & IFF_UP) {
4812 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4813 /* set the receive mode flags */
4814 bxe_set_rx_mode(sc);
4815 } else {
4816 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_START);
4817 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
4818 }
4819 } else {
4820 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4821 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_STOP);
4822 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
4823 }
4824 }
4825
4826 break;
4827
4828 case SIOCADDMULTI:
4829 case SIOCDELMULTI:
4830 /* add/delete multicast addresses */
4831 BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");
4832
4833 /* check if the interface is up */
4834 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
4835 /* set the receive mode flags */
4836 bxe_set_rx_mode(sc);
4837 }
4838
4839 break;
4840
4841 case SIOCSIFCAP:
4842 /* find out which capabilities have changed */
4843 mask = (ifr->ifr_reqcap ^ ifp->if_capenable);
4844
4845 BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
4846 mask);
4847
4848 /* toggle the LRO capabilities enable flag
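 * (mask holds only the capability bits whose requested state differs
 * from the current state; e.g. "ifconfig bxe0 -lro" arrives here with
 * IFCAP_LRO set in mask)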
*/
4849 if (mask & IFCAP_LRO) {
4850 ifp->if_capenable ^= IFCAP_LRO;
4851 BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
4852 (ifp->if_capenable & IFCAP_LRO) ? "ON" : "OFF");
4853 reinit = 1;
4854 }
4855
4856 /* toggle the TXCSUM checksum capabilities enable flag */
4857 if (mask & IFCAP_TXCSUM) {
4858 ifp->if_capenable ^= IFCAP_TXCSUM;
4859 BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
4860 (ifp->if_capenable & IFCAP_TXCSUM) ? "ON" : "OFF");
4861 if (ifp->if_capenable & IFCAP_TXCSUM) {
4862 ifp->if_hwassist = (CSUM_IP |
4863 CSUM_TCP |
4864 CSUM_UDP |
4865 CSUM_TSO |
4866 CSUM_TCP_IPV6 |
4867 CSUM_UDP_IPV6);
4868 } else {
4869 ifp->if_hwassist = 0;
4870 }
4871 }
4872
4873 /* toggle the RXCSUM checksum capabilities enable flag */
4874 if (mask & IFCAP_RXCSUM) {
4875 ifp->if_capenable ^= IFCAP_RXCSUM;
4876 BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
4877 (ifp->if_capenable & IFCAP_RXCSUM) ? "ON" : "OFF");
4878 if (ifp->if_capenable & IFCAP_RXCSUM) {
4879 ifp->if_hwassist = (CSUM_IP |
4880 CSUM_TCP |
4881 CSUM_UDP |
4882 CSUM_TSO |
4883 CSUM_TCP_IPV6 |
4884 CSUM_UDP_IPV6);
4885 } else {
4886 ifp->if_hwassist = 0;
4887 }
4888 }
4889
4890 /* toggle TSO4 capabilities enabled flag */
4891 if (mask & IFCAP_TSO4) {
4892 ifp->if_capenable ^= IFCAP_TSO4;
4893 BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
4894 (ifp->if_capenable & IFCAP_TSO4) ? "ON" : "OFF");
4895 }
4896
4897 /* toggle TSO6 capabilities enabled flag */
4898 if (mask & IFCAP_TSO6) {
4899 ifp->if_capenable ^= IFCAP_TSO6;
4900 BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
4901 (ifp->if_capenable & IFCAP_TSO6) ? "ON" : "OFF");
4902 }
4903
4904 /* toggle VLAN_HWTSO capabilities enabled flag */
4905 if (mask & IFCAP_VLAN_HWTSO) {
4906 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
4907 BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
4908 (ifp->if_capenable & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
4909 }
4910
4911 /* toggle VLAN_HWCSUM capabilities enabled flag */
4912 if (mask & IFCAP_VLAN_HWCSUM) {
4913 /* XXX investigate this... */
4914 BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
4915 error = EINVAL;
4916 }
4917
4918 /* toggle VLAN_MTU capabilities enable flag */
4919 if (mask & IFCAP_VLAN_MTU) {
4920 /* XXX investigate this... */
4921 BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
4922 error = EINVAL;
4923 }
4924
4925 /* toggle VLAN_HWTAGGING capabilities enabled flag */
4926 if (mask & IFCAP_VLAN_HWTAGGING) {
4927 /* XXX investigate this... */
4928 BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
4929 error = EINVAL;
4930 }
4931
4932 /* toggle VLAN_HWFILTER capabilities enabled flag */
4933 if (mask & IFCAP_VLAN_HWFILTER) {
4934 /* XXX investigate this... */
4935 BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
4936 error = EINVAL;
4937 }
4938
4939 /* XXX not yet...
4940 * IFCAP_WOL_MAGIC
4941 */
4942
4943 break;
4944
4945 case SIOCSIFMEDIA:
4946 case SIOCGIFMEDIA:
4947 /* set/get interface media */
4948 BLOGD(sc, DBG_IOCTL,
4949 "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
4950 (command & 0xff));
4951 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4952 break;
4953
4954 case SIOCGPRIVATE_0:
4955 copyin(ifr->ifr_data, &priv_op, sizeof(priv_op));
4956
4957 switch (priv_op)
4958 {
4959 case BXE_IOC_RD_NVRAM:
4960 case BXE_IOC_WR_NVRAM:
4961 nvdata = (struct bxe_nvram_data *)ifr->ifr_data;
4962 BLOGD(sc, DBG_IOCTL,
4963 "Received Private NVRAM ioctl addr=0x%x size=%u\n",
4964 nvdata->offset, nvdata->len);
4965 error = bxe_ioctl_nvram(sc, priv_op, ifr);
4966 break;
4967
4968 case BXE_IOC_STATS_SHOW_NUM:
4969 case BXE_IOC_STATS_SHOW_STR:
4970 case BXE_IOC_STATS_SHOW_CNT:
4971 BLOGD(sc, DBG_IOCTL, "Received Private Stats ioctl (%d)\n",
4972 priv_op);
4973 error = bxe_ioctl_stats_show(sc, priv_op, ifr);
4974 break;
4975
4976 default:
4977 BLOGW(sc, "Received Private Unknown ioctl (%d)\n", priv_op);
4978 error = EINVAL;
4979 break;
4980 }
4981
4982 break;
4983
4984 default:
4985 BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
4986 (command & 0xff));
4987 error = ether_ioctl(ifp, command, data);
4988 break;
4989 }
4990
4991 if (reinit && (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) {
4992 BLOGD(sc, DBG_LOAD | DBG_IOCTL,
4993 "Re-initializing hardware from IOCTL change\n");
4994 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
4995 taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
4996 }
4997
4998 return (error);
4999}
5000
5001static __noinline void
5002bxe_dump_mbuf(struct bxe_softc *sc,
5003 struct mbuf *m,
5004 uint8_t contents)
5005{
5006 char * type;
5007 int i = 0;
5008
5009 if (!(sc->debug & DBG_MBUF)) {
5010 return;
5011 }
5012
5013 if (m == NULL) {
5014 BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
5015 return;
5016 }
5017
5018 while (m) {
5019 BLOGD(sc, DBG_MBUF,
5020 "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
5021 i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
5022
5023 if (m->m_flags & M_PKTHDR) {
5024 BLOGD(sc, DBG_MBUF,
5025 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
5026 i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
5027 (int)m->m_pkthdr.csum_flags, CSUM_BITS);
5028 }
5029
5030 if (m->m_flags & M_EXT) {
5031 switch (m->m_ext.ext_type) {
5032 case EXT_CLUSTER: type = "EXT_CLUSTER"; break;
5033 case EXT_SFBUF: type = "EXT_SFBUF"; break;
5034 case EXT_JUMBOP: type = "EXT_JUMBOP"; break;
5035 case EXT_JUMBO9: type = "EXT_JUMBO9"; break;
5036 case EXT_JUMBO16: type = "EXT_JUMBO16"; break;
5037 case EXT_PACKET: type = "EXT_PACKET"; break;
5038 case EXT_MBUF: type = "EXT_MBUF"; break;
5039 case EXT_NET_DRV: type = "EXT_NET_DRV"; break;
5040 case EXT_MOD_TYPE: type = "EXT_MOD_TYPE"; break;
5041 case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
5042 case EXT_EXTREF: type = "EXT_EXTREF"; break;
5043 default: type = "UNKNOWN"; break;
5044 }
5045
5046 BLOGD(sc, DBG_MBUF,
5047 "%02d: - m_ext: %p ext_size=%d type=%s\n",
5048 i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
5049 }
5050
5051 if (contents) {
5052 bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
5053 }
5054
5055 m = m->m_next;
5056 i++;
5057 }
5058}
5059
5060/*
5061 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
5062 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
5063 * The window: the 3 reserved BDs are 1 for the headers BD + 2 for the
5064 * parse BD and the last BD. The headers come in a separate BD in FreeBSD, so 13-3=10.
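 * Example (illustrative numbers): with an MSS of 1460 the code below first
 * sums segments 1..10; if that sum is below 1460 the chain must be
 * defragmented. It then slides the window one segment at a time (dropping
 * segs[wnd_idx+1], adding segs[frag_idx]) and applies the same test to
 * every subsequent 10-segment window.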
5065 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
5066 */
5067static int
5068bxe_chktso_window(struct bxe_softc *sc,
5069 int nsegs,
5070 bus_dma_segment_t *segs,
5071 struct mbuf *m)
5072{
5073 uint32_t num_wnds, wnd_size, wnd_sum;
5074 int32_t frag_idx, wnd_idx;
5075 unsigned short lso_mss;
5076 int defrag;
5077
5078 defrag = 0;
5079 wnd_sum = 0;
5080 wnd_size = 10;
5081 num_wnds = nsegs - wnd_size;
5082 lso_mss = htole16(m->m_pkthdr.tso_segsz);
5083
5084 /*
5085 * The total header length (Eth+IP+TCP) is in the first FreeBSD mbuf, so
5086 * calculate the sum of the first window of data while skipping segment
5087 * zero, assuming it holds the headers.
5088 */
5089 for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
5090 wnd_sum += htole16(segs[frag_idx].ds_len);
5091 }
5092
5093 /* check the first 10 bd window size */
5094 if (wnd_sum < lso_mss) {
5095 return (1);
5096 }
5097
5098 /* run through the windows */
5099 for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
5100 /* subtract the first segment of the previous window (header excluded) */
5101 wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
5102 /* add the next segment's length to our new window */
5103 wnd_sum += htole16(segs[frag_idx].ds_len);
5104 if (wnd_sum < lso_mss) {
5105 return (1);
5106 }
5107 }
5108
5109 return (0);
5110}
5111
5112static uint8_t
5113bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
5114 struct mbuf *m,
5115 uint32_t *parsing_data)
5116{
5117 struct ether_vlan_header *eh = NULL;
5118 struct ip *ip4 = NULL;
5119 struct ip6_hdr *ip6 = NULL;
5120 caddr_t ip = NULL;
5121 struct tcphdr *th = NULL;
5122 int e_hlen, ip_hlen, l4_off;
5123 uint16_t proto;
5124
5125 if (m->m_pkthdr.csum_flags == CSUM_IP) {
5126 /* no L4 checksum offload needed */
5127 return (0);
5128 }
5129
5130 /* get the Ethernet header */
5131 eh = mtod(m, struct ether_vlan_header *);
5132
5133 /* handle VLAN encapsulation if present */
5134 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
5135 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
5136 proto = ntohs(eh->evl_proto);
5137 } else {
5138 e_hlen = ETHER_HDR_LEN;
5139 proto = ntohs(eh->evl_encap_proto);
5140 }
5141
5142 switch (proto) {
5143 case ETHERTYPE_IP:
5144 /* get the IP header, if mbuf len < 20 then header in next mbuf */
5145 ip4 = (m->m_len < sizeof(struct ip)) ?
5146 (struct ip *)m->m_next->m_data :
5147 (struct ip *)(m->m_data + e_hlen);
5148 /* ip_hl is number of 32-bit words */
5149 ip_hlen = (ip4->ip_hl << 2);
5150 ip = (caddr_t)ip4;
5151 break;
5152 case ETHERTYPE_IPV6:
5153 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
5154 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
5155 (struct ip6_hdr *)m->m_next->m_data :
5156 (struct ip6_hdr *)(m->m_data + e_hlen);
5157 /* XXX cannot support offload with IPv6 extensions */
5158 ip_hlen = sizeof(struct ip6_hdr);
5159 ip = (caddr_t)ip6;
5160 break;
5161 default:
5162 /* We can't offload in this case... */
5163 /* XXX error stat ???
*/ 5164 return (0); 5165 } 5166 5167 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ 5168 l4_off = (e_hlen + ip_hlen); 5169 5170 *parsing_data |= 5171 (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & 5172 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W); 5173 5174 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5175 CSUM_TSO | 5176 CSUM_TCP_IPV6)) { 5177 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; 5178 th = (struct tcphdr *)(ip + ip_hlen); 5179 /* th_off is number of 32-bit words */ 5180 *parsing_data |= ((th->th_off << 5181 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & 5182 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW); 5183 return (l4_off + (th->th_off << 2)); /* entire header length */ 5184 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5185 CSUM_UDP_IPV6)) { 5186 fp->eth_q_stats.tx_ofld_frames_csum_udp++; 5187 return (l4_off + sizeof(struct udphdr)); /* entire header length */ 5188 } else { 5189 /* XXX error stat ??? */ 5190 return (0); 5191 } 5192} 5193 5194static uint8_t 5195bxe_set_pbd_csum(struct bxe_fastpath *fp, 5196 struct mbuf *m, 5197 struct eth_tx_parse_bd_e1x *pbd) 5198{ 5199 struct ether_vlan_header *eh = NULL; 5200 struct ip *ip4 = NULL; 5201 struct ip6_hdr *ip6 = NULL; 5202 caddr_t ip = NULL; 5203 struct tcphdr *th = NULL; 5204 struct udphdr *uh = NULL; 5205 int e_hlen, ip_hlen; 5206 uint16_t proto; 5207 uint8_t hlen; 5208 uint16_t tmp_csum; 5209 uint32_t *tmp_uh; 5210 5211 /* get the Ethernet header */ 5212 eh = mtod(m, struct ether_vlan_header *); 5213 5214 /* handle VLAN encapsulation if present */ 5215 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 5216 e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 5217 proto = ntohs(eh->evl_proto); 5218 } else { 5219 e_hlen = ETHER_HDR_LEN; 5220 proto = ntohs(eh->evl_encap_proto); 5221 } 5222 5223 switch (proto) { 5224 case ETHERTYPE_IP: 5225 /* get the IP header, if mbuf len < 20 then header in next mbuf */ 5226 ip4 = (m->m_len < sizeof(struct ip)) ? 5227 (struct ip *)m->m_next->m_data : 5228 (struct ip *)(m->m_data + e_hlen); 5229 /* ip_hl is number of 32-bit words */ 5230 ip_hlen = (ip4->ip_hl << 1); 5231 ip = (caddr_t)ip4; 5232 break; 5233 case ETHERTYPE_IPV6: 5234 /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ 5235 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? 5236 (struct ip6_hdr *)m->m_next->m_data : 5237 (struct ip6_hdr *)(m->m_data + e_hlen); 5238 /* XXX cannot support offload with IPv6 extensions */ 5239 ip_hlen = (sizeof(struct ip6_hdr) >> 1); 5240 ip = (caddr_t)ip6; 5241 break; 5242 default: 5243 /* We can't offload in this case... */ 5244 /* XXX error stat ??? 
*/ 5245 return (0); 5246 } 5247 5248 hlen = (e_hlen >> 1); 5249 5250 /* note that rest of global_data is indirectly zeroed here */ 5251 if (m->m_flags & M_VLANTAG) { 5252 pbd->global_data = 5253 htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); 5254 } else { 5255 pbd->global_data = htole16(hlen); 5256 } 5257 5258 pbd->ip_hlen_w = ip_hlen; 5259 5260 hlen += pbd->ip_hlen_w; 5261 5262 /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ 5263 5264 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5265 CSUM_TSO | 5266 CSUM_TCP_IPV6)) { 5267 th = (struct tcphdr *)(ip + (ip_hlen << 1)); 5268 /* th_off is number of 32-bit words */ 5269 hlen += (uint16_t)(th->th_off << 1); 5270 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5271 CSUM_UDP_IPV6)) { 5272 uh = (struct udphdr *)(ip + (ip_hlen << 1)); 5273 hlen += (sizeof(struct udphdr) / 2); 5274 } else { 5275 /* valid case as only CSUM_IP was set */ 5276 return (0); 5277 } 5278 5279 pbd->total_hlen_w = htole16(hlen); 5280 5281 if (m->m_pkthdr.csum_flags & (CSUM_TCP | 5282 CSUM_TSO | 5283 CSUM_TCP_IPV6)) { 5284 fp->eth_q_stats.tx_ofld_frames_csum_tcp++; 5285 pbd->tcp_pseudo_csum = ntohs(th->th_sum); 5286 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | 5287 CSUM_UDP_IPV6)) { 5288 fp->eth_q_stats.tx_ofld_frames_csum_udp++; 5289 5290 /* 5291 * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP 5292 * checksums and does not know anything about the UDP header and where 5293 * the checksum field is located. It only knows about TCP. Therefore 5294 * we "lie" to the hardware for outgoing UDP packets w/ checksum 5295 * offload. Since the checksum field offset for TCP is 16 bytes and 5296 * for UDP it is 6 bytes we pass a pointer to the hardware that is 10 5297 * bytes less than the start of the UDP header. This allows the 5298 * hardware to write the checksum in the correct spot. But the 5299 * hardware will compute a checksum which includes the last 10 bytes 5300 * of the IP header. To correct this we tweak the stack computed 5301 * pseudo checksum by folding in the calculation of the inverse 5302 * checksum for those final 10 bytes of the IP header. This allows 5303 * the correct checksum to be computed by the hardware. 5304 */ 5305 5306 /* set pointer 10 bytes before UDP header */ 5307 tmp_uh = (uint32_t *)((uint8_t *)uh - 10); 5308 5309 /* calculate a pseudo header checksum over the first 10 bytes */ 5310 tmp_csum = in_pseudo(*tmp_uh, 5311 *(tmp_uh + 1), 5312 *(uint16_t *)(tmp_uh + 2)); 5313 5314 pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum)); 5315 } 5316 5317 return (hlen * 2); /* entire header length, number of bytes */ 5318} 5319 5320static void 5321bxe_set_pbd_lso_e2(struct mbuf *m, 5322 uint32_t *parsing_data) 5323{ 5324 *parsing_data |= ((m->m_pkthdr.tso_segsz << 5325 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & 5326 ETH_TX_PARSE_BD_E2_LSO_MSS); 5327 5328 /* XXX test for IPv6 with extension header... */ 5329#if 0 5330 struct ip6_hdr *ip6; 5331 if (ip6 && ip6->ip6_nxt == 'some ipv6 extension header') 5332 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; 5333#endif 5334} 5335 5336static void 5337bxe_set_pbd_lso(struct mbuf *m, 5338 struct eth_tx_parse_bd_e1x *pbd) 5339{ 5340 struct ether_vlan_header *eh = NULL; 5341 struct ip *ip = NULL; 5342 struct tcphdr *th = NULL; 5343 int e_hlen; 5344 5345 /* get the Ethernet header */ 5346 eh = mtod(m, struct ether_vlan_header *); 5347 5348 /* handle VLAN encapsulation if present */ 5349 e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ? 
5350 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;
5351
5352 /* get the IP and TCP headers; with LSO the entire header is in the first mbuf */
5353 /* XXX assuming IPv4 */
5354 ip = (struct ip *)(m->m_data + e_hlen);
5355 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5356
5357 pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5358 pbd->tcp_send_seq = ntohl(th->th_seq);
5359 pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5360
5361#if 1
5362 /* XXX IPv4 */
5363 pbd->ip_id = ntohs(ip->ip_id);
5364 pbd->tcp_pseudo_csum =
5365 ntohs(in_pseudo(ip->ip_src.s_addr,
5366 ip->ip_dst.s_addr,
5367 htons(IPPROTO_TCP)));
5368#else
5369 /* XXX IPv6 */
5370 pbd->tcp_pseudo_csum =
5371 ntohs(in_pseudo(&ip6->ip6_src,
5372 &ip6->ip6_dst,
5373 htons(IPPROTO_TCP)));
5374#endif
5375
5376 pbd->global_data |=
5377 htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
5378}
5379
5380/*
5381 * Encapsulates an mbuf cluster into the TX BD chain and makes the memory
5382 * visible to the controller.
5383 *
5384 * If an mbuf is submitted to this routine and cannot be given to the
5385 * controller (e.g. it has too many fragments) then the function may free
5386 * the mbuf and return to the caller.
5387 *
5388 * Returns:
5389 * 0 = Success, !0 = Failure
5390 * Note the side effect that an mbuf may be freed if it causes a problem.
5391 */
5392static int
5393bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
5394{
5395 bus_dma_segment_t segs[32];
5396 struct mbuf *m0;
5397 struct bxe_sw_tx_bd *tx_buf;
5398 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
5399 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
5400 /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
5401 struct eth_tx_bd *tx_data_bd;
5402 struct eth_tx_bd *tx_total_pkt_size_bd;
5403 struct eth_tx_start_bd *tx_start_bd;
5404 uint16_t bd_prod, pkt_prod, total_pkt_size;
5405 uint8_t mac_type;
5406 int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
5407 struct bxe_softc *sc;
5408 uint16_t tx_bd_avail;
5409 struct ether_vlan_header *eh;
5410 uint32_t pbd_e2_parsing_data = 0;
5411 uint8_t hlen = 0;
5412 int tmp_bd;
5413 int i;
5414
5415 sc = fp->sc;
5416
5417 M_ASSERTPKTHDR(*m_head);
5418
5419 m0 = *m_head;
5420 rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
5421 tx_start_bd = NULL;
5422 tx_data_bd = NULL;
5423 tx_total_pkt_size_bd = NULL;
5424
5425 /* get the H/W pointer for packets and BDs */
5426 pkt_prod = fp->tx_pkt_prod;
5427 bd_prod = fp->tx_bd_prod;
5428
5429 mac_type = UNICAST_ADDRESS;
5430
5431 /* map the mbuf into the next open DMAable memory */
5432 tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5433 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5434 tx_buf->m_map, m0,
5435 segs, &nsegs, BUS_DMA_NOWAIT);
5436
5437 /* mapping errors */
5438 if(__predict_false(error != 0)) {
5439 fp->eth_q_stats.tx_dma_mapping_failure++;
5440 if (error == ENOMEM) {
5441 /* resource issue, try again later */
5442 rc = ENOMEM;
5443 } else if (error == EFBIG) {
5444 /* possibly recoverable with defragmentation */
5445 fp->eth_q_stats.mbuf_defrag_attempts++;
5446 m0 = m_defrag(*m_head, M_DONTWAIT);
5447 if (m0 == NULL) {
5448 fp->eth_q_stats.mbuf_defrag_failures++;
5449 rc = ENOBUFS;
5450 } else {
5451 /* defrag successful, try mapping again */
5452 *m_head = m0;
5453 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5454 tx_buf->m_map, m0,
5455 segs, &nsegs, BUS_DMA_NOWAIT);
5456 if (error) {
5457 fp->eth_q_stats.tx_dma_mapping_failure++;
5458 rc = error;
5459 }
5460 }
5461 } else {
5462 /* unknown, unrecoverable mapping error */
5463 BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
5464 bxe_dump_mbuf(sc, m0, FALSE);
5465 rc = error;
5466 }
5467
5468 goto bxe_tx_encap_continue;
5469 }
5470
5471 tx_bd_avail = bxe_tx_avail(sc, fp);
5472
5473 /* make sure there is enough room in the send queue */
5474 if (__predict_false(tx_bd_avail < (nsegs + 2))) {
5475 /* Recoverable, try again later. */
5476 fp->eth_q_stats.tx_hw_queue_full++;
5477 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5478 rc = ENOMEM;
5479 goto bxe_tx_encap_continue;
5480 }
5481
5482 /* capture the current H/W TX chain high watermark */
5483 if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5484 (TX_BD_USABLE - tx_bd_avail))) {
5485 fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5486 }
5487
5488 /* make sure it fits in the packet window */
5489 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
5490 /*
5491 * The mbuf may be too big for the controller to handle. If the frame
5492 * is a TSO frame we'll need to do an additional check.
5493 */
5494 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5495 if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
5496 goto bxe_tx_encap_continue; /* OK to send */
5497 } else {
5498 fp->eth_q_stats.tx_window_violation_tso++;
5499 }
5500 } else {
5501 fp->eth_q_stats.tx_window_violation_std++;
5502 }
5503
5504 /* let's try to defragment this mbuf and remap it */
5505 fp->eth_q_stats.mbuf_defrag_attempts++;
5506 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5507
5508 m0 = m_defrag(*m_head, M_DONTWAIT);
5509 if (m0 == NULL) {
5510 fp->eth_q_stats.mbuf_defrag_failures++;
5511 /* Ugh, just drop the frame... :( */
5512 rc = ENOBUFS;
5513 } else {
5514 /* defrag successful, try mapping again */
5515 *m_head = m0;
5516 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5517 tx_buf->m_map, m0,
5518 segs, &nsegs, BUS_DMA_NOWAIT);
5519 if (error) {
5520 fp->eth_q_stats.tx_dma_mapping_failure++;
5521 /* No sense in trying to defrag/copy chain, drop it.
:( */ 5522 rc = error; 5523 } 5524 else { 5525 /* if the chain is still too long then drop it */ 5526 if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { 5527 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); 5528 rc = ENODEV; 5529 } 5530 } 5531 } 5532 } 5533 5534bxe_tx_encap_continue: 5535 5536 /* Check for errors */ 5537 if (rc) { 5538 if (rc == ENOMEM) { 5539 /* recoverable try again later */ 5540 } else { 5541 fp->eth_q_stats.tx_soft_errors++; 5542 fp->eth_q_stats.mbuf_alloc_tx--; 5543 m_freem(*m_head); 5544 *m_head = NULL; 5545 } 5546 5547 return (rc); 5548 } 5549 5550 /* set flag according to packet type (UNICAST_ADDRESS is default) */ 5551 if (m0->m_flags & M_BCAST) { 5552 mac_type = BROADCAST_ADDRESS; 5553 } else if (m0->m_flags & M_MCAST) { 5554 mac_type = MULTICAST_ADDRESS; 5555 } 5556 5557 /* store the mbuf into the mbuf ring */ 5558 tx_buf->m = m0; 5559 tx_buf->first_bd = fp->tx_bd_prod; 5560 tx_buf->flags = 0; 5561 5562 /* prepare the first transmit (start) BD for the mbuf */ 5563 tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd; 5564 5565 BLOGD(sc, DBG_TX, 5566 "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n", 5567 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); 5568 5569 tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 5570 tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 5571 tx_start_bd->nbytes = htole16(segs[0].ds_len); 5572 total_pkt_size += tx_start_bd->nbytes; 5573 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 5574 5575 tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 5576 5577 /* all frames have at least Start BD + Parsing BD */ 5578 nbds = nsegs + 1; 5579 tx_start_bd->nbd = htole16(nbds); 5580 5581 if (m0->m_flags & M_VLANTAG) { 5582 tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag); 5583 tx_start_bd->bd_flags.as_bitfield |= 5584 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 5585 } else { 5586 /* vf tx, start bd must hold the ethertype for fw to enforce it */ 5587 if (IS_VF(sc)) { 5588 /* map ethernet header to find type and header length */ 5589 eh = mtod(m0, struct ether_vlan_header *); 5590 tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto; 5591 } else { 5592 /* used by FW for packet accounting */ 5593 tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod); 5594#if 0 5595 /* 5596 * If NPAR-SD is active then FW should do the tagging regardless 5597 * of value of priority. Otherwise, if priority indicates this is 5598 * a control packet we need to indicate to FW to avoid tagging. 5599 */ 5600 if (!IS_MF_AFEX(sc) && (mbuf priority == PRIO_CONTROL)) { 5601 SET_FLAG(tx_start_bd->general_data, 5602 ETH_TX_START_BD_FORCE_VLAN_MODE, 1); 5603 } 5604#endif 5605 } 5606 } 5607 5608 /* 5609 * add a parsing BD from the chain. 
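 * (on E2 and newer the parsing information is accumulated in the
 * pbd_e2_parsing_data word and written out below; on E1x it is stored
 * directly in the eth_tx_parse_bd_e1x structure.)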
The parsing BD is always added
5610 * though it is only used for TSO and checksum offload
5611 */
5612 bd_prod = TX_BD_NEXT(bd_prod);
5613
5614 if (m0->m_pkthdr.csum_flags) {
5615 if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5616 fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5617 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5618 }
5619
5620 if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5621 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5622 ETH_TX_BD_FLAGS_L4_CSUM);
5623 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5624 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5625 ETH_TX_BD_FLAGS_IS_UDP |
5626 ETH_TX_BD_FLAGS_L4_CSUM);
5627 } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5628 (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5629 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5630 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5631 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5632 ETH_TX_BD_FLAGS_IS_UDP);
5633 }
5634 }
5635
5636 if (!CHIP_IS_E1x(sc)) {
5637 pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5638 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
5639
5640 if (m0->m_pkthdr.csum_flags) {
5641 hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
5642 }
5643
5644#if 0
5645 /*
5646 * Add the MACs to the parsing BD if the module param was
5647 * explicitly set, if this is a vf, or in switch independent
5648 * mode.
5649 */
5650 if (sc->flags & BXE_TX_SWITCHING || IS_VF(sc) || IS_MF_SI(sc)) {
5651 eh = mtod(m0, struct ether_vlan_header *);
5652 bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
5653 &pbd_e2->data.mac_addr.src_mid,
5654 &pbd_e2->data.mac_addr.src_lo,
5655 eh->evl_shost);
5656 bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
5657 &pbd_e2->data.mac_addr.dst_mid,
5658 &pbd_e2->data.mac_addr.dst_lo,
5659 eh->evl_dhost);
5660 }
5661#endif
5662
5663 SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
5664 mac_type);
5665 } else {
5666 uint16_t global_data = 0;
5667
5668 pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5669 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
5670
5671 if (m0->m_pkthdr.csum_flags) {
5672 hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
5673 }
5674
5675 SET_FLAG(global_data,
5676 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
5677 pbd_e1x->global_data |= htole16(global_data);
5678 }
5679
5680 /* setup the parsing BD with TSO specific info */
5681 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5682 fp->eth_q_stats.tx_ofld_frames_lso++;
5683 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5684
5685 if (__predict_false(tx_start_bd->nbytes > hlen)) {
5686 fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5687
5688 /* split the first BD into header/data making the fw job easy */
5689 nbds++;
5690 tx_start_bd->nbd = htole16(nbds);
5691 tx_start_bd->nbytes = htole16(hlen);
5692
5693 bd_prod = TX_BD_NEXT(bd_prod);
5694
5695 /* new transmit BD after the tx_parse_bd */
5696 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5697 tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5698 tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5699 tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen);
5700 if (tx_total_pkt_size_bd == NULL) {
5701 tx_total_pkt_size_bd = tx_data_bd;
5702 }
5703
5704 BLOGD(sc, DBG_TX,
5705 "TSO split header size is %d (%x:%x) nbds %d\n",
5706 le16toh(tx_start_bd->nbytes),
5707 le32toh(tx_start_bd->addr_hi),
5708 le32toh(tx_start_bd->addr_lo),
5709 nbds);
5710 }
5711
5712 if (!CHIP_IS_E1x(sc)) {
5713
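 /* E2 and newer chips carry the LSO MSS in the parsing_data word;
  * E1x chips use dedicated fields in the e1x parse BD instead */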
bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
5714 } else {
5715 bxe_set_pbd_lso(m0, pbd_e1x);
5716 }
5717 }
5718
5719 if (pbd_e2_parsing_data) {
5720 pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5721 }
5722
5723 /* prepare remaining BDs, start tx bd contains first seg/frag */
5724 for (i = 1; i < nsegs ; i++) {
5725 bd_prod = TX_BD_NEXT(bd_prod);
5726 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5727 tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5728 tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5729 tx_data_bd->nbytes = htole16(segs[i].ds_len);
5730 if (tx_total_pkt_size_bd == NULL) {
5731 tx_total_pkt_size_bd = tx_data_bd;
5732 }
5733 total_pkt_size += tx_data_bd->nbytes;
5734 }
5735
5736 BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);
5737
5738 if (tx_total_pkt_size_bd != NULL) {
5739 tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5740 }
5741
5742 if (__predict_false(sc->debug & DBG_TX)) {
5743 tmp_bd = tx_buf->first_bd;
5744 for (i = 0; i < nbds; i++)
5745 {
5746 if (i == 0) {
5747 BLOGD(sc, DBG_TX,
5748 "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
5749 "bd_flags=0x%x hdr_nbds=%d\n",
5750 tx_start_bd,
5751 tmp_bd,
5752 le16toh(tx_start_bd->nbd),
5753 le16toh(tx_start_bd->vlan_or_ethertype),
5754 tx_start_bd->bd_flags.as_bitfield,
5755 (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5756 } else if (i == 1) {
5757 if (pbd_e1x) {
5758 BLOGD(sc, DBG_TX,
5759 "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5760 "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
5761 "tcp_seq=%u total_hlen_w=%u\n",
5762 pbd_e1x,
5763 tmp_bd,
5764 pbd_e1x->global_data,
5765 pbd_e1x->ip_hlen_w,
5766 pbd_e1x->ip_id,
5767 pbd_e1x->lso_mss,
5768 pbd_e1x->tcp_flags,
5769 pbd_e1x->tcp_pseudo_csum,
5770 pbd_e1x->tcp_send_seq,
5771 le16toh(pbd_e1x->total_hlen_w));
5772 } else { /* if (pbd_e2) */
5773 BLOGD(sc, DBG_TX,
5774 "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5775 "src=%02x:%02x:%02x parsing_data=0x%x\n",
5776 pbd_e2,
5777 tmp_bd,
5778 pbd_e2->data.mac_addr.dst_hi,
5779 pbd_e2->data.mac_addr.dst_mid,
5780 pbd_e2->data.mac_addr.dst_lo,
5781 pbd_e2->data.mac_addr.src_hi,
5782 pbd_e2->data.mac_addr.src_mid,
5783 pbd_e2->data.mac_addr.src_lo,
5784 pbd_e2->parsing_data);
5785 }
5786 }
5787
5788 if (i != 1) { /* skip the parse bd as it doesn't hold data */
5789 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5790 BLOGD(sc, DBG_TX,
5791 "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo=0x%x\n",
5792 tx_data_bd,
5793 tmp_bd,
5794 le16toh(tx_data_bd->nbytes),
5795 le32toh(tx_data_bd->addr_hi),
5796 le32toh(tx_data_bd->addr_lo));
5797 }
5798
5799 tmp_bd = TX_BD_NEXT(tmp_bd);
5800 }
5801 }
5802
5803 BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);
5804
5805 /* update TX BD producer index value for next TX */
5806 bd_prod = TX_BD_NEXT(bd_prod);
5807
5808 /*
5809 * If the chain of tx_bd's describing this frame is adjacent to or spans
5810 * an eth_tx_next_bd element then we need to increment the nbds value.
5811 */
5812 if (TX_BD_IDX(bd_prod) < nbds) {
5813 nbds++;
5814 }
5815
5816 /* don't allow reordering of writes for nbd and packets */
5817 mb();
5818
5819 fp->tx_db.data.prod += nbds;
5820
5821 /* producer points to the next free tx_bd at this point */
5822 fp->tx_pkt_prod++;
5823 fp->tx_bd_prod = bd_prod;
5824
5825 DOORBELL(sc, fp->index, fp->tx_db.raw);
5826
5827 fp->eth_q_stats.tx_pkts++;
5828
5829 /* Prevent speculative reads from getting ahead of the status block.
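 * (BAR0 backs the status block window and BAR2 the doorbell space, as
 * used in the two calls below; the read barriers keep later reads from
 * being speculated past the doorbell write above.)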
*/ 5830 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 5831 0, 0, BUS_SPACE_BARRIER_READ); 5832 5833 /* Prevent speculative reads from getting ahead of the doorbell. */ 5834 bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle, 5835 0, 0, BUS_SPACE_BARRIER_READ); 5836 5837 return (0); 5838} 5839 5840static void 5841bxe_tx_start_locked(struct bxe_softc *sc, 5842 struct ifnet *ifp, 5843 struct bxe_fastpath *fp) 5844{ 5845 struct mbuf *m = NULL; 5846 int tx_count = 0; 5847 uint16_t tx_bd_avail; 5848 5849 BXE_FP_TX_LOCK_ASSERT(fp); 5850 5851 /* keep adding entries while there are frames to send */ 5852 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { 5853 5854 /* 5855 * check for any frames to send 5856 * dequeue can still be NULL even if queue is not empty 5857 */ 5858 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 5859 if (__predict_false(m == NULL)) { 5860 break; 5861 } 5862 5863 /* the mbuf now belongs to us */ 5864 fp->eth_q_stats.mbuf_alloc_tx++; 5865 5866 /* 5867 * Put the frame into the transmit ring. If we don't have room, 5868 * place the mbuf back at the head of the TX queue, set the 5869 * OACTIVE flag, and wait for the NIC to drain the chain. 5870 */ 5871 if (__predict_false(bxe_tx_encap(fp, &m))) { 5872 fp->eth_q_stats.tx_encap_failures++; 5873 if (m != NULL) { 5874 /* mark the TX queue as full and return the frame */ 5875 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 5876 IFQ_DRV_PREPEND(&ifp->if_snd, m); 5877 fp->eth_q_stats.mbuf_alloc_tx--; 5878 fp->eth_q_stats.tx_queue_xoff++; 5879 } 5880 5881 /* stop looking for more work */ 5882 break; 5883 } 5884 5885 /* the frame was enqueued successfully */ 5886 tx_count++; 5887 5888 /* send a copy of the frame to any BPF listeners. */ 5889 BPF_MTAP(ifp, m); 5890 5891 tx_bd_avail = bxe_tx_avail(sc, fp); 5892 5893 /* handle any completions if we're running low */ 5894 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 5895 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ 5896 bxe_txeof(sc, fp); 5897 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) { 5898 break; 5899 } 5900 } 5901 } 5902 5903 /* all TX packets were dequeued and/or the tx ring is full */ 5904 if (tx_count > 0) { 5905 /* reset the TX watchdog timeout timer */ 5906 fp->watchdog_timer = BXE_TX_TIMEOUT; 5907 } 5908} 5909 5910/* Legacy (non-RSS) dispatch routine */ 5911static void 5912bxe_tx_start(struct ifnet *ifp) 5913{ 5914 struct bxe_softc *sc; 5915 struct bxe_fastpath *fp; 5916 5917 sc = ifp->if_softc; 5918 5919 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 5920 BLOGW(sc, "Interface not running, ignoring transmit request\n"); 5921 return; 5922 } 5923 5924 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) { 5925 BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n"); 5926 return; 5927 } 5928 5929 if (!sc->link_vars.link_up) { 5930 BLOGW(sc, "Interface link is down, ignoring transmit request\n"); 5931 return; 5932 } 5933 5934 fp = &sc->fp[0]; 5935 5936 BXE_FP_TX_LOCK(fp); 5937 bxe_tx_start_locked(sc, ifp, fp); 5938 BXE_FP_TX_UNLOCK(fp); 5939} 5940 5941#if __FreeBSD_version >= 800000 5942 5943static int 5944bxe_tx_mq_start_locked(struct bxe_softc *sc, 5945 struct ifnet *ifp, 5946 struct bxe_fastpath *fp, 5947 struct mbuf *m) 5948{ 5949 struct buf_ring *tx_br = fp->tx_br; 5950 struct mbuf *next; 5951 int depth, rc, tx_count; 5952 uint16_t tx_bd_avail; 5953 5954 rc = tx_count = 0; 5955 5956 if (!tx_br) { 5957 BLOGE(sc, "Multiqueue TX and no buf_ring!\n"); 5958 return (EINVAL); 5959 } 5960 5961 /* fetch the depth of the driver queue */ 5962 depth = drbr_inuse(ifp, tx_br); 5963 if (depth > 
fp->eth_q_stats.tx_max_drbr_queue_depth) { 5964 fp->eth_q_stats.tx_max_drbr_queue_depth = depth; 5965 } 5966 5967 BXE_FP_TX_LOCK_ASSERT(fp); 5968 5969 if (m == NULL) { 5970 /* no new work, check for pending frames */ 5971 next = drbr_dequeue(ifp, tx_br); 5972 } else if (drbr_needs_enqueue(ifp, tx_br)) { 5973 /* have both new and pending work, maintain packet order */ 5974 rc = drbr_enqueue(ifp, tx_br, m); 5975 if (rc != 0) { 5976 fp->eth_q_stats.tx_soft_errors++; 5977 goto bxe_tx_mq_start_locked_exit; 5978 } 5979 next = drbr_dequeue(ifp, tx_br); 5980 } else { 5981 /* new work only and nothing pending */ 5982 next = m; 5983 } 5984 5985 /* keep adding entries while there are frames to send */ 5986 while (next != NULL) { 5987 5988 /* the mbuf now belongs to us */ 5989 fp->eth_q_stats.mbuf_alloc_tx++; 5990 5991 /* 5992 * Put the frame into the transmit ring. If we don't have room, 5993 * place the mbuf back at the head of the TX queue, set the 5994 * OACTIVE flag, and wait for the NIC to drain the chain. 5995 */ 5996 rc = bxe_tx_encap(fp, &next); 5997 if (__predict_false(rc != 0)) { 5998 fp->eth_q_stats.tx_encap_failures++; 5999 if (next != NULL) { 6000 /* mark the TX queue as full and save the frame */ 6001 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 6002 /* XXX this may reorder the frame */ 6003 rc = drbr_enqueue(ifp, tx_br, next); 6004 fp->eth_q_stats.mbuf_alloc_tx--; 6005 fp->eth_q_stats.tx_frames_deferred++; 6006 } 6007 6008 /* stop looking for more work */ 6009 break; 6010 } 6011 6012 /* the transmit frame was enqueued successfully */ 6013 tx_count++; 6014 6015 /* send a copy of the frame to any BPF listeners */ 6016 BPF_MTAP(ifp, next); 6017 6018 tx_bd_avail = bxe_tx_avail(sc, fp); 6019 6020 /* handle any completions if we're running low */ 6021 if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { 6022 /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ 6023 bxe_txeof(sc, fp); 6024 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) { 6025 break; 6026 } 6027 } 6028 6029 next = drbr_dequeue(ifp, tx_br); 6030 } 6031 6032 /* all TX packets were dequeued and/or the tx ring is full */ 6033 if (tx_count > 0) { 6034 /* reset the TX watchdog timeout timer */ 6035 fp->watchdog_timer = BXE_TX_TIMEOUT; 6036 } 6037 6038bxe_tx_mq_start_locked_exit: 6039 6040 return (rc); 6041} 6042 6043/* Multiqueue (TSS) dispatch routine. 
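 * The mbuf's flowid (RSS hash) selects the fastpath queue below, so all
 * frames of a flow are serviced by the same TX ring and per-flow ordering
 * is preserved.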
*/ 6044static int 6045bxe_tx_mq_start(struct ifnet *ifp, 6046 struct mbuf *m) 6047{ 6048 struct bxe_softc *sc = ifp->if_softc; 6049 struct bxe_fastpath *fp; 6050 int fp_index, rc; 6051 6052 fp_index = 0; /* default is the first queue */ 6053 6054 /* check if flowid is set */ 6055 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) 6056 fp_index = (m->m_pkthdr.flowid % sc->num_queues); 6057 6058 fp = &sc->fp[fp_index]; 6059 6060 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 6061 BLOGW(sc, "Interface not running, ignoring transmit request\n"); 6062 return (ENETDOWN); 6063 } 6064 6065 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) { 6066 BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n"); 6067 return (EBUSY); 6068 } 6069 6070 if (!sc->link_vars.link_up) { 6071 BLOGW(sc, "Interface link is down, ignoring transmit request\n"); 6072 return (ENETDOWN); 6073 } 6074 6075 /* XXX change to TRYLOCK here and if failed then schedule taskqueue */ 6076 6077 BXE_FP_TX_LOCK(fp); 6078 rc = bxe_tx_mq_start_locked(sc, ifp, fp, m); 6079 BXE_FP_TX_UNLOCK(fp); 6080 6081 return (rc); 6082} 6083 6084static void 6085bxe_mq_flush(struct ifnet *ifp) 6086{ 6087 struct bxe_softc *sc = ifp->if_softc; 6088 struct bxe_fastpath *fp; 6089 struct mbuf *m; 6090 int i; 6091 6092 for (i = 0; i < sc->num_queues; i++) { 6093 fp = &sc->fp[i]; 6094 6095 if (fp->state != BXE_FP_STATE_OPEN) { 6096 BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n", 6097 fp->index, fp->state); 6098 continue; 6099 } 6100 6101 if (fp->tx_br != NULL) { 6102 BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index); 6103 BXE_FP_TX_LOCK(fp); 6104 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) { 6105 m_freem(m); 6106 } 6107 BXE_FP_TX_UNLOCK(fp); 6108 } 6109 } 6110 6111 if_qflush(ifp); 6112} 6113 6114#endif /* FreeBSD_version >= 800000 */ 6115 6116static uint16_t 6117bxe_cid_ilt_lines(struct bxe_softc *sc) 6118{ 6119 if (IS_SRIOV(sc)) { 6120 return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS); 6121 } 6122 return (L2_ILT_LINES(sc)); 6123} 6124 6125static void 6126bxe_ilt_set_info(struct bxe_softc *sc) 6127{ 6128 struct ilt_client_info *ilt_client; 6129 struct ecore_ilt *ilt = sc->ilt; 6130 uint16_t line = 0; 6131 6132 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); 6133 BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line); 6134 6135 /* CDU */ 6136 ilt_client = &ilt->clients[ILT_CLIENT_CDU]; 6137 ilt_client->client_num = ILT_CLIENT_CDU; 6138 ilt_client->page_size = CDU_ILT_PAGE_SZ; 6139 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 6140 ilt_client->start = line; 6141 line += bxe_cid_ilt_lines(sc); 6142 6143 if (CNIC_SUPPORT(sc)) { 6144 line += CNIC_ILT_LINES; 6145 } 6146 6147 ilt_client->end = (line - 1); 6148 6149 BLOGD(sc, DBG_LOAD, 6150 "ilt client[CDU]: start %d, end %d, " 6151 "psz 0x%x, flags 0x%x, hw psz %d\n", 6152 ilt_client->start, ilt_client->end, 6153 ilt_client->page_size, 6154 ilt_client->flags, 6155 ilog2(ilt_client->page_size >> 12)); 6156 6157 /* QM */ 6158 if (QM_INIT(sc->qm_cid_count)) { 6159 ilt_client = &ilt->clients[ILT_CLIENT_QM]; 6160 ilt_client->client_num = ILT_CLIENT_QM; 6161 ilt_client->page_size = QM_ILT_PAGE_SZ; 6162 ilt_client->flags = 0; 6163 ilt_client->start = line; 6164 6165 /* 4 bytes for each cid */ 6166 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, 6167 QM_ILT_PAGE_SZ); 6168 6169 ilt_client->end = (line - 1); 6170 6171 BLOGD(sc, DBG_LOAD, 6172 "ilt client[QM]: start %d, end %d, " 6173 "psz 0x%x, flags 0x%x, hw psz %d\n", 6174 ilt_client->start, ilt_client->end, 6175 
ilt_client->page_size, ilt_client->flags, 6176 ilog2(ilt_client->page_size >> 12)); 6177 } 6178 6179 if (CNIC_SUPPORT(sc)) { 6180 /* SRC */ 6181 ilt_client = &ilt->clients[ILT_CLIENT_SRC]; 6182 ilt_client->client_num = ILT_CLIENT_SRC; 6183 ilt_client->page_size = SRC_ILT_PAGE_SZ; 6184 ilt_client->flags = 0; 6185 ilt_client->start = line; 6186 line += SRC_ILT_LINES; 6187 ilt_client->end = (line - 1); 6188 6189 BLOGD(sc, DBG_LOAD, 6190 "ilt client[SRC]: start %d, end %d, " 6191 "psz 0x%x, flags 0x%x, hw psz %d\n", 6192 ilt_client->start, ilt_client->end, 6193 ilt_client->page_size, ilt_client->flags, 6194 ilog2(ilt_client->page_size >> 12)); 6195 6196 /* TM */ 6197 ilt_client = &ilt->clients[ILT_CLIENT_TM]; 6198 ilt_client->client_num = ILT_CLIENT_TM; 6199 ilt_client->page_size = TM_ILT_PAGE_SZ; 6200 ilt_client->flags = 0; 6201 ilt_client->start = line; 6202 line += TM_ILT_LINES; 6203 ilt_client->end = (line - 1); 6204 6205 BLOGD(sc, DBG_LOAD, 6206 "ilt client[TM]: start %d, end %d, " 6207 "psz 0x%x, flags 0x%x, hw psz %d\n", 6208 ilt_client->start, ilt_client->end, 6209 ilt_client->page_size, ilt_client->flags, 6210 ilog2(ilt_client->page_size >> 12)); 6211 } 6212 6213 KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!")); 6214} 6215 6216static void 6217bxe_set_fp_rx_buf_size(struct bxe_softc *sc) 6218{ 6219 int i; 6220 6221 BLOGD(sc, DBG_LOAD, "mtu = %d\n", sc->mtu); 6222 6223 for (i = 0; i < sc->num_queues; i++) { 6224 /* get the Rx buffer size for RX frames */ 6225 sc->fp[i].rx_buf_size = 6226 (IP_HEADER_ALIGNMENT_PADDING + 6227 ETH_OVERHEAD + 6228 sc->mtu); 6229 6230 BLOGD(sc, DBG_LOAD, "rx_buf_size for fp[%02d] = %d\n", 6231 i, sc->fp[i].rx_buf_size); 6232 6233 /* get the mbuf allocation size for RX frames */ 6234 if (sc->fp[i].rx_buf_size <= MCLBYTES) { 6235 sc->fp[i].mbuf_alloc_size = MCLBYTES; 6236 } else if (sc->fp[i].rx_buf_size <= BCM_PAGE_SIZE) { 6237 sc->fp[i].mbuf_alloc_size = PAGE_SIZE; 6238 } else { 6239 sc->fp[i].mbuf_alloc_size = MJUM9BYTES; 6240 } 6241 6242 BLOGD(sc, DBG_LOAD, "mbuf_alloc_size for fp[%02d] = %d\n", 6243 i, sc->fp[i].mbuf_alloc_size); 6244 } 6245} 6246 6247static int 6248bxe_alloc_ilt_mem(struct bxe_softc *sc) 6249{ 6250 int rc = 0; 6251 6252 if ((sc->ilt = 6253 (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt), 6254 M_BXE_ILT, 6255 (M_NOWAIT | M_ZERO))) == NULL) { 6256 rc = 1; 6257 } 6258 6259 return (rc); 6260} 6261 6262static int 6263bxe_alloc_ilt_lines_mem(struct bxe_softc *sc) 6264{ 6265 int rc = 0; 6266 6267 if ((sc->ilt->lines = 6268 (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES), 6269 M_BXE_ILT, 6270 (M_NOWAIT | M_ZERO))) == NULL) { 6271 rc = 1; 6272 } 6273 6274 return (rc); 6275} 6276 6277static void 6278bxe_free_ilt_mem(struct bxe_softc *sc) 6279{ 6280 if (sc->ilt != NULL) { 6281 free(sc->ilt, M_BXE_ILT); 6282 sc->ilt = NULL; 6283 } 6284} 6285 6286static void 6287bxe_free_ilt_lines_mem(struct bxe_softc *sc) 6288{ 6289 if (sc->ilt->lines != NULL) { 6290 free(sc->ilt->lines, M_BXE_ILT); 6291 sc->ilt->lines = NULL; 6292 } 6293} 6294 6295static void 6296bxe_free_mem(struct bxe_softc *sc) 6297{ 6298 int i; 6299 6300#if 0 6301 if (!CONFIGURE_NIC_MODE(sc)) { 6302 /* free searcher T2 table */ 6303 bxe_dma_free(sc, &sc->t2); 6304 } 6305#endif 6306 6307 for (i = 0; i < L2_ILT_LINES(sc); i++) { 6308 bxe_dma_free(sc, &sc->context[i].vcxt_dma); 6309 sc->context[i].vcxt = NULL; 6310 sc->context[i].size = 0; 6311 } 6312 6313 ecore_ilt_mem_op(sc, ILT_MEMOP_FREE); 6314 6315 bxe_free_ilt_lines_mem(sc); 6316 6317#if 0 6318 
bxe_iov_free_mem(sc); 6319#endif 6320} 6321 6322static int 6323bxe_alloc_mem(struct bxe_softc *sc) 6324{ 6325 int context_size; 6326 int allocated; 6327 int i; 6328 6329#if 0 6330 if (!CONFIGURE_NIC_MODE(sc)) { 6331 /* allocate searcher T2 table */ 6332 if (bxe_dma_alloc(sc, SRC_T2_SZ, 6333 &sc->t2, "searcher t2 table") != 0) { 6334 return (-1); 6335 } 6336 } 6337#endif 6338 6339 /* 6340 * Allocate memory for CDU context: 6341 * This memory is allocated separately and not in the generic ILT 6342 * functions because CDU differs in few aspects: 6343 * 1. There can be multiple entities allocating memory for context - 6344 * regular L2, CNIC, and SRIOV drivers. Each separately controls 6345 * its own ILT lines. 6346 * 2. Since CDU page-size is not a single 4KB page (which is the case 6347 * for the other ILT clients), to be efficient we want to support 6348 * allocation of sub-page-size in the last entry. 6349 * 3. Context pointers are used by the driver to pass to FW / update 6350 * the context (for the other ILT clients the pointers are used just to 6351 * free the memory during unload). 6352 */ 6353 context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc)); 6354 for (i = 0, allocated = 0; allocated < context_size; i++) { 6355 sc->context[i].size = min(CDU_ILT_PAGE_SZ, 6356 (context_size - allocated)); 6357 6358 if (bxe_dma_alloc(sc, sc->context[i].size, 6359 &sc->context[i].vcxt_dma, 6360 "cdu context") != 0) { 6361 bxe_free_mem(sc); 6362 return (-1); 6363 } 6364 6365 sc->context[i].vcxt = 6366 (union cdu_context *)sc->context[i].vcxt_dma.vaddr; 6367 6368 allocated += sc->context[i].size; 6369 } 6370 6371 bxe_alloc_ilt_lines_mem(sc); 6372 6373 BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n", 6374 sc->ilt, sc->ilt->start_line, sc->ilt->lines); 6375 { 6376 for (i = 0; i < 4; i++) { 6377 BLOGD(sc, DBG_LOAD, 6378 "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n", 6379 i, 6380 sc->ilt->clients[i].page_size, 6381 sc->ilt->clients[i].start, 6382 sc->ilt->clients[i].end, 6383 sc->ilt->clients[i].client_num, 6384 sc->ilt->clients[i].flags); 6385 } 6386 } 6387 if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) { 6388 BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n"); 6389 bxe_free_mem(sc); 6390 return (-1); 6391 } 6392 6393#if 0 6394 if (bxe_iov_alloc_mem(sc)) { 6395 BLOGE(sc, "Failed to allocate memory for SRIOV\n"); 6396 bxe_free_mem(sc); 6397 return (-1); 6398 } 6399#endif 6400 6401 return (0); 6402} 6403 6404static void 6405bxe_free_rx_bd_chain(struct bxe_fastpath *fp) 6406{ 6407 struct bxe_softc *sc; 6408 int i; 6409 6410 sc = fp->sc; 6411 6412 if (fp->rx_mbuf_tag == NULL) { 6413 return; 6414 } 6415 6416 /* free all mbufs and unload all maps */ 6417 for (i = 0; i < RX_BD_TOTAL; i++) { 6418 if (fp->rx_mbuf_chain[i].m_map != NULL) { 6419 bus_dmamap_sync(fp->rx_mbuf_tag, 6420 fp->rx_mbuf_chain[i].m_map, 6421 BUS_DMASYNC_POSTREAD); 6422 bus_dmamap_unload(fp->rx_mbuf_tag, 6423 fp->rx_mbuf_chain[i].m_map); 6424 } 6425 6426 if (fp->rx_mbuf_chain[i].m != NULL) { 6427 m_freem(fp->rx_mbuf_chain[i].m); 6428 fp->rx_mbuf_chain[i].m = NULL; 6429 fp->eth_q_stats.mbuf_alloc_rx--; 6430 } 6431 } 6432} 6433 6434static void 6435bxe_free_tpa_pool(struct bxe_fastpath *fp) 6436{ 6437 struct bxe_softc *sc; 6438 int i, max_agg_queues; 6439 6440 sc = fp->sc; 6441 6442 if (fp->rx_mbuf_tag == NULL) { 6443 return; 6444 } 6445 6446 max_agg_queues = MAX_AGG_QS(sc); 6447 6448 /* release all mbufs and unload all DMA maps in the TPA pool */ 6449 for (i = 0; i < max_agg_queues; i++) { 6450 if 
(fp->rx_tpa_info[i].bd.m_map != NULL) { 6451 bus_dmamap_sync(fp->rx_mbuf_tag, 6452 fp->rx_tpa_info[i].bd.m_map, 6453 BUS_DMASYNC_POSTREAD); 6454 bus_dmamap_unload(fp->rx_mbuf_tag, 6455 fp->rx_tpa_info[i].bd.m_map); 6456 } 6457 6458 if (fp->rx_tpa_info[i].bd.m != NULL) { 6459 m_freem(fp->rx_tpa_info[i].bd.m); 6460 fp->rx_tpa_info[i].bd.m = NULL; 6461 fp->eth_q_stats.mbuf_alloc_tpa--; 6462 } 6463 } 6464} 6465 6466static void 6467bxe_free_sge_chain(struct bxe_fastpath *fp) 6468{ 6469 struct bxe_softc *sc; 6470 int i; 6471 6472 sc = fp->sc; 6473 6474 if (fp->rx_sge_mbuf_tag == NULL) { 6475 return; 6476 } 6477 6478 /* free all mbufs and unload all maps */ 6479 for (i = 0; i < RX_SGE_TOTAL; i++) { 6480 if (fp->rx_sge_mbuf_chain[i].m_map != NULL) { 6481 bus_dmamap_sync(fp->rx_sge_mbuf_tag, 6482 fp->rx_sge_mbuf_chain[i].m_map, 6483 BUS_DMASYNC_POSTREAD); 6484 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 6485 fp->rx_sge_mbuf_chain[i].m_map); 6486 } 6487 6488 if (fp->rx_sge_mbuf_chain[i].m != NULL) { 6489 m_freem(fp->rx_sge_mbuf_chain[i].m); 6490 fp->rx_sge_mbuf_chain[i].m = NULL; 6491 fp->eth_q_stats.mbuf_alloc_sge--; 6492 } 6493 } 6494} 6495 6496static void 6497bxe_free_fp_buffers(struct bxe_softc *sc) 6498{ 6499 struct bxe_fastpath *fp; 6500 int i; 6501 6502 for (i = 0; i < sc->num_queues; i++) { 6503 fp = &sc->fp[i]; 6504 6505#if __FreeBSD_version >= 800000 6506 if (fp->tx_br != NULL) { 6507 struct mbuf *m; 6508 /* just in case bxe_mq_flush() wasn't called */ 6509 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) { 6510 m_freem(m); 6511 } 6512 buf_ring_free(fp->tx_br, M_DEVBUF); 6513 fp->tx_br = NULL; 6514 } 6515#endif 6516 6517 /* free all RX buffers */ 6518 bxe_free_rx_bd_chain(fp); 6519 bxe_free_tpa_pool(fp); 6520 bxe_free_sge_chain(fp); 6521 6522 if (fp->eth_q_stats.mbuf_alloc_rx != 0) { 6523 BLOGE(sc, "failed to claim all rx mbufs (%d left)\n", 6524 fp->eth_q_stats.mbuf_alloc_rx); 6525 } 6526 6527 if (fp->eth_q_stats.mbuf_alloc_sge != 0) { 6528 BLOGE(sc, "failed to claim all sge mbufs (%d left)\n", 6529 fp->eth_q_stats.mbuf_alloc_sge); 6530 } 6531 6532 if (fp->eth_q_stats.mbuf_alloc_tpa != 0) { 6533 BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n", 6534 fp->eth_q_stats.mbuf_alloc_tpa); 6535 } 6536 6537 if (fp->eth_q_stats.mbuf_alloc_tx != 0) { 6538 BLOGE(sc, "failed to release tx mbufs (%d left)\n", 6539 fp->eth_q_stats.mbuf_alloc_tx); 6540 } 6541 6542 /* XXX verify all mbufs were reclaimed */ 6543 6544 if (mtx_initialized(&fp->tx_mtx)) { 6545 mtx_destroy(&fp->tx_mtx); 6546 } 6547 6548 if (mtx_initialized(&fp->rx_mtx)) { 6549 mtx_destroy(&fp->rx_mtx); 6550 } 6551 } 6552} 6553 6554static int 6555bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, 6556 uint16_t prev_index, 6557 uint16_t index) 6558{ 6559 struct bxe_sw_rx_bd *rx_buf; 6560 struct eth_rx_bd *rx_bd; 6561 bus_dma_segment_t segs[1]; 6562 bus_dmamap_t map; 6563 struct mbuf *m; 6564 int nsegs, rc; 6565 6566 rc = 0; 6567 6568 /* allocate the new RX BD mbuf */ 6569 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); 6570 if (__predict_false(m == NULL)) { 6571 fp->eth_q_stats.mbuf_rx_bd_alloc_failed++; 6572 return (ENOBUFS); 6573 } 6574 6575 fp->eth_q_stats.mbuf_alloc_rx++; 6576 6577 /* initialize the mbuf buffer length */ 6578 m->m_pkthdr.len = m->m_len = fp->rx_buf_size; 6579 6580 /* map the mbuf into non-paged pool */ 6581 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, 6582 fp->rx_mbuf_spare_map, 6583 m, segs, &nsegs, BUS_DMA_NOWAIT); 6584 if (__predict_false(rc != 0)) { 6585 fp->eth_q_stats.mbuf_rx_bd_mapping_failed++; 6586 
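        /*
         * The load above used fp->rx_mbuf_spare_map, so the mbuf currently
         * posted at this ring slot and its mapping are still intact; only
         * the newly allocated mbuf needs to be released here.
         */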
m_freem(m); 6587 fp->eth_q_stats.mbuf_alloc_rx--; 6588 return (rc); 6589 } 6590 6591 /* all mbufs must map to a single segment */ 6592 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6593 6594 /* release any existing RX BD mbuf mappings */ 6595 6596 if (prev_index != index) { 6597 rx_buf = &fp->rx_mbuf_chain[prev_index]; 6598 6599 if (rx_buf->m_map != NULL) { 6600 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6601 BUS_DMASYNC_POSTREAD); 6602 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 6603 } 6604 6605 /* 6606 * We only get here from bxe_rxeof() when the maximum number 6607 * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already 6608 * holds the mbuf in the prev_index so it's OK to NULL it out 6609 * here without concern of a memory leak. 6610 */ 6611 fp->rx_mbuf_chain[prev_index].m = NULL; 6612 } 6613 6614 rx_buf = &fp->rx_mbuf_chain[index]; 6615 6616 if (rx_buf->m_map != NULL) { 6617 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6618 BUS_DMASYNC_POSTREAD); 6619 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); 6620 } 6621 6622 /* save the mbuf and mapping info for a future packet */ 6623 map = (prev_index != index) ? 6624 fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map; 6625 rx_buf->m_map = fp->rx_mbuf_spare_map; 6626 fp->rx_mbuf_spare_map = map; 6627 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, 6628 BUS_DMASYNC_PREREAD); 6629 rx_buf->m = m; 6630 6631 rx_bd = &fp->rx_chain[index]; 6632 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 6633 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 6634 6635 return (rc); 6636} 6637 6638static int 6639bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, 6640 int queue) 6641{ 6642 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; 6643 bus_dma_segment_t segs[1]; 6644 bus_dmamap_t map; 6645 struct mbuf *m; 6646 int nsegs; 6647 int rc = 0; 6648 6649 /* allocate the new TPA mbuf */ 6650 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); 6651 if (__predict_false(m == NULL)) { 6652 fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++; 6653 return (ENOBUFS); 6654 } 6655 6656 fp->eth_q_stats.mbuf_alloc_tpa++; 6657 6658 /* initialize the mbuf buffer length */ 6659 m->m_pkthdr.len = m->m_len = fp->rx_buf_size; 6660 6661 /* map the mbuf into non-paged pool */ 6662 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, 6663 fp->rx_tpa_info_mbuf_spare_map, 6664 m, segs, &nsegs, BUS_DMA_NOWAIT); 6665 if (__predict_false(rc != 0)) { 6666 fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++; 6667 m_free(m); 6668 fp->eth_q_stats.mbuf_alloc_tpa--; 6669 return (rc); 6670 } 6671 6672 /* all mbufs must map to a single segment */ 6673 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6674 6675 /* release any existing TPA mbuf mapping */ 6676 if (tpa_info->bd.m_map != NULL) { 6677 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, 6678 BUS_DMASYNC_POSTREAD); 6679 bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map); 6680 } 6681 6682 /* save the mbuf and mapping info for the TPA mbuf */ 6683 map = tpa_info->bd.m_map; 6684 tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map; 6685 fp->rx_tpa_info_mbuf_spare_map = map; 6686 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, 6687 BUS_DMASYNC_PREREAD); 6688 tpa_info->bd.m = m; 6689 tpa_info->seg = segs[0]; 6690 6691 return (rc); 6692} 6693 6694/* 6695 * Allocate an mbuf and assign it to the receive scatter gather chain. The 6696 * caller must take care to save a copy of the existing mbuf in the SG mbuf 6697 * chain. 
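 * As with the RX BD chain, the new mbuf is loaded into a spare DMA map
 * first, so a mapping failure leaves the currently posted SGE untouched.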
6698 */ 6699static int 6700bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, 6701 uint16_t index) 6702{ 6703 struct bxe_sw_rx_bd *sge_buf; 6704 struct eth_rx_sge *sge; 6705 bus_dma_segment_t segs[1]; 6706 bus_dmamap_t map; 6707 struct mbuf *m; 6708 int nsegs; 6709 int rc = 0; 6710 6711 /* allocate a new SGE mbuf */ 6712 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE); 6713 if (__predict_false(m == NULL)) { 6714 fp->eth_q_stats.mbuf_rx_sge_alloc_failed++; 6715 return (ENOMEM); 6716 } 6717 6718 fp->eth_q_stats.mbuf_alloc_sge++; 6719 6720 /* initialize the mbuf buffer length */ 6721 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE; 6722 6723 /* map the SGE mbuf into non-paged pool */ 6724 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag, 6725 fp->rx_sge_mbuf_spare_map, 6726 m, segs, &nsegs, BUS_DMA_NOWAIT); 6727 if (__predict_false(rc != 0)) { 6728 fp->eth_q_stats.mbuf_rx_sge_mapping_failed++; 6729 m_freem(m); 6730 fp->eth_q_stats.mbuf_alloc_sge--; 6731 return (rc); 6732 } 6733 6734 /* all mbufs must map to a single segment */ 6735 KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); 6736 6737 sge_buf = &fp->rx_sge_mbuf_chain[index]; 6738 6739 /* release any existing SGE mbuf mapping */ 6740 if (sge_buf->m_map != NULL) { 6741 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, 6742 BUS_DMASYNC_POSTREAD); 6743 bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map); 6744 } 6745 6746 /* save the mbuf and mapping info for a future packet */ 6747 map = sge_buf->m_map; 6748 sge_buf->m_map = fp->rx_sge_mbuf_spare_map; 6749 fp->rx_sge_mbuf_spare_map = map; 6750 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, 6751 BUS_DMASYNC_PREREAD); 6752 sge_buf->m = m; 6753 6754 sge = &fp->rx_sge_chain[index]; 6755 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr)); 6756 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr)); 6757 6758 return (rc); 6759} 6760 6761static __noinline int 6762bxe_alloc_fp_buffers(struct bxe_softc *sc) 6763{ 6764 struct bxe_fastpath *fp; 6765 int i, j, rc = 0; 6766 int ring_prod, cqe_ring_prod; 6767 int max_agg_queues; 6768 6769 for (i = 0; i < sc->num_queues; i++) { 6770 fp = &sc->fp[i]; 6771 6772#if __FreeBSD_version >= 800000 6773 fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF, 6774 M_DONTWAIT, &fp->tx_mtx); 6775 if (fp->tx_br == NULL) { 6776 BLOGE(sc, "buf_ring alloc fail for fp[%02d]\n", i); 6777 goto bxe_alloc_fp_buffers_error; 6778 } 6779#endif 6780 6781 ring_prod = cqe_ring_prod = 0; 6782 fp->rx_bd_cons = 0; 6783 fp->rx_cq_cons = 0; 6784 6785 /* allocate buffers for the RX BDs in RX BD chain */ 6786 for (j = 0; j < sc->max_rx_bufs; j++) { 6787 rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod); 6788 if (rc != 0) { 6789 BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", 6790 i, rc); 6791 goto bxe_alloc_fp_buffers_error; 6792 } 6793 6794 ring_prod = RX_BD_NEXT(ring_prod); 6795 cqe_ring_prod = RCQ_NEXT(cqe_ring_prod); 6796 } 6797 6798 fp->rx_bd_prod = ring_prod; 6799 fp->rx_cq_prod = cqe_ring_prod; 6800 fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0; 6801 6802 if (sc->ifnet->if_capenable & IFCAP_LRO) { 6803 max_agg_queues = MAX_AGG_QS(sc); 6804 6805 fp->tpa_enable = TRUE; 6806 6807 /* fill the TPA pool */ 6808 for (j = 0; j < max_agg_queues; j++) { 6809 rc = bxe_alloc_rx_tpa_mbuf(fp, j); 6810 if (rc != 0) { 6811 BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n", 6812 i, j); 6813 fp->tpa_enable = FALSE; 6814 goto bxe_alloc_fp_buffers_error; 6815 } 6816 6817 fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP; 6818 } 6819 6820 if (fp->tpa_enable) { 6821 
/* fill the RX SGE chain */ 6822 ring_prod = 0; 6823 for (j = 0; j < RX_SGE_USABLE; j++) { 6824 rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod); 6825 if (rc != 0) { 6826 BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n", 6827 i, ring_prod); 6828 fp->tpa_enable = FALSE; 6829 ring_prod = 0; 6830 goto bxe_alloc_fp_buffers_error; 6831 } 6832 6833 ring_prod = RX_SGE_NEXT(ring_prod); 6834 } 6835 6836 fp->rx_sge_prod = ring_prod; 6837 } 6838 } 6839 } 6840 6841 return (0); 6842 6843bxe_alloc_fp_buffers_error: 6844 6845 /* unwind what was already allocated */ 6846 bxe_free_rx_bd_chain(fp); 6847 bxe_free_tpa_pool(fp); 6848 bxe_free_sge_chain(fp); 6849 6850 return (ENOBUFS); 6851} 6852 6853static void 6854bxe_free_fw_stats_mem(struct bxe_softc *sc) 6855{ 6856 bxe_dma_free(sc, &sc->fw_stats_dma); 6857 6858 sc->fw_stats_num = 0; 6859 6860 sc->fw_stats_req_size = 0; 6861 sc->fw_stats_req = NULL; 6862 sc->fw_stats_req_mapping = 0; 6863 6864 sc->fw_stats_data_size = 0; 6865 sc->fw_stats_data = NULL; 6866 sc->fw_stats_data_mapping = 0; 6867} 6868 6869static int 6870bxe_alloc_fw_stats_mem(struct bxe_softc *sc) 6871{ 6872 uint8_t num_queue_stats; 6873 int num_groups; 6874 6875 /* number of queues for statistics is number of eth queues */ 6876 num_queue_stats = BXE_NUM_ETH_QUEUES(sc); 6877 6878 /* 6879 * Total number of FW statistics requests = 6880 * 1 for port stats + 1 for PF stats + num of queues 6881 */ 6882 sc->fw_stats_num = (2 + num_queue_stats); 6883 6884 /* 6885 * Request is built from stats_query_header and an array of 6886 * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT 6887 * rules. The real number of requests is configured in the 6888 * stats_query_header. 6889 */ 6890 num_groups = 6891 ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) + 6892 ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0)); 6893 6894 BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n", 6895 sc->fw_stats_num, num_groups); 6896 6897 sc->fw_stats_req_size = 6898 (sizeof(struct stats_query_header) + 6899 (num_groups * sizeof(struct stats_query_cmd_group))); 6900 6901 /* 6902 * Data for statistics requests + stats_counter. 6903 * stats_counter holds per-STORM counters that are incremented when 6904 * STORM has finished with the current request. Memory for FCoE 6905 * offloaded statistics is counted anyway, even if it will not be sent. 6906 * VF stats are not accounted for here as the data of VF stats is stored 6907 * in memory allocated by the VF, not here. 
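 * The request and data regions are carved from the single DMA block
 * allocated below; the shortcut pointers are simple offsets into it.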
6908 */ 6909 sc->fw_stats_data_size = 6910 (sizeof(struct stats_counter) + 6911 sizeof(struct per_port_stats) + 6912 sizeof(struct per_pf_stats) + 6913 /* sizeof(struct fcoe_statistics_params) + */ 6914 (sizeof(struct per_queue_stats) * num_queue_stats)); 6915 6916 if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size), 6917 &sc->fw_stats_dma, "fw stats") != 0) { 6918 bxe_free_fw_stats_mem(sc); 6919 return (-1); 6920 } 6921 6922 /* set up the shortcuts */ 6923 6924 sc->fw_stats_req = 6925 (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr; 6926 sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr; 6927 6928 sc->fw_stats_data = 6929 (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr + 6930 sc->fw_stats_req_size); 6931 sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr + 6932 sc->fw_stats_req_size); 6933 6934 BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n", 6935 (uintmax_t)sc->fw_stats_req_mapping); 6936 6937 BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n", 6938 (uintmax_t)sc->fw_stats_data_mapping); 6939 6940 return (0); 6941} 6942 6943/* 6944 * Bits map: 6945 * 0-7 - Engine0 load counter. 6946 * 8-15 - Engine1 load counter. 6947 * 16 - Engine0 RESET_IN_PROGRESS bit. 6948 * 17 - Engine1 RESET_IN_PROGRESS bit. 6949 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active 6950 * function on the engine 6951 * 19 - Engine1 ONE_IS_LOADED. 6952 * 20 - Chip reset flow bit. When set, a non-leader must wait for the leader 6953 * on both engines to complete (check for both RESET_IN_PROGRESS bits, 6954 * not just the one belonging to its engine). 6955 */ 6956#define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 6957#define BXE_PATH0_LOAD_CNT_MASK 0x000000ff 6958#define BXE_PATH0_LOAD_CNT_SHIFT 0 6959#define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00 6960#define BXE_PATH1_LOAD_CNT_SHIFT 8 6961#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000 6962#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000 6963#define BXE_GLOBAL_RESET_BIT 0x00040000 6964 6965/* set the GLOBAL_RESET bit, should be run under rtnl lock */ 6966static void 6967bxe_set_reset_global(struct bxe_softc *sc) 6968{ 6969 uint32_t val; 6970 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6971 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6972 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT); 6973 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6974} 6975 6976/* clear the GLOBAL_RESET bit, should be run under rtnl lock */ 6977static void 6978bxe_clear_reset_global(struct bxe_softc *sc) 6979{ 6980 uint32_t val; 6981 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6982 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6983 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT)); 6984 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 6985} 6986 6987/* checks the GLOBAL_RESET bit, should be run under rtnl lock */ 6988static uint8_t 6989bxe_reset_is_global(struct bxe_softc *sc) 6990{ 6991 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 6992 BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val); 6993 return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE; 6994} 6995 6996/* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */ 6997static void 6998bxe_set_reset_done(struct bxe_softc *sc) 6999{ 7000 uint32_t val; 7001 uint32_t bit = SC_PATH(sc) ? 
BXE_PATH1_RST_IN_PROG_BIT : 7002 BXE_PATH0_RST_IN_PROG_BIT; 7003 7004 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7005 7006 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7007 /* Clear the bit */ 7008 val &= ~bit; 7009 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7010 7011 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7012} 7013 7014/* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */ 7015static void 7016bxe_set_reset_in_progress(struct bxe_softc *sc) 7017{ 7018 uint32_t val; 7019 uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : 7020 BXE_PATH0_RST_IN_PROG_BIT; 7021 7022 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7023 7024 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7025 /* Set the bit */ 7026 val |= bit; 7027 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7028 7029 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7030} 7031 7032/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */ 7033static uint8_t 7034bxe_reset_is_done(struct bxe_softc *sc, 7035 int engine) 7036{ 7037 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7038 uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT : 7039 BXE_PATH0_RST_IN_PROG_BIT; 7040 7041 /* return false if bit is set */ 7042 return (val & bit) ? FALSE : TRUE; 7043} 7044 7045/* get the load status for an engine, should be run under rtnl lock */ 7046static uint8_t 7047bxe_get_load_status(struct bxe_softc *sc, 7048 int engine) 7049{ 7050 uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK : 7051 BXE_PATH0_LOAD_CNT_MASK; 7052 uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT : 7053 BXE_PATH0_LOAD_CNT_SHIFT; 7054 uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7055 7056 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); 7057 7058 val = ((val & mask) >> shift); 7059 7060 BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val); 7061 7062 return (val != 0); 7063} 7064 7065/* set pf load mark */ 7066/* XXX needs to be under rtnl lock */ 7067static void 7068bxe_set_pf_load(struct bxe_softc *sc) 7069{ 7070 uint32_t val; 7071 uint32_t val1; 7072 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : 7073 BXE_PATH0_LOAD_CNT_MASK; 7074 uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT : 7075 BXE_PATH0_LOAD_CNT_SHIFT; 7076 7077 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7078 7079 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7080 BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); 7081 7082 /* get the current counter value */ 7083 val1 = ((val & mask) >> shift); 7084 7085 /* set bit of this PF */ 7086 val1 |= (1 << SC_ABS_FUNC(sc)); 7087 7088 /* clear the old value */ 7089 val &= ~mask; 7090 7091 /* set the new one */ 7092 val |= ((val1 << shift) & mask); 7093 7094 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7095 7096 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7097} 7098 7099/* clear pf load mark */ 7100/* XXX needs to be under rtnl lock */ 7101static uint8_t 7102bxe_clear_pf_load(struct bxe_softc *sc) 7103{ 7104 uint32_t val1, val; 7105 uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : 7106 BXE_PATH0_LOAD_CNT_MASK; 7107 uint32_t shift = SC_PATH(sc) ? 
BXE_PATH1_LOAD_CNT_SHIFT : 7108 BXE_PATH0_LOAD_CNT_SHIFT; 7109 7110 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7111 val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); 7112 BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val); 7113 7114 /* get the current counter value */ 7115 val1 = (val & mask) >> shift; 7116 7117 /* clear bit of that PF */ 7118 val1 &= ~(1 << SC_ABS_FUNC(sc)); 7119 7120 /* clear the old value */ 7121 val &= ~mask; 7122 7123 /* set the new one */ 7124 val |= ((val1 << shift) & mask); 7125 7126 REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); 7127 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); 7128 return (val1 != 0); 7129} 7130 7131/* send load request to mcp and analyze response */ 7132static int 7133bxe_nic_load_request(struct bxe_softc *sc, 7134 uint32_t *load_code) 7135{ 7136 /* init fw_seq */ 7137 sc->fw_seq = 7138 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 7139 DRV_MSG_SEQ_NUMBER_MASK); 7140 7141 BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq); 7142 7143 /* get the current FW pulse sequence */ 7144 sc->fw_drv_pulse_wr_seq = 7145 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) & 7146 DRV_PULSE_SEQ_MASK); 7147 7148 BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n", 7149 sc->fw_drv_pulse_wr_seq); 7150 7151 /* load request */ 7152 (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 7153 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 7154 7155 /* if the MCP fails to respond we must abort */ 7156 if (!(*load_code)) { 7157 BLOGE(sc, "MCP response failure!\n"); 7158 return (-1); 7159 } 7160 7161 /* if MCP refused then must abort */ 7162 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { 7163 BLOGE(sc, "MCP refused load request\n"); 7164 return (-1); 7165 } 7166 7167 return (0); 7168} 7169 7170/* 7171 * Check whether another PF has already loaded FW to chip. In virtualized 7172 * environments a PF from another VM may have already initialized the device 7173 * including loading FW. 7174 */ 7175static int 7176bxe_nic_load_analyze_req(struct bxe_softc *sc, 7177 uint32_t load_code) 7178{ 7179 uint32_t my_fw, loaded_fw; 7180 7181 /* is another pf loaded on this engine? */ 7182 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 7183 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 7184 /* build my FW version dword */ 7185 my_fw = (BCM_5710_FW_MAJOR_VERSION + 7186 (BCM_5710_FW_MINOR_VERSION << 8 ) + 7187 (BCM_5710_FW_REVISION_VERSION << 16) + 7188 (BCM_5710_FW_ENGINEERING_VERSION << 24)); 7189 7190 /* read loaded FW from chip */ 7191 loaded_fw = REG_RD(sc, XSEM_REG_PRAM); 7192 BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n", 7193 loaded_fw, my_fw); 7194 7195 /* abort nic load if version mismatch */ 7196 if (my_fw != loaded_fw) { 7197 BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)\n", 7198 loaded_fw, my_fw); 7199 return (-1); 7200 } 7201 } 7202 7203 return (0); 7204} 7205 7206/* mark PMF if applicable */ 7207static void 7208bxe_nic_load_pmf(struct bxe_softc *sc, 7209 uint32_t load_code) 7210{ 7211 uint32_t ncsi_oem_data_addr; 7212 7213 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || 7214 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || 7215 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { 7216 /* 7217 * Barrier here for ordering between the writing to sc->port.pmf here 7218 * and reading it from the periodic task. 7219 */ 7220 sc->port.pmf = 1; 7221 mb(); 7222 } else { 7223 sc->port.pmf = 0; 7224 } 7225 7226 BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf); 7227 7228 /* XXX needed? 
*/ 7229 if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) { 7230 if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) { 7231 ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr); 7232 if (ncsi_oem_data_addr) { 7233 REG_WR(sc, 7234 (ncsi_oem_data_addr + 7235 offsetof(struct glob_ncsi_oem_data, driver_version)), 7236 0); 7237 } 7238 } 7239 } 7240} 7241 7242static void 7243bxe_read_mf_cfg(struct bxe_softc *sc) 7244{ 7245 int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1); 7246 int abs_func; 7247 int vn; 7248 7249 if (BXE_NOMCP(sc)) { 7250 return; /* what should be the default value in this case */ 7251 } 7252 7253 /* 7254 * The formula for computing the absolute function number is... 7255 * For 2 port configuration (4 functions per port): 7256 * abs_func = 2 * vn + SC_PORT + SC_PATH 7257 * For 4 port configuration (2 functions per port): 7258 * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH 7259 */ 7260 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 7261 abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc)); 7262 if (abs_func >= E1H_FUNC_MAX) { 7263 break; 7264 } 7265 sc->devinfo.mf_info.mf_config[vn] = 7266 MFCFG_RD(sc, func_mf_config[abs_func].config); 7267 } 7268 7269 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & 7270 FUNC_MF_CFG_FUNC_DISABLED) { 7271 BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n"); 7272 sc->flags |= BXE_MF_FUNC_DIS; 7273 } else { 7274 BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n"); 7275 sc->flags &= ~BXE_MF_FUNC_DIS; 7276 } 7277} 7278 7279/* acquire split MCP access lock register */ 7280static int bxe_acquire_alr(struct bxe_softc *sc) 7281{ 7282 uint32_t j, val; 7283 7284 for (j = 0; j < 1000; j++) { 7285 val = (1UL << 31); 7286 REG_WR(sc, GRCBASE_MCP + 0x9c, val); 7287 val = REG_RD(sc, GRCBASE_MCP + 0x9c); 7288 if (val & (1L << 31)) 7289 break; 7290 7291 DELAY(5000); 7292 } 7293 7294 if (!(val & (1L << 31))) { 7295 BLOGE(sc, "Cannot acquire MCP access lock register\n"); 7296 return (-1); 7297 } 7298 7299 return (0); 7300} 7301 7302/* release split MCP access lock register */ 7303static void bxe_release_alr(struct bxe_softc *sc) 7304{ 7305 REG_WR(sc, GRCBASE_MCP + 0x9c, 0); 7306} 7307 7308static void 7309bxe_fan_failure(struct bxe_softc *sc) 7310{ 7311 int port = SC_PORT(sc); 7312 uint32_t ext_phy_config; 7313 7314 /* mark the failure */ 7315 ext_phy_config = 7316 SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); 7317 7318 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; 7319 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; 7320 SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config, 7321 ext_phy_config); 7322 7323 /* log the failure */ 7324 BLOGW(sc, "Fan Failure has caused the driver to shut down " 7325 "the card to prevent permanent damage. " 7326 "Please contact OEM Support for assistance\n"); 7327 7328 /* XXX */ 7329#if 1 7330 bxe_panic(sc, ("Schedule task to handle fan failure\n")); 7331#else 7332 /* 7333 * Schedule device reset (unload). 7334 * Some boards draw enough power while the driver is 7335 * up to overheat if the fan fails. 
7336 */ 7337 bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state); 7338 schedule_delayed_work(&sc->sp_rtnl_task, 0); 7339#endif 7340} 7341 7342/* this function is called upon a link interrupt */ 7343static void 7344bxe_link_attn(struct bxe_softc *sc) 7345{ 7346 uint32_t pause_enabled = 0; 7347 struct host_port_stats *pstats; 7348 int cmng_fns; 7349 7350 /* Make sure that we are synced with the current statistics */ 7351 bxe_stats_handle(sc, STATS_EVENT_STOP); 7352 7353 elink_link_update(&sc->link_params, &sc->link_vars); 7354 7355 if (sc->link_vars.link_up) { 7356 7357 /* dropless flow control */ 7358 if (!CHIP_IS_E1(sc) && sc->dropless_fc) { 7359 pause_enabled = 0; 7360 7361 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 7362 pause_enabled = 1; 7363 } 7364 7365 REG_WR(sc, 7366 (BAR_USTRORM_INTMEM + 7367 USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), 7368 pause_enabled); 7369 } 7370 7371 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { 7372 pstats = BXE_SP(sc, port_stats); 7373 /* reset old mac stats */ 7374 memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx)); 7375 } 7376 7377 if (sc->state == BXE_STATE_OPEN) { 7378 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 7379 } 7380 } 7381 7382 if (sc->link_vars.link_up && sc->link_vars.line_speed) { 7383 cmng_fns = bxe_get_cmng_fns_mode(sc); 7384 7385 if (cmng_fns != CMNG_FNS_NONE) { 7386 bxe_cmng_fns_init(sc, FALSE, cmng_fns); 7387 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 7388 } else { 7389 /* rate shaping and fairness are disabled */ 7390 BLOGD(sc, DBG_LOAD, "single function mode without fairness\n"); 7391 } 7392 } 7393 7394 bxe_link_report_locked(sc); 7395 7396 if (IS_MF(sc)) { 7397 ; // XXX bxe_link_sync_notify(sc); 7398 } 7399} 7400 7401static void 7402bxe_attn_int_asserted(struct bxe_softc *sc, 7403 uint32_t asserted) 7404{ 7405 int port = SC_PORT(sc); 7406 uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 7407 MISC_REG_AEU_MASK_ATTN_FUNC_0; 7408 uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 7409 NIG_REG_MASK_INTERRUPT_PORT0; 7410 uint32_t aeu_mask; 7411 uint32_t nig_mask = 0; 7412 uint32_t reg_addr; 7413 uint32_t igu_acked; 7414 uint32_t cnt; 7415 7416 if (sc->attn_state & asserted) { 7417 BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted); 7418 } 7419 7420 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 7421 7422 aeu_mask = REG_RD(sc, aeu_addr); 7423 7424 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n", 7425 aeu_mask, asserted); 7426 7427 aeu_mask &= ~(asserted & 0x3ff); 7428 7429 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); 7430 7431 REG_WR(sc, aeu_addr, aeu_mask); 7432 7433 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 7434 7435 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); 7436 sc->attn_state |= asserted; 7437 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); 7438 7439 if (asserted & ATTN_HARD_WIRED_MASK) { 7440 if (asserted & ATTN_NIG_FOR_FUNC) { 7441 7442 BXE_PHY_LOCK(sc); 7443 7444 /* save nig interrupt mask */ 7445 nig_mask = REG_RD(sc, nig_int_mask_addr); 7446 7447 /* If nig_mask is not set, no need to call the update function */ 7448 if (nig_mask) { 7449 REG_WR(sc, nig_int_mask_addr, 0); 7450 7451 bxe_link_attn(sc); 7452 } 7453 7454 /* handle unicore attn? 
*/ 7455 } 7456 7457 if (asserted & ATTN_SW_TIMER_4_FUNC) { 7458 BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n"); 7459 } 7460 7461 if (asserted & GPIO_2_FUNC) { 7462 BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n"); 7463 } 7464 7465 if (asserted & GPIO_3_FUNC) { 7466 BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n"); 7467 } 7468 7469 if (asserted & GPIO_4_FUNC) { 7470 BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n"); 7471 } 7472 7473 if (port == 0) { 7474 if (asserted & ATTN_GENERAL_ATTN_1) { 7475 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n"); 7476 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); 7477 } 7478 if (asserted & ATTN_GENERAL_ATTN_2) { 7479 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n"); 7480 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); 7481 } 7482 if (asserted & ATTN_GENERAL_ATTN_3) { 7483 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n"); 7484 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); 7485 } 7486 } else { 7487 if (asserted & ATTN_GENERAL_ATTN_4) { 7488 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n"); 7489 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); 7490 } 7491 if (asserted & ATTN_GENERAL_ATTN_5) { 7492 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n"); 7493 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); 7494 } 7495 if (asserted & ATTN_GENERAL_ATTN_6) { 7496 BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n"); 7497 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); 7498 } 7499 } 7500 } /* hardwired */ 7501 7502 if (sc->devinfo.int_block == INT_BLOCK_HC) { 7503 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET); 7504 } else { 7505 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); 7506 } 7507 7508 BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n", 7509 asserted, 7510 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 7511 REG_WR(sc, reg_addr, asserted); 7512 7513 /* now set back the mask */ 7514 if (asserted & ATTN_NIG_FOR_FUNC) { 7515 /* 7516 * Verify that IGU ack through BAR was written before restoring 7517 * NIG mask. This loop should exit after 2-3 iterations max. 7518 */ 7519 if (sc->devinfo.int_block != INT_BLOCK_HC) { 7520 cnt = 0; 7521 7522 do { 7523 igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS); 7524 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && 7525 (++cnt < MAX_IGU_ATTN_ACK_TO)); 7526 7527 if (!igu_acked) { 7528 BLOGE(sc, "Failed to verify IGU ack on time\n"); 7529 } 7530 7531 mb(); 7532 } 7533 7534 REG_WR(sc, nig_int_mask_addr, nig_mask); 7535 7536 BXE_PHY_UNLOCK(sc); 7537 } 7538} 7539 7540static void 7541bxe_print_next_block(struct bxe_softc *sc, 7542 int idx, 7543 const char *blk) 7544{ 7545 BLOGI(sc, "%s%s", idx ? 
", " : "", blk); 7546} 7547 7548static int 7549bxe_check_blocks_with_parity0(struct bxe_softc *sc, 7550 uint32_t sig, 7551 int par_num, 7552 uint8_t print) 7553{ 7554 uint32_t cur_bit = 0; 7555 int i = 0; 7556 7557 for (i = 0; sig; i++) { 7558 cur_bit = ((uint32_t)0x1 << i); 7559 if (sig & cur_bit) { 7560 switch (cur_bit) { 7561 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 7562 if (print) 7563 bxe_print_next_block(sc, par_num++, "BRB"); 7564 break; 7565 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 7566 if (print) 7567 bxe_print_next_block(sc, par_num++, "PARSER"); 7568 break; 7569 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 7570 if (print) 7571 bxe_print_next_block(sc, par_num++, "TSDM"); 7572 break; 7573 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 7574 if (print) 7575 bxe_print_next_block(sc, par_num++, "SEARCHER"); 7576 break; 7577 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 7578 if (print) 7579 bxe_print_next_block(sc, par_num++, "TCM"); 7580 break; 7581 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 7582 if (print) 7583 bxe_print_next_block(sc, par_num++, "TSEMI"); 7584 break; 7585 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 7586 if (print) 7587 bxe_print_next_block(sc, par_num++, "XPB"); 7588 break; 7589 } 7590 7591 /* Clear the bit */ 7592 sig &= ~cur_bit; 7593 } 7594 } 7595 7596 return (par_num); 7597} 7598 7599static int 7600bxe_check_blocks_with_parity1(struct bxe_softc *sc, 7601 uint32_t sig, 7602 int par_num, 7603 uint8_t *global, 7604 uint8_t print) 7605{ 7606 int i = 0; 7607 uint32_t cur_bit = 0; 7608 for (i = 0; sig; i++) { 7609 cur_bit = ((uint32_t)0x1 << i); 7610 if (sig & cur_bit) { 7611 switch (cur_bit) { 7612 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 7613 if (print) 7614 bxe_print_next_block(sc, par_num++, "PBF"); 7615 break; 7616 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 7617 if (print) 7618 bxe_print_next_block(sc, par_num++, "QM"); 7619 break; 7620 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 7621 if (print) 7622 bxe_print_next_block(sc, par_num++, "TM"); 7623 break; 7624 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 7625 if (print) 7626 bxe_print_next_block(sc, par_num++, "XSDM"); 7627 break; 7628 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 7629 if (print) 7630 bxe_print_next_block(sc, par_num++, "XCM"); 7631 break; 7632 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 7633 if (print) 7634 bxe_print_next_block(sc, par_num++, "XSEMI"); 7635 break; 7636 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 7637 if (print) 7638 bxe_print_next_block(sc, par_num++, "DOORBELLQ"); 7639 break; 7640 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 7641 if (print) 7642 bxe_print_next_block(sc, par_num++, "NIG"); 7643 break; 7644 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 7645 if (print) 7646 bxe_print_next_block(sc, par_num++, "VAUX PCI CORE"); 7647 *global = TRUE; 7648 break; 7649 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 7650 if (print) 7651 bxe_print_next_block(sc, par_num++, "DEBUG"); 7652 break; 7653 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 7654 if (print) 7655 bxe_print_next_block(sc, par_num++, "USDM"); 7656 break; 7657 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 7658 if (print) 7659 bxe_print_next_block(sc, par_num++, "UCM"); 7660 break; 7661 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 7662 if (print) 7663 bxe_print_next_block(sc, par_num++, "USEMI"); 7664 break; 7665 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 7666 if (print) 7667 bxe_print_next_block(sc, par_num++, "UPB"); 7668 break; 7669 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 7670 if 
(print) 7671 bxe_print_next_block(sc, par_num++, "CSDM"); 7672 break; 7673 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 7674 if (print) 7675 bxe_print_next_block(sc, par_num++, "CCM"); 7676 break; 7677 } 7678 7679 /* Clear the bit */ 7680 sig &= ~cur_bit; 7681 } 7682 } 7683 7684 return (par_num); 7685} 7686 7687static int 7688bxe_check_blocks_with_parity2(struct bxe_softc *sc, 7689 uint32_t sig, 7690 int par_num, 7691 uint8_t print) 7692{ 7693 uint32_t cur_bit = 0; 7694 int i = 0; 7695 7696 for (i = 0; sig; i++) { 7697 cur_bit = ((uint32_t)0x1 << i); 7698 if (sig & cur_bit) { 7699 switch (cur_bit) { 7700 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 7701 if (print) 7702 bxe_print_next_block(sc, par_num++, "CSEMI"); 7703 break; 7704 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 7705 if (print) 7706 bxe_print_next_block(sc, par_num++, "PXP"); 7707 break; 7708 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 7709 if (print) 7710 bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT"); 7711 break; 7712 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 7713 if (print) 7714 bxe_print_next_block(sc, par_num++, "CFC"); 7715 break; 7716 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 7717 if (print) 7718 bxe_print_next_block(sc, par_num++, "CDU"); 7719 break; 7720 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 7721 if (print) 7722 bxe_print_next_block(sc, par_num++, "DMAE"); 7723 break; 7724 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 7725 if (print) 7726 bxe_print_next_block(sc, par_num++, "IGU"); 7727 break; 7728 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 7729 if (print) 7730 bxe_print_next_block(sc, par_num++, "MISC"); 7731 break; 7732 } 7733 7734 /* Clear the bit */ 7735 sig &= ~cur_bit; 7736 } 7737 } 7738 7739 return (par_num); 7740} 7741 7742static int 7743bxe_check_blocks_with_parity3(struct bxe_softc *sc, 7744 uint32_t sig, 7745 int par_num, 7746 uint8_t *global, 7747 uint8_t print) 7748{ 7749 uint32_t cur_bit = 0; 7750 int i = 0; 7751 7752 for (i = 0; sig; i++) { 7753 cur_bit = ((uint32_t)0x1 << i); 7754 if (sig & cur_bit) { 7755 switch (cur_bit) { 7756 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 7757 if (print) 7758 bxe_print_next_block(sc, par_num++, "MCP ROM"); 7759 *global = TRUE; 7760 break; 7761 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 7762 if (print) 7763 bxe_print_next_block(sc, par_num++, 7764 "MCP UMP RX"); 7765 *global = TRUE; 7766 break; 7767 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 7768 if (print) 7769 bxe_print_next_block(sc, par_num++, 7770 "MCP UMP TX"); 7771 *global = TRUE; 7772 break; 7773 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 7774 if (print) 7775 bxe_print_next_block(sc, par_num++, 7776 "MCP SCPAD"); 7777 *global = TRUE; 7778 break; 7779 } 7780 7781 /* Clear the bit */ 7782 sig &= ~cur_bit; 7783 } 7784 } 7785 7786 return (par_num); 7787} 7788 7789static int 7790bxe_check_blocks_with_parity4(struct bxe_softc *sc, 7791 uint32_t sig, 7792 int par_num, 7793 uint8_t print) 7794{ 7795 uint32_t cur_bit = 0; 7796 int i = 0; 7797 7798 for (i = 0; sig; i++) { 7799 cur_bit = ((uint32_t)0x1 << i); 7800 if (sig & cur_bit) { 7801 switch (cur_bit) { 7802 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 7803 if (print) 7804 bxe_print_next_block(sc, par_num++, "PGLUE_B"); 7805 break; 7806 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 7807 if (print) 7808 bxe_print_next_block(sc, par_num++, "ATC"); 7809 break; 7810 } 7811 7812 /* Clear the bit */ 7813 sig &= ~cur_bit; 7814 } 7815 } 7816 7817 return (par_num); 7818} 7819 7820static uint8_t 
7821bxe_parity_attn(struct bxe_softc *sc, 7822 uint8_t *global, 7823 uint8_t print, 7824 uint32_t *sig) 7825{ 7826 int par_num = 0; 7827 7828 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 7829 (sig[1] & HW_PRTY_ASSERT_SET_1) || 7830 (sig[2] & HW_PRTY_ASSERT_SET_2) || 7831 (sig[3] & HW_PRTY_ASSERT_SET_3) || 7832 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 7833 BLOGE(sc, "Parity error: HW block parity attention:\n" 7834 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", 7835 (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0), 7836 (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1), 7837 (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2), 7838 (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3), 7839 (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4)); 7840 7841 if (print) 7842 BLOGI(sc, "Parity errors detected in blocks: "); 7843 7844 par_num = 7845 bxe_check_blocks_with_parity0(sc, sig[0] & 7846 HW_PRTY_ASSERT_SET_0, 7847 par_num, print); 7848 par_num = 7849 bxe_check_blocks_with_parity1(sc, sig[1] & 7850 HW_PRTY_ASSERT_SET_1, 7851 par_num, global, print); 7852 par_num = 7853 bxe_check_blocks_with_parity2(sc, sig[2] & 7854 HW_PRTY_ASSERT_SET_2, 7855 par_num, print); 7856 par_num = 7857 bxe_check_blocks_with_parity3(sc, sig[3] & 7858 HW_PRTY_ASSERT_SET_3, 7859 par_num, global, print); 7860 par_num = 7861 bxe_check_blocks_with_parity4(sc, sig[4] & 7862 HW_PRTY_ASSERT_SET_4, 7863 par_num, print); 7864 7865 if (print) 7866 BLOGI(sc, "\n"); 7867 7868 return (TRUE); 7869 } 7870 7871 return (FALSE); 7872} 7873 7874static uint8_t 7875bxe_chk_parity_attn(struct bxe_softc *sc, 7876 uint8_t *global, 7877 uint8_t print) 7878{ 7879 struct attn_route attn = { {0} }; 7880 int port = SC_PORT(sc); 7881 7882 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 7883 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 7884 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 7885 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 7886 7887 if (!CHIP_IS_E1x(sc)) 7888 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 7889 7890 return (bxe_parity_attn(sc, global, print, attn.sig)); 7891} 7892 7893static void 7894bxe_attn_int_deasserted4(struct bxe_softc *sc, 7895 uint32_t attn) 7896{ 7897 uint32_t val; 7898 7899 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 7900 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); 7901 BLOGE(sc, "PGLUE hw attention 0x%08x\n", val); 7902 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) 7903 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); 7904 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) 7905 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); 7906 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) 7907 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); 7908 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) 7909 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); 7910 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) 7911 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); 7912 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) 7913 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); 7914 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) 7915 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); 7916 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) 7917 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); 7918 if (val & 
PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) 7919 BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); 7920 } 7921 7922 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { 7923 val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR); 7924 BLOGE(sc, "ATC hw attention 0x%08x\n", val); 7925 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) 7926 BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); 7927 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) 7928 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); 7929 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) 7930 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); 7931 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) 7932 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); 7933 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) 7934 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); 7935 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) 7936 BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); 7937 } 7938 7939 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 7940 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { 7941 BLOGE(sc, "FATAL parity attention set4 0x%08x\n", 7942 (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 7943 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); 7944 } 7945} 7946 7947static void 7948bxe_e1h_disable(struct bxe_softc *sc) 7949{ 7950 int port = SC_PORT(sc); 7951 7952 bxe_tx_disable(sc); 7953 7954 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); 7955} 7956 7957static void 7958bxe_e1h_enable(struct bxe_softc *sc) 7959{ 7960 int port = SC_PORT(sc); 7961 7962 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); 7963 7964 // XXX bxe_tx_enable(sc); 7965} 7966 7967/* 7968 * called due to MCP event (on pmf): 7969 * reread new bandwidth configuration 7970 * configure FW 7971 * notify others function about the change 7972 */ 7973static void 7974bxe_config_mf_bw(struct bxe_softc *sc) 7975{ 7976 if (sc->link_vars.link_up) { 7977 bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX); 7978 // XXX bxe_link_sync_notify(sc); 7979 } 7980 7981 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 7982} 7983 7984static void 7985bxe_set_mf_bw(struct bxe_softc *sc) 7986{ 7987 bxe_config_mf_bw(sc); 7988 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 7989} 7990 7991static void 7992bxe_handle_eee_event(struct bxe_softc *sc) 7993{ 7994 BLOGD(sc, DBG_INTR, "EEE - LLDP event\n"); 7995 bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); 7996} 7997 7998#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 7999 8000static void 8001bxe_drv_info_ether_stat(struct bxe_softc *sc) 8002{ 8003 struct eth_stats_info *ether_stat = 8004 &sc->sp->drv_info_to_mcp.ether_stat; 8005 8006 strlcpy(ether_stat->version, BXE_DRIVER_VERSION, 8007 ETH_STAT_INFO_VERSION_LEN); 8008 8009 /* XXX (+ MAC_PAD) taken from other driver... verify this is right */ 8010 sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, 8011 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 8012 ether_stat->mac_local + MAC_PAD, 8013 MAC_PAD, ETH_ALEN); 8014 8015 ether_stat->mtu_size = sc->mtu; 8016 8017 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; 8018 if (sc->ifnet->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) { 8019 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; 8020 } 8021 8022 // XXX ether_stat->feature_flags |= ???; 8023 8024 ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 
1 : 0; 8025 8026 ether_stat->txq_size = sc->tx_ring_size; 8027 ether_stat->rxq_size = sc->rx_ring_size; 8028} 8029 8030static void 8031bxe_handle_drv_info_req(struct bxe_softc *sc) 8032{ 8033 enum drv_info_opcode op_code; 8034 uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); 8035 8036 /* if drv_info version supported by MFW doesn't match - send NACK */ 8037 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 8038 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 8039 return; 8040 } 8041 8042 op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 8043 DRV_INFO_CONTROL_OP_CODE_SHIFT); 8044 8045 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); 8046 8047 switch (op_code) { 8048 case ETH_STATS_OPCODE: 8049 bxe_drv_info_ether_stat(sc); 8050 break; 8051 case FCOE_STATS_OPCODE: 8052 case ISCSI_STATS_OPCODE: 8053 default: 8054 /* if op code isn't supported - send NACK */ 8055 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); 8056 return; 8057 } 8058 8059 /* 8060 * If we got drv_info attn from MFW then these fields are defined in 8061 * shmem2 for sure 8062 */ 8063 SHMEM2_WR(sc, drv_info_host_addr_lo, 8064 U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp))); 8065 SHMEM2_WR(sc, drv_info_host_addr_hi, 8066 U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp))); 8067 8068 bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); 8069} 8070 8071static void 8072bxe_dcc_event(struct bxe_softc *sc, 8073 uint32_t dcc_event) 8074{ 8075 BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event); 8076 8077 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 8078 /* 8079 * This is the only place besides the function initialization 8080 * where the sc->flags can change so it is done without any 8081 * locks 8082 */ 8083 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { 8084 BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n"); 8085 sc->flags |= BXE_MF_FUNC_DIS; 8086 bxe_e1h_disable(sc); 8087 } else { 8088 BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n"); 8089 sc->flags &= ~BXE_MF_FUNC_DIS; 8090 bxe_e1h_enable(sc); 8091 } 8092 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 8093 } 8094 8095 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 8096 bxe_config_mf_bw(sc); 8097 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 8098 } 8099 8100 /* Report results to MCP */ 8101 if (dcc_event) 8102 bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); 8103 else 8104 bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); 8105} 8106 8107static void 8108bxe_pmf_update(struct bxe_softc *sc) 8109{ 8110 int port = SC_PORT(sc); 8111 uint32_t val; 8112 8113 sc->port.pmf = 1; 8114 BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf); 8115 8116 /* 8117 * We need the mb() to ensure the ordering between the writing to 8118 * sc->port.pmf here and reading it from the bxe_periodic_task(). 8119 */ 8120 mb(); 8121 8122 /* queue a periodic task */ 8123 // XXX schedule task... 
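/*
 * The NIG attention enable value programmed below combines the common
 * mask (0xff0f) with this function's per-VN enable bit; the VN bits
 * appear to live at positions 4..7, hence the (SC_VN(sc) + 4) shift.
 * For example, VN 2 yields 0xff0f | (1 << 6) = 0xff4f.
 */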
8124 8125 // XXX bxe_dcbx_pmf_update(sc); 8126 8127 /* enable nig attention */ 8128 val = (0xff0f | (1 << (SC_VN(sc) + 4))); 8129 if (sc->devinfo.int_block == INT_BLOCK_HC) { 8130 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val); 8131 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val); 8132 } else if (!CHIP_IS_E1x(sc)) { 8133 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 8134 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 8135 } 8136 8137 bxe_stats_handle(sc, STATS_EVENT_PMF); 8138} 8139 8140static int 8141bxe_mc_assert(struct bxe_softc *sc) 8142{ 8143 char last_idx; 8144 int i, rc = 0; 8145 uint32_t row0, row1, row2, row3; 8146 8147 /* XSTORM */ 8148 last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); 8149 if (last_idx) 8150 BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8151 8152 /* print the asserts */ 8153 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8154 8155 row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); 8156 row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4); 8157 row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8); 8158 row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12); 8159 8160 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8161 BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8162 i, row3, row2, row1, row0); 8163 rc++; 8164 } else { 8165 break; 8166 } 8167 } 8168 8169 /* TSTORM */ 8170 last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); 8171 if (last_idx) { 8172 BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8173 } 8174 8175 /* print the asserts */ 8176 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8177 8178 row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); 8179 row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4); 8180 row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8); 8181 row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12); 8182 8183 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8184 BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8185 i, row3, row2, row1, row0); 8186 rc++; 8187 } else { 8188 break; 8189 } 8190 } 8191 8192 /* CSTORM */ 8193 last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET); 8194 if (last_idx) { 8195 BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8196 } 8197 8198 /* print the asserts */ 8199 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8200 8201 row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); 8202 row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4); 8203 row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8); 8204 row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12); 8205 8206 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8207 BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8208 i, row3, row2, row1, row0); 8209 rc++; 8210 } else { 8211 break; 8212 } 8213 } 8214 8215 /* USTORM */ 8216 last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); 8217 if (last_idx) { 8218 BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 8219 } 8220 8221 /* print the asserts */ 8222 for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { 8223 8224 row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); 8225 row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4); 8226 row2 = REG_RD(sc, BAR_USTRORM_INTMEM + 
USTORM_ASSERT_LIST_OFFSET(i) + 8); 8227 row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12); 8228 8229 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 8230 BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 8231 i, row3, row2, row1, row0); 8232 rc++; 8233 } else { 8234 break; 8235 } 8236 } 8237 8238 return (rc); 8239} 8240 8241static void 8242bxe_attn_int_deasserted3(struct bxe_softc *sc, 8243 uint32_t attn) 8244{ 8245 int func = SC_FUNC(sc); 8246 uint32_t val; 8247 8248 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { 8249 8250 if (attn & BXE_PMF_LINK_ASSERT(sc)) { 8251 8252 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 8253 bxe_read_mf_cfg(sc); 8254 sc->devinfo.mf_info.mf_config[SC_VN(sc)] = 8255 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 8256 val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status); 8257 8258 if (val & DRV_STATUS_DCC_EVENT_MASK) 8259 bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK)); 8260 8261 if (val & DRV_STATUS_SET_MF_BW) 8262 bxe_set_mf_bw(sc); 8263 8264 if (val & DRV_STATUS_DRV_INFO_REQ) 8265 bxe_handle_drv_info_req(sc); 8266 8267#if 0 8268 if (val & DRV_STATUS_VF_DISABLED) 8269 bxe_vf_handle_flr_event(sc); 8270#endif 8271 8272 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) 8273 bxe_pmf_update(sc); 8274 8275#if 0 8276 if (sc->port.pmf && 8277 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && 8278 (sc->dcbx_enabled > 0)) 8279 /* start dcbx state machine */ 8280 bxe_dcbx_set_params(sc, BXE_DCBX_STATE_NEG_RECEIVED); 8281#endif 8282 8283#if 0 8284 if (val & DRV_STATUS_AFEX_EVENT_MASK) 8285 bxe_handle_afex_cmd(sc, val & DRV_STATUS_AFEX_EVENT_MASK); 8286#endif 8287 8288 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) 8289 bxe_handle_eee_event(sc); 8290 8291 if (sc->link_vars.periodic_flags & 8292 ELINK_PERIODIC_FLAGS_LINK_EVENT) { 8293 /* sync with link */ 8294 BXE_PHY_LOCK(sc); 8295 sc->link_vars.periodic_flags &= 8296 ~ELINK_PERIODIC_FLAGS_LINK_EVENT; 8297 BXE_PHY_UNLOCK(sc); 8298 if (IS_MF(sc)) 8299 ; // XXX bxe_link_sync_notify(sc); 8300 bxe_link_report(sc); 8301 } 8302 8303 /* 8304 * Always call it here: bxe_link_report() will 8305 * prevent the link indication duplication. 8306 */ 8307 bxe_link_status_update(sc); 8308 8309 } else if (attn & BXE_MC_ASSERT_BITS) { 8310 8311 BLOGE(sc, "MC assert!\n"); 8312 bxe_mc_assert(sc); 8313 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0); 8314 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0); 8315 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0); 8316 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0); 8317 bxe_panic(sc, ("MC assert!\n")); 8318 8319 } else if (attn & BXE_MCP_ASSERT) { 8320 8321 BLOGE(sc, "MCP assert!\n"); 8322 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0); 8323 // XXX bxe_fw_dump(sc); 8324 8325 } else { 8326 BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn); 8327 } 8328 } 8329 8330 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 8331 BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn); 8332 if (attn & BXE_GRC_TIMEOUT) { 8333 val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN); 8334 BLOGE(sc, "GRC time-out 0x%08x\n", val); 8335 } 8336 if (attn & BXE_GRC_RSV) { 8337 val = CHIP_IS_E1(sc) ? 
0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN); 8338 BLOGE(sc, "GRC reserved 0x%08x\n", val); 8339 } 8340 REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 8341 } 8342} 8343 8344static void 8345bxe_attn_int_deasserted2(struct bxe_softc *sc, 8346 uint32_t attn) 8347{ 8348 int port = SC_PORT(sc); 8349 int reg_offset; 8350 uint32_t val0, mask0, val1, mask1; 8351 uint32_t val; 8352 8353 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { 8354 val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR); 8355 BLOGE(sc, "CFC hw attention 0x%08x\n", val); 8356 /* CFC error attention */ 8357 if (val & 0x2) { 8358 BLOGE(sc, "FATAL error from CFC\n"); 8359 } 8360 } 8361 8362 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { 8363 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0); 8364 BLOGE(sc, "PXP hw attention-0 0x%08x\n", val); 8365 /* RQ_USDMDP_FIFO_OVERFLOW */ 8366 if (val & 0x18000) { 8367 BLOGE(sc, "FATAL error from PXP\n"); 8368 } 8369 8370 if (!CHIP_IS_E1x(sc)) { 8371 val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1); 8372 BLOGE(sc, "PXP hw attention-1 0x%08x\n", val); 8373 } 8374 } 8375 8376#define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR 8377#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT 8378 8379 if (attn & AEU_PXP2_HW_INT_BIT) { 8380 /* CQ47854 workaround do not panic on 8381 * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR 8382 */ 8383 if (!CHIP_IS_E1x(sc)) { 8384 mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0); 8385 val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1); 8386 mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1); 8387 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0); 8388 /* 8389 * If PXP2_EOP_ERROR_BIT is the only bit set in 8390 * STS0 and STS1 - clear it 8391 * 8392 * we probably lose additional attentions between 8393 * STS0 and STS_CLR0; in this case the user will not 8394 * be notified about them 8395 */ 8396 if (val0 & mask0 & PXP2_EOP_ERROR_BIT && 8397 !(val1 & mask1)) 8398 val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); 8399 8400 /* print the register, since no one can restore it */ 8401 BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0); 8402 8403 /* 8404 * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR 8405 * then notify 8406 */ 8407 if (val0 & PXP2_EOP_ERROR_BIT) { 8408 BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n"); 8409 8410 /* 8411 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is 8412 * set then clear attention from PXP2 block without panic 8413 */ 8414 if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) && 8415 ((val1 & mask1) == 0)) 8416 attn &= ~AEU_PXP2_HW_INT_BIT; 8417 } 8418 } 8419 } 8420 8421 if (attn & HW_INTERRUT_ASSERT_SET_2) { 8422 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : 8423 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 8424 8425 val = REG_RD(sc, reg_offset); 8426 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); 8427 REG_WR(sc, reg_offset, val); 8428 8429 BLOGE(sc, "FATAL HW block attention set2 0x%x\n", 8430 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2)); 8431 bxe_panic(sc, ("HW block attention set2\n")); 8432 } 8433} 8434 8435static void 8436bxe_attn_int_deasserted1(struct bxe_softc *sc, 8437 uint32_t attn) 8438{ 8439 int port = SC_PORT(sc); 8440 int reg_offset; 8441 uint32_t val; 8442 8443 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { 8444 val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR); 8445 BLOGE(sc, "DB hw attention 0x%08x\n", val); 8446 /* DORQ discard attention */ 8447 if (val & 0x2) { 8448 BLOGE(sc, "FATAL error from DORQ\n"); 8449 } 8450 } 8451 8452 if (attn & HW_INTERRUT_ASSERT_SET_1) { 8453 reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : 8454 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 8455 8456 val = REG_RD(sc, reg_offset); 8457 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); 8458 REG_WR(sc, reg_offset, val); 8459 8460 BLOGE(sc, "FATAL HW block attention set1 0x%08x\n", 8461 (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1)); 8462 bxe_panic(sc, ("HW block attention set1\n")); 8463 } 8464} 8465 8466static void 8467bxe_attn_int_deasserted0(struct bxe_softc *sc, 8468 uint32_t attn) 8469{ 8470 int port = SC_PORT(sc); 8471 int reg_offset; 8472 uint32_t val; 8473 8474 reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 8475 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; 8476 8477 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { 8478 val = REG_RD(sc, reg_offset); 8479 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; 8480 REG_WR(sc, reg_offset, val); 8481 8482 BLOGW(sc, "SPIO5 hw attention\n"); 8483 8484 /* Fan failure attention */ 8485 elink_hw_reset_phy(&sc->link_params); 8486 bxe_fan_failure(sc); 8487 } 8488 8489 if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) { 8490 BXE_PHY_LOCK(sc); 8491 elink_handle_module_detect_int(&sc->link_params); 8492 BXE_PHY_UNLOCK(sc); 8493 } 8494 8495 if (attn & HW_INTERRUT_ASSERT_SET_0) { 8496 val = REG_RD(sc, reg_offset); 8497 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); 8498 REG_WR(sc, reg_offset, val); 8499 8500 bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n", 8501 (attn & HW_INTERRUT_ASSERT_SET_0))); 8502 } 8503} 8504 8505static void 8506bxe_attn_int_deasserted(struct bxe_softc *sc, 8507 uint32_t deasserted) 8508{ 8509 struct attn_route attn; 8510 struct attn_route *group_mask; 8511 int port = SC_PORT(sc); 8512 int index; 8513 uint32_t reg_addr; 8514 uint32_t val; 8515 uint32_t aeu_mask; 8516 uint8_t global = FALSE; 8517 8518 /* 8519 * Need to take HW lock because MCP or other port might also 8520 * try to handle this event. 8521 */ 8522 bxe_acquire_alr(sc); 8523 8524 if (bxe_chk_parity_attn(sc, &global, TRUE)) { 8525 /* XXX 8526 * In case of parity errors don't handle attentions so that 8527 * other function would "see" parity errors. 8528 */ 8529 sc->recovery_state = BXE_RECOVERY_INIT; 8530 // XXX schedule a recovery task... 
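/*
 * Until a recovery task is actually wired up (see the XXX above), this
 * path can only park the state machine in BXE_RECOVERY_INIT, mask
 * further HW interrupts, and drop the ALR so the MCP or the other port
 * can observe the same parity signals.
 */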
8531 /* disable HW interrupts */ 8532 bxe_int_disable(sc); 8533 bxe_release_alr(sc); 8534 return; 8535 } 8536 8537 attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 8538 attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 8539 attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 8540 attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 8541 if (!CHIP_IS_E1x(sc)) { 8542 attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 8543 } else { 8544 attn.sig[4] = 0; 8545 } 8546 8547 BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 8548 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); 8549 8550 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 8551 if (deasserted & (1 << index)) { 8552 group_mask = &sc->attn_group[index]; 8553 8554 BLOGD(sc, DBG_INTR, 8555 "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index, 8556 group_mask->sig[0], group_mask->sig[1], 8557 group_mask->sig[2], group_mask->sig[3], 8558 group_mask->sig[4]); 8559 8560 bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]); 8561 bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]); 8562 bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]); 8563 bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]); 8564 bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]); 8565 } 8566 } 8567 8568 bxe_release_alr(sc); 8569 8570 if (sc->devinfo.int_block == INT_BLOCK_HC) { 8571 reg_addr = (HC_REG_COMMAND_REG + port*32 + 8572 COMMAND_REG_ATTN_BITS_CLR); 8573 } else { 8574 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); 8575 } 8576 8577 val = ~deasserted; 8578 BLOGD(sc, DBG_INTR, 8579 "about to mask 0x%08x at %s addr 0x%08x\n", val, 8580 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 8581 REG_WR(sc, reg_addr, val); 8582 8583 if (~sc->attn_state & deasserted) { 8584 BLOGE(sc, "IGU error\n"); 8585 } 8586 8587 reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : 8588 MISC_REG_AEU_MASK_ATTN_FUNC_0; 8589 8590 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 8591 8592 aeu_mask = REG_RD(sc, reg_addr); 8593 8594 BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n", 8595 aeu_mask, deasserted); 8596 aeu_mask |= (deasserted & 0x3ff); 8597 BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); 8598 8599 REG_WR(sc, reg_addr, aeu_mask); 8600 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 8601 8602 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); 8603 sc->attn_state &= ~deasserted; 8604 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); 8605} 8606 8607static void 8608bxe_attn_int(struct bxe_softc *sc) 8609{ 8610 /* read local copy of bits */ 8611 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); 8612 uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack); 8613 uint32_t attn_state = sc->attn_state; 8614 8615 /* look for changed bits */ 8616 uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; 8617 uint32_t deasserted = ~attn_bits & attn_ack & attn_state; 8618 8619 BLOGD(sc, DBG_INTR, 8620 "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n", 8621 attn_bits, attn_ack, asserted, deasserted); 8622 8623 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { 8624 BLOGE(sc, "BAD attention state\n"); 8625 } 8626 8627 /* handle bits that were raised */ 8628 if (asserted) { 8629 bxe_attn_int_asserted(sc, asserted); 8630 } 8631 8632 if (deasserted) { 8633 bxe_attn_int_deasserted(sc, deasserted); 8634 } 8635} 8636 8637static uint16_t 8638bxe_update_dsb_idx(struct bxe_softc *sc) 8639{ 8640 struct host_sp_status_block *def_sb = sc->def_sb; 8641 uint16_t rc = 0; 8642 8643 mb(); /* status block is written to by the chip */ 8644 8645 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 8646 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; 8647 rc |= BXE_DEF_SB_ATT_IDX; 8648 } 8649 8650 if (sc->def_idx != def_sb->sp_sb.running_index) { 8651 sc->def_idx = def_sb->sp_sb.running_index; 8652 rc |= BXE_DEF_SB_IDX; 8653 } 8654 8655 mb(); 8656 8657 return (rc); 8658} 8659 8660static inline struct ecore_queue_sp_obj * 8661bxe_cid_to_q_obj(struct bxe_softc *sc, 8662 uint32_t cid) 8663{ 8664 BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid); 8665 return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj); 8666} 8667 8668static void 8669bxe_handle_mcast_eqe(struct bxe_softc *sc) 8670{ 8671 struct ecore_mcast_ramrod_params rparam; 8672 int rc; 8673 8674 memset(&rparam, 0, sizeof(rparam)); 8675 8676 rparam.mcast_obj = &sc->mcast_obj; 8677 8678 BXE_MCAST_LOCK(sc); 8679 8680 /* clear pending state for the last command */ 8681 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); 8682 8683 /* if there are pending mcast commands - send them */ 8684 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { 8685 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); 8686 if (rc < 0) { 8687 BLOGD(sc, DBG_SP, 8688 "ERROR: Failed to send pending mcast commands (%d)\n", 8689 rc); 8690 } 8691 } 8692 8693 BXE_MCAST_UNLOCK(sc); 8694} 8695 8696static void 8697bxe_handle_classification_eqe(struct bxe_softc *sc, 8698 union event_ring_elem *elem) 8699{ 8700 unsigned long ramrod_flags = 0; 8701 int rc = 0; 8702 uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; 8703 struct ecore_vlan_mac_obj *vlan_mac_obj; 8704 8705 /* always push next commands out, don't wait here */ 8706 bit_set(&ramrod_flags, RAMROD_CONT); 8707 
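/*
 * The 'echo' field packs two values: the low bits hold the SW CID
 * (extracted above with BXE_SWCID_MASK) and the bits at and above
 * BXE_SWCID_SHIFT hold the pending-command type that the switch below
 * dispatches on.
 */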
8708 switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) { 8709 case ECORE_FILTER_MAC_PENDING: 8710 BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n"); 8711 vlan_mac_obj = &sc->sp_objs[cid].mac_obj; 8712 break; 8713 8714 case ECORE_FILTER_MCAST_PENDING: 8715 BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n"); 8716 /* 8717 * This is only relevant for 57710 where multicast MACs are 8718 * configured as unicast MACs using the same ramrod. 8719 */ 8720 bxe_handle_mcast_eqe(sc); 8721 return; 8722 8723 default: 8724 BLOGE(sc, "Unsupported classification command: %d\n", 8725 elem->message.data.eth_event.echo); 8726 return; 8727 } 8728 8729 rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags); 8730 8731 if (rc < 0) { 8732 BLOGE(sc, "Failed to schedule new commands (%d)\n", rc); 8733 } else if (rc > 0) { 8734 BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n"); 8735 } 8736} 8737 8738static void 8739bxe_handle_rx_mode_eqe(struct bxe_softc *sc, 8740 union event_ring_elem *elem) 8741{ 8742 bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); 8743 8744 /* send rx_mode command again if was requested */ 8745 if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED, 8746 &sc->sp_state)) { 8747 bxe_set_storm_rx_mode(sc); 8748 } 8749#if 0 8750 else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_START_SCHED, 8751 &sc->sp_state)) { 8752 bxe_set_iscsi_eth_rx_mode(sc, TRUE); 8753 } 8754 else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_STOP_SCHED, 8755 &sc->sp_state)) { 8756 bxe_set_iscsi_eth_rx_mode(sc, FALSE); 8757 } 8758#endif 8759} 8760 8761static void 8762bxe_update_eq_prod(struct bxe_softc *sc, 8763 uint16_t prod) 8764{ 8765 storm_memset_eq_prod(sc, prod, SC_FUNC(sc)); 8766 wmb(); /* keep prod updates ordered */ 8767} 8768 8769static void 8770bxe_eq_int(struct bxe_softc *sc) 8771{ 8772 uint16_t hw_cons, sw_cons, sw_prod; 8773 union event_ring_elem *elem; 8774 uint8_t echo; 8775 uint32_t cid; 8776 uint8_t opcode; 8777 int spqe_cnt = 0; 8778 struct ecore_queue_sp_obj *q_obj; 8779 struct ecore_func_sp_obj *f_obj = &sc->func_obj; 8780 struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw; 8781 8782 hw_cons = le16toh(*sc->eq_cons_sb); 8783 8784 /* 8785 * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. 8786 * when we get to the next-page we need to adjust so the loop 8787 * condition below will be met. The next element is the size of a 8788 * regular element and hence incrementing by 1 8789 */ 8790 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) { 8791 hw_cons++; 8792 } 8793 8794 /* 8795 * This function may never run in parallel with itself for a 8796 * specific sc and no need for a read memory barrier here. 
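 *
 * (The page-boundary fixup above can be sanity checked with a concrete
 * case: when hw_cons lands on the last element of a page, the
 * ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) test fires and
 * hw_cons is bumped by one, because that slot is reserved as the
 * next-page pointer rather than a regular element, so sw_cons will
 * never equal it.)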
8797 */ 8798 sw_cons = sc->eq_cons; 8799 sw_prod = sc->eq_prod; 8800 8801 BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n", 8802 hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left)); 8803 8804 for (; 8805 sw_cons != hw_cons; 8806 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 8807 8808 elem = &sc->eq[EQ_DESC(sw_cons)]; 8809 8810#if 0 8811 int rc; 8812 rc = bxe_iov_eq_sp_event(sc, elem); 8813 if (!rc) { 8814 BLOGE(sc, "bxe_iov_eq_sp_event returned %d\n", rc); 8815 goto next_spqe; 8816 } 8817#endif 8818 8819 /* elem CID originates from FW, actually LE */ 8820 cid = SW_CID(elem->message.data.cfc_del_event.cid); 8821 opcode = elem->message.opcode; 8822 8823 /* handle eq element */ 8824 switch (opcode) { 8825#if 0 8826 case EVENT_RING_OPCODE_VF_PF_CHANNEL: 8827 BLOGD(sc, DBG_SP, "vf/pf channel element on eq\n"); 8828 bxe_vf_mbx(sc, &elem->message.data.vf_pf_event); 8829 continue; 8830#endif 8831 8832 case EVENT_RING_OPCODE_STAT_QUERY: 8833 BLOGD(sc, DBG_SP, "got statistics completion event %d\n", 8834 sc->stats_comp++); 8835 /* nothing to do with stats comp */ 8836 goto next_spqe; 8837 8838 case EVENT_RING_OPCODE_CFC_DEL: 8839 /* handle according to cid range */ 8840 /* we may want to verify here that the sc state is HALTING */ 8841 BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid); 8842 q_obj = bxe_cid_to_q_obj(sc, cid); 8843 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { 8844 break; 8845 } 8846 goto next_spqe; 8847 8848 case EVENT_RING_OPCODE_STOP_TRAFFIC: 8849 BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n"); 8850 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { 8851 break; 8852 } 8853 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED); 8854 goto next_spqe; 8855 8856 case EVENT_RING_OPCODE_START_TRAFFIC: 8857 BLOGD(sc, DBG_SP, "got START TRAFFIC\n"); 8858 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) { 8859 break; 8860 } 8861 // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED); 8862 goto next_spqe; 8863 8864 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 8865 echo = elem->message.data.function_update_event.echo; 8866 if (echo == SWITCH_UPDATE) { 8867 BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n"); 8868 if (f_obj->complete_cmd(sc, f_obj, 8869 ECORE_F_CMD_SWITCH_UPDATE)) { 8870 break; 8871 } 8872 } 8873 else { 8874 BLOGD(sc, DBG_SP, 8875 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 8876#if 0 8877 f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_UPDATE); 8878 /* 8879 * We will perform the queues update from the sp_core_task as 8880 * all queue SP operations should run with CORE_LOCK. 
8881 */ 8882 bxe_set_bit(BXE_SP_CORE_AFEX_F_UPDATE, &sc->sp_core_state); 8883 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); 8884#endif 8885 } 8886 goto next_spqe; 8887 8888#if 0 8889 case EVENT_RING_OPCODE_AFEX_VIF_LISTS: 8890 f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_VIFLISTS); 8891 bxe_after_afex_vif_lists(sc, elem); 8892 goto next_spqe; 8893#endif 8894 8895 case EVENT_RING_OPCODE_FORWARD_SETUP: 8896 q_obj = &bxe_fwd_sp_obj(sc, q_obj); 8897 if (q_obj->complete_cmd(sc, q_obj, 8898 ECORE_Q_CMD_SETUP_TX_ONLY)) { 8899 break; 8900 } 8901 goto next_spqe; 8902 8903 case EVENT_RING_OPCODE_FUNCTION_START: 8904 BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n"); 8905 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { 8906 break; 8907 } 8908 goto next_spqe; 8909 8910 case EVENT_RING_OPCODE_FUNCTION_STOP: 8911 BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n"); 8912 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { 8913 break; 8914 } 8915 goto next_spqe; 8916 } 8917 8918 switch (opcode | sc->state) { 8919 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN): 8920 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT): 8921 cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; 8922 BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid); 8923 rss_raw->clear_pending(rss_raw); 8924 break; 8925 8926 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN): 8927 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG): 8928 case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT): 8929 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN): 8930 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG): 8931 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8932 BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n"); 8933 bxe_handle_classification_eqe(sc, elem); 8934 break; 8935 8936 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN): 8937 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG): 8938 case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8939 BLOGD(sc, DBG_SP, "got mcast ramrod\n"); 8940 bxe_handle_mcast_eqe(sc); 8941 break; 8942 8943 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN): 8944 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG): 8945 case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT): 8946 BLOGD(sc, DBG_SP, "got rx_mode ramrod\n"); 8947 bxe_handle_rx_mode_eqe(sc, elem); 8948 break; 8949 8950 default: 8951 /* unknown event log error and continue */ 8952 BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n", 8953 elem->message.opcode, sc->state); 8954 } 8955 8956next_spqe: 8957 spqe_cnt++; 8958 } /* for */ 8959 8960 mb(); 8961 atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt); 8962 8963 sc->eq_cons = sw_cons; 8964 sc->eq_prod = sw_prod; 8965 8966 /* make sure that above mem writes were issued towards the memory */ 8967 wmb(); 8968 8969 /* update producer */ 8970 bxe_update_eq_prod(sc, sc->eq_prod); 8971} 8972 8973static void 8974bxe_handle_sp_tq(void *context, 8975 int pending) 8976{ 8977 struct bxe_softc *sc = (struct bxe_softc *)context; 8978 uint16_t status; 8979 8980 BLOGD(sc, DBG_SP, "---> SP TASK <---\n"); 8981 8982 /* what work needs to be performed? 
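 *
 * bxe_update_dsb_idx() answers with a bitmask: BXE_DEF_SB_ATT_IDX when
 * the attention index of the default status block moved (HW attentions
 * need service) and BXE_DEF_SB_IDX when the slowpath running index
 * moved (EQ completions need service). Both bits may be set at once.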
*/ 8983 status = bxe_update_dsb_idx(sc); 8984 8985 BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status); 8986 8987 /* HW attentions */ 8988 if (status & BXE_DEF_SB_ATT_IDX) { 8989 BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n"); 8990 bxe_attn_int(sc); 8991 status &= ~BXE_DEF_SB_ATT_IDX; 8992 } 8993 8994 /* SP events: STAT_QUERY and others */ 8995 if (status & BXE_DEF_SB_IDX) { 8996 /* handle EQ completions */ 8997 BLOGD(sc, DBG_SP, "---> EQ INTR <---\n"); 8998 bxe_eq_int(sc); 8999 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 9000 le16toh(sc->def_idx), IGU_INT_NOP, 1); 9001 status &= ~BXE_DEF_SB_IDX; 9002 } 9003 9004 /* if status is non zero then something went wrong */ 9005 if (__predict_false(status)) { 9006 BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status); 9007 } 9008 9009 /* ack status block only if something was actually handled */ 9010 bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, 9011 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1); 9012 9013 /* 9014 * Must be called after the EQ processing (since eq leads to sriov 9015 * ramrod completion flows). 9016 * This flow may have been scheduled by the arrival of a ramrod 9017 * completion, or by the sriov code rescheduling itself. 9018 */ 9019 // XXX bxe_iov_sp_task(sc); 9020 9021#if 0 9022 /* AFEX - poll to check if VIFSET_ACK should be sent to MFW */ 9023 if (bxe_test_and_clear_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK, 9024 &sc->sp_state)) { 9025 bxe_link_report(sc); 9026 bxe_fw_command(sc, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 9027 } 9028#endif 9029} 9030 9031static void 9032bxe_handle_fp_tq(void *context, 9033 int pending) 9034{ 9035 struct bxe_fastpath *fp = (struct bxe_fastpath *)context; 9036 struct bxe_softc *sc = fp->sc; 9037 uint8_t more_tx = FALSE; 9038 uint8_t more_rx = FALSE; 9039 9040 BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index); 9041 9042 /* XXX 9043 * IFF_DRV_RUNNING state can't be checked here since we process 9044 * slowpath events on a client queue during setup. Instead 9045 * we need to add a "process/continue" flag here that the driver 9046 * can use to tell the task here not to do anything. 
9047 */ 9048#if 0 9049 if (!(sc->ifnet->if_drv_flags & IFF_DRV_RUNNING)) { 9050 return; 9051 } 9052#endif 9053 9054 /* update the fastpath index */ 9055 bxe_update_fp_sb_idx(fp); 9056 9057 /* XXX add loop here if ever support multiple tx CoS */ 9058 /* fp->txdata[cos] */ 9059 if (bxe_has_tx_work(fp)) { 9060 BXE_FP_TX_LOCK(fp); 9061 more_tx = bxe_txeof(sc, fp); 9062 BXE_FP_TX_UNLOCK(fp); 9063 } 9064 9065 if (bxe_has_rx_work(fp)) { 9066 more_rx = bxe_rxeof(sc, fp); 9067 } 9068 9069 if (more_rx /*|| more_tx*/) { 9070 /* still more work to do */ 9071 taskqueue_enqueue_fast(fp->tq, &fp->tq_task); 9072 return; 9073 } 9074 9075 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 9076 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); 9077} 9078 9079static void 9080bxe_task_fp(struct bxe_fastpath *fp) 9081{ 9082 struct bxe_softc *sc = fp->sc; 9083 uint8_t more_tx = FALSE; 9084 uint8_t more_rx = FALSE; 9085 9086 BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index); 9087 9088 /* update the fastpath index */ 9089 bxe_update_fp_sb_idx(fp); 9090 9091 /* XXX add loop here if ever support multiple tx CoS */ 9092 /* fp->txdata[cos] */ 9093 if (bxe_has_tx_work(fp)) { 9094 BXE_FP_TX_LOCK(fp); 9095 more_tx = bxe_txeof(sc, fp); 9096 BXE_FP_TX_UNLOCK(fp); 9097 } 9098 9099 if (bxe_has_rx_work(fp)) { 9100 more_rx = bxe_rxeof(sc, fp); 9101 } 9102 9103 if (more_rx /*|| more_tx*/) { 9104 /* still more work to do, bail out if this ISR and process later */ 9105 taskqueue_enqueue_fast(fp->tq, &fp->tq_task); 9106 return; 9107 } 9108 9109 /* 9110 * Here we write the fastpath index taken before doing any tx or rx work. 9111 * It is very well possible other hw events occurred up to this point and 9112 * they were actually processed accordingly above. Since we're going to 9113 * write an older fastpath index, an interrupt is coming which we might 9114 * not do any work in. 9115 */ 9116 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 9117 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); 9118} 9119 9120/* 9121 * Legacy interrupt entry point. 9122 * 9123 * Verifies that the controller generated the interrupt and 9124 * then calls a separate routine to handle the various 9125 * interrupt causes: link, RX, and TX. 9126 */ 9127static void 9128bxe_intr_legacy(void *xsc) 9129{ 9130 struct bxe_softc *sc = (struct bxe_softc *)xsc; 9131 struct bxe_fastpath *fp; 9132 uint16_t status, mask; 9133 int i; 9134 9135 BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n"); 9136 9137#if 0 9138 /* Don't handle any interrupts if we're not ready. */ 9139 if (__predict_false(sc->intr_sem != 0)) { 9140 return; 9141 } 9142#endif 9143 9144 /* 9145 * 0 for ustorm, 1 for cstorm 9146 * the bits returned from ack_int() are 0-15 9147 * bit 0 = attention status block 9148 * bit 1 = fast path status block 9149 * a mask of 0x2 or more = tx/rx event 9150 * a mask of 1 = slow path event 9151 */ 9152 9153 status = bxe_ack_int(sc); 9154 9155 /* the interrupt is not for us */ 9156 if (__predict_false(status == 0)) { 9157 BLOGD(sc, DBG_INTR, "Not our interrupt!\n"); 9158 return; 9159 } 9160 9161 BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status); 9162 9163 FOR_EACH_ETH_QUEUE(sc, i) { 9164 fp = &sc->fp[i]; 9165 mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); 9166 if (status & mask) { 9167 /* acknowledge and disable further fastpath interrupts */ 9168 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 9169 bxe_task_fp(fp); 9170 status &= ~mask; 9171 } 9172 } 9173 9174#if 0 9175 if (CNIC_SUPPORT(sc)) { 9176 mask = 0x2; 9177 if (status & (mask | 0x1)) { 9178 ... 
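/*
 * Worked example of the status demux in this handler, assuming two ETH
 * queues and no CNIC support: bit 0 (0x0001) selects the slowpath
 * handler, fp[0] matches mask 0x2 << 0 = 0x0002, fp[1] matches
 * 0x2 << 1 = 0x0004, and any leftover bits are reported as unexpected
 * at the end.
 */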
9179 status &= ~mask; 9180 } 9181 } 9182#endif 9183 9184 if (__predict_false(status & 0x1)) { 9185 /* acknowledge and disable further slowpath interrupts */ 9186 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 9187 9188 /* schedule slowpath handler */ 9189 taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task); 9190 9191 status &= ~0x1; 9192 } 9193 9194 if (__predict_false(status)) { 9195 BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status); 9196 } 9197} 9198 9199/* slowpath interrupt entry point */ 9200static void 9201bxe_intr_sp(void *xsc) 9202{ 9203 struct bxe_softc *sc = (struct bxe_softc *)xsc; 9204 9205 BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n"); 9206 9207 /* acknowledge and disable further slowpath interrupts */ 9208 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 9209 9210 /* schedule slowpath handler */ 9211 taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task); 9212} 9213 9214/* fastpath interrupt entry point */ 9215static void 9216bxe_intr_fp(void *xfp) 9217{ 9218 struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp; 9219 struct bxe_softc *sc = fp->sc; 9220 9221 BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index); 9222 9223 BLOGD(sc, DBG_INTR, 9224 "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n", 9225 curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id); 9226 9227#if 0 9228 /* Don't handle any interrupts if we're not ready. */ 9229 if (__predict_false(sc->intr_sem != 0)) { 9230 return; 9231 } 9232#endif 9233 9234 /* acknowledge and disable further fastpath interrupts */ 9235 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 9236 9237 bxe_task_fp(fp); 9238} 9239 9240/* Release all interrupts allocated by the driver. */ 9241static void 9242bxe_interrupt_free(struct bxe_softc *sc) 9243{ 9244 int i; 9245 9246 switch (sc->interrupt_mode) { 9247 case INTR_MODE_INTX: 9248 BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n"); 9249 if (sc->intr[0].resource != NULL) { 9250 bus_release_resource(sc->dev, 9251 SYS_RES_IRQ, 9252 sc->intr[0].rid, 9253 sc->intr[0].resource); 9254 } 9255 break; 9256 case INTR_MODE_MSI: 9257 for (i = 0; i < sc->intr_count; i++) { 9258 BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i); 9259 if (sc->intr[i].resource && sc->intr[i].rid) { 9260 bus_release_resource(sc->dev, 9261 SYS_RES_IRQ, 9262 sc->intr[i].rid, 9263 sc->intr[i].resource); 9264 } 9265 } 9266 pci_release_msi(sc->dev); 9267 break; 9268 case INTR_MODE_MSIX: 9269 for (i = 0; i < sc->intr_count; i++) { 9270 BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i); 9271 if (sc->intr[i].resource && sc->intr[i].rid) { 9272 bus_release_resource(sc->dev, 9273 SYS_RES_IRQ, 9274 sc->intr[i].rid, 9275 sc->intr[i].resource); 9276 } 9277 } 9278 pci_release_msi(sc->dev); 9279 break; 9280 default: 9281 /* nothing to do as initial allocation failed */ 9282 break; 9283 } 9284} 9285 9286/* 9287 * This function determines and allocates the appropriate 9288 * interrupt based on system capabilities and user request. 9289 * 9290 * The user may force a particular interrupt mode, specify 9291 * the number of receive queues, specify the method for 9292 * distributing received frames to receive queues, or use 9293 * the default settings which will automatically select the 9294 * best supported combination. In addition, the OS may or 9295 * may not support certain combinations of these settings. 
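 *
 * The fallback order implemented below is: MSI-X (needs at least two
 * vectors, one slowpath plus one or more fastpath), then a single MSI
 * vector, then shared INTx. Each stage that fails releases whatever it
 * allocated and demotes sc->interrupt_mode to the next stage.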
9296 * This routine attempts to reconcile the settings requested 9297 * by the user with the capabilities available from the system 9298 * to select the optimal combination of features. 9299 * 9300 * Returns: 9301 * 0 = Success, !0 = Failure. 9302 */ 9303static int 9304bxe_interrupt_alloc(struct bxe_softc *sc) 9305{ 9306 int msix_count = 0; 9307 int msi_count = 0; 9308 int num_requested = 0; 9309 int num_allocated = 0; 9310 int rid, i, j; 9311 int rc; 9312 9313 /* get the number of available MSI/MSI-X interrupts from the OS */ 9314 if (sc->interrupt_mode > 0) { 9315 if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) { 9316 msix_count = pci_msix_count(sc->dev); 9317 } 9318 9319 if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) { 9320 msi_count = pci_msi_count(sc->dev); 9321 } 9322 9323 BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n", 9324 msi_count, msix_count); 9325 } 9326 9327 do { /* try allocating MSI-X interrupt resources (at least 2) */ 9328 if (sc->interrupt_mode != INTR_MODE_MSIX) { 9329 break; 9330 } 9331 9332 if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) || 9333 (msix_count < 2)) { 9334 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9335 break; 9336 } 9337 9338 /* ask for the necessary number of MSI-X vectors */ 9339 num_requested = min((sc->num_queues + 1), msix_count); 9340 9341 BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested); 9342 9343 num_allocated = num_requested; 9344 if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) { 9345 BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc); 9346 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9347 break; 9348 } 9349 9350 if (num_allocated < 2) { /* possible? */ 9351 BLOGE(sc, "MSI-X allocation less than 2!\n"); 9352 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9353 pci_release_msi(sc->dev); 9354 break; 9355 } 9356 9357 BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n", 9358 num_requested, num_allocated); 9359 9360 /* best effort so use the number of vectors allocated to us */ 9361 sc->intr_count = num_allocated; 9362 sc->num_queues = num_allocated - 1; 9363 9364 rid = 1; /* initial resource identifier */ 9365 9366 /* allocate the MSI-X vectors */ 9367 for (i = 0; i < num_allocated; i++) { 9368 sc->intr[i].rid = (rid + i); 9369 9370 if ((sc->intr[i].resource = 9371 bus_alloc_resource_any(sc->dev, 9372 SYS_RES_IRQ, 9373 &sc->intr[i].rid, 9374 RF_ACTIVE)) == NULL) { 9375 BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n", 9376 i, (rid + i)); 9377 9378 for (j = (i - 1); j >= 0; j--) { 9379 bus_release_resource(sc->dev, 9380 SYS_RES_IRQ, 9381 sc->intr[j].rid, 9382 sc->intr[j].resource); 9383 } 9384 9385 sc->intr_count = 0; 9386 sc->num_queues = 0; 9387 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ 9388 pci_release_msi(sc->dev); 9389 break; 9390 } 9391 9392 BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i)); 9393 } 9394 } while (0); 9395 9396 do { /* try allocating MSI vector resources (at least 1) */ 9397 if (sc->interrupt_mode != INTR_MODE_MSI) { 9398 break; 9399 } 9400 9401 if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) || 9402 (msi_count < 1)) { 9403 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9404 break; 9405 } 9406 9407 /* ask for a single MSI vector */ 9408 num_requested = 1; 9409 9410 BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested); 9411 9412 num_allocated = num_requested; 9413 if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) { 9414 BLOGE(sc, "MSI alloc failed (%d)!\n", 
rc); 9415 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9416 break; 9417 } 9418 9419 if (num_allocated != 1) { /* possible? */ 9420 BLOGE(sc, "MSI allocation is not 1!\n"); 9421 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9422 pci_release_msi(sc->dev); 9423 break; 9424 } 9425 9426 BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n", 9427 num_requested, num_allocated); 9428 9429 /* best effort so use the number of vectors allocated to us */ 9430 sc->intr_count = num_allocated; 9431 sc->num_queues = num_allocated; 9432 9433 rid = 1; /* initial resource identifier */ 9434 9435 sc->intr[0].rid = rid; 9436 9437 if ((sc->intr[0].resource = 9438 bus_alloc_resource_any(sc->dev, 9439 SYS_RES_IRQ, 9440 &sc->intr[0].rid, 9441 RF_ACTIVE)) == NULL) { 9442 BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid); 9443 sc->intr_count = 0; 9444 sc->num_queues = 0; 9445 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ 9446 pci_release_msi(sc->dev); 9447 break; 9448 } 9449 9450 BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid); 9451 } while (0); 9452 9453 do { /* try allocating INTx vector resources */ 9454 if (sc->interrupt_mode != INTR_MODE_INTX) { 9455 break; 9456 } 9457 9458 BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n"); 9459 9460 /* only one vector for INTx */ 9461 sc->intr_count = 1; 9462 sc->num_queues = 1; 9463 9464 rid = 0; /* initial resource identifier */ 9465 9466 sc->intr[0].rid = rid; 9467 9468 if ((sc->intr[0].resource = 9469 bus_alloc_resource_any(sc->dev, 9470 SYS_RES_IRQ, 9471 &sc->intr[0].rid, 9472 (RF_ACTIVE | RF_SHAREABLE))) == NULL) { 9473 BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid); 9474 sc->intr_count = 0; 9475 sc->num_queues = 0; 9476 sc->interrupt_mode = -1; /* Failed! */ 9477 break; 9478 } 9479 9480 BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid); 9481 } while (0); 9482 9483 if (sc->interrupt_mode == -1) { 9484 BLOGE(sc, "Interrupt Allocation: FAILED!!!\n"); 9485 rc = 1; 9486 } else { 9487 BLOGD(sc, DBG_LOAD, 9488 "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n", 9489 sc->interrupt_mode, sc->num_queues); 9490 rc = 0; 9491 } 9492 9493 return (rc); 9494} 9495 9496static void 9497bxe_interrupt_detach(struct bxe_softc *sc) 9498{ 9499 struct bxe_fastpath *fp; 9500 int i; 9501 9502 /* release interrupt resources */ 9503 for (i = 0; i < sc->intr_count; i++) { 9504 if (sc->intr[i].resource && sc->intr[i].tag) { 9505 BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i); 9506 bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag); 9507 } 9508 } 9509 9510 for (i = 0; i < sc->num_queues; i++) { 9511 fp = &sc->fp[i]; 9512 if (fp->tq) { 9513 taskqueue_drain(fp->tq, &fp->tq_task); 9514 taskqueue_free(fp->tq); 9515 fp->tq = NULL; 9516 } 9517 } 9518 9519 if (sc->rx_mode_tq) { 9520 taskqueue_drain(sc->rx_mode_tq, &sc->rx_mode_tq_task); 9521 taskqueue_free(sc->rx_mode_tq); 9522 sc->rx_mode_tq = NULL; 9523 } 9524 9525 if (sc->sp_tq) { 9526 taskqueue_drain(sc->sp_tq, &sc->sp_tq_task); 9527 taskqueue_free(sc->sp_tq); 9528 sc->sp_tq = NULL; 9529 } 9530} 9531 9532/* 9533 * Enables interrupts and attach to the ISR. 9534 * 9535 * When using multiple MSI/MSI-X vectors the first vector 9536 * is used for slowpath operations while all remaining 9537 * vectors are used for fastpath operations. If only a 9538 * single MSI/MSI-X vector is used (SINGLE_ISR) then the 9539 * ISR must look for both slowpath and fastpath completions. 
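 *
 * Note the deliberate priority split when the taskqueues are created
 * below: the slowpath and rx_mode queues start their threads at PWAIT
 * while each fastpath queue runs at PI_NET, so packet processing is
 * not starved by slowpath housekeeping.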
9540 */ 9541static int 9542bxe_interrupt_attach(struct bxe_softc *sc) 9543{ 9544 struct bxe_fastpath *fp; 9545 int rc = 0; 9546 int i; 9547 9548 snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name), 9549 "bxe%d_sp_tq", sc->unit); 9550 TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc); 9551 sc->sp_tq = taskqueue_create_fast(sc->sp_tq_name, M_NOWAIT, 9552 taskqueue_thread_enqueue, 9553 &sc->sp_tq); 9554 taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */ 9555 "%s", sc->sp_tq_name); 9556 9557 snprintf(sc->rx_mode_tq_name, sizeof(sc->rx_mode_tq_name), 9558 "bxe%d_rx_mode_tq", sc->unit); 9559 TASK_INIT(&sc->rx_mode_tq_task, 0, bxe_handle_rx_mode_tq, sc); 9560 sc->rx_mode_tq = taskqueue_create_fast(sc->rx_mode_tq_name, M_NOWAIT, 9561 taskqueue_thread_enqueue, 9562 &sc->rx_mode_tq); 9563 taskqueue_start_threads(&sc->rx_mode_tq, 1, PWAIT, /* lower priority */ 9564 "%s", sc->rx_mode_tq_name); 9565 9566 for (i = 0; i < sc->num_queues; i++) { 9567 fp = &sc->fp[i]; 9568 snprintf(fp->tq_name, sizeof(fp->tq_name), 9569 "bxe%d_fp%d_tq", sc->unit, i); 9570 TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp); 9571 fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT, 9572 taskqueue_thread_enqueue, 9573 &fp->tq); 9574 taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */ 9575 "%s", fp->tq_name); 9576 } 9577 9578 /* setup interrupt handlers */ 9579 if (sc->interrupt_mode == INTR_MODE_MSIX) { 9580 BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n"); 9581 9582 /* 9583 * Setup the interrupt handler. Note that we pass the driver instance 9584 * to the interrupt handler for the slowpath. 9585 */ 9586 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9587 (INTR_TYPE_NET | INTR_MPSAFE), 9588 NULL, bxe_intr_sp, sc, 9589 &sc->intr[0].tag)) != 0) { 9590 BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc); 9591 goto bxe_interrupt_attach_exit; 9592 } 9593 9594 bus_describe_intr(sc->dev, sc->intr[0].resource, 9595 sc->intr[0].tag, "sp"); 9596 9597 /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */ 9598 9599 /* initialize the fastpath vectors (note the first was used for sp) */ 9600 for (i = 0; i < sc->num_queues; i++) { 9601 fp = &sc->fp[i]; 9602 BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1)); 9603 9604 /* 9605 * Setup the interrupt handler. Note that we pass the 9606 * fastpath context to the interrupt handler in this 9607 * case. 9608 */ 9609 if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource, 9610 (INTR_TYPE_NET | INTR_MPSAFE), 9611 NULL, bxe_intr_fp, fp, 9612 &sc->intr[i + 1].tag)) != 0) { 9613 BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n", 9614 (i + 1), rc); 9615 goto bxe_interrupt_attach_exit; 9616 } 9617 9618 bus_describe_intr(sc->dev, sc->intr[i + 1].resource, 9619 sc->intr[i + 1].tag, "fp%02d", i); 9620 9621 /* bind the fastpath instance to a cpu */ 9622 if (sc->num_queues > 1) { 9623 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i); 9624 } 9625 9626 fp->state = BXE_FP_STATE_IRQ; 9627 } 9628 } else if (sc->interrupt_mode == INTR_MODE_MSI) { 9629 BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n"); 9630 9631 /* 9632 * Setup the interrupt handler. Note that we pass the 9633 * driver instance to the interrupt handler which 9634 * will handle both the slowpath and fastpath. 
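 * (With a single MSI vector there is no dedicated fastpath vector, so
 * bxe_intr_legacy() is registered here and demuxes slowpath and
 * fastpath work itself, exactly as in the INTx case below.)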
9635 */ 9636 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9637 (INTR_TYPE_NET | INTR_MPSAFE), 9638 NULL, bxe_intr_legacy, sc, 9639 &sc->intr[0].tag)) != 0) { 9640 BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc); 9641 goto bxe_interrupt_attach_exit; 9642 } 9643 9644 } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */ 9645 BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n"); 9646 9647 /* 9648 * Setup the interrupt handler. Note that we pass the 9649 * driver instance to the interrupt handler which 9650 * will handle both the slowpath and fastpath. 9651 */ 9652 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, 9653 (INTR_TYPE_NET | INTR_MPSAFE), 9654 NULL, bxe_intr_legacy, sc, 9655 &sc->intr[0].tag)) != 0) { 9656 BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc); 9657 goto bxe_interrupt_attach_exit; 9658 } 9659 } 9660 9661bxe_interrupt_attach_exit: 9662 9663 return (rc); 9664} 9665 9666static int bxe_init_hw_common_chip(struct bxe_softc *sc); 9667static int bxe_init_hw_common(struct bxe_softc *sc); 9668static int bxe_init_hw_port(struct bxe_softc *sc); 9669static int bxe_init_hw_func(struct bxe_softc *sc); 9670static void bxe_reset_common(struct bxe_softc *sc); 9671static void bxe_reset_port(struct bxe_softc *sc); 9672static void bxe_reset_func(struct bxe_softc *sc); 9673static int bxe_gunzip_init(struct bxe_softc *sc); 9674static void bxe_gunzip_end(struct bxe_softc *sc); 9675static int bxe_init_firmware(struct bxe_softc *sc); 9676static void bxe_release_firmware(struct bxe_softc *sc); 9677 9678static struct 9679ecore_func_sp_drv_ops bxe_func_sp_drv = { 9680 .init_hw_cmn_chip = bxe_init_hw_common_chip, 9681 .init_hw_cmn = bxe_init_hw_common, 9682 .init_hw_port = bxe_init_hw_port, 9683 .init_hw_func = bxe_init_hw_func, 9684 9685 .reset_hw_cmn = bxe_reset_common, 9686 .reset_hw_port = bxe_reset_port, 9687 .reset_hw_func = bxe_reset_func, 9688 9689 .gunzip_init = bxe_gunzip_init, 9690 .gunzip_end = bxe_gunzip_end, 9691 9692 .init_fw = bxe_init_firmware, 9693 .release_fw = bxe_release_firmware, 9694}; 9695 9696static void 9697bxe_init_func_obj(struct bxe_softc *sc) 9698{ 9699 sc->dmae_ready = 0; 9700 9701 ecore_init_func_obj(sc, 9702 &sc->func_obj, 9703 BXE_SP(sc, func_rdata), 9704 BXE_SP_MAPPING(sc, func_rdata), 9705 BXE_SP(sc, func_afex_rdata), 9706 BXE_SP_MAPPING(sc, func_afex_rdata), 9707 &bxe_func_sp_drv); 9708} 9709 9710static int 9711bxe_init_hw(struct bxe_softc *sc, 9712 uint32_t load_code) 9713{ 9714 struct ecore_func_state_params func_params = { NULL }; 9715 int rc; 9716 9717 /* prepare the parameters for function state transitions */ 9718 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); 9719 9720 func_params.f_obj = &sc->func_obj; 9721 func_params.cmd = ECORE_F_CMD_HW_INIT; 9722 9723 func_params.params.hw_init.load_phase = load_code; 9724 9725 /* 9726 * Via a plethora of function pointers, we will eventually reach 9727 * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func(). 
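 * The dispatch runs through the bxe_func_sp_drv ops table registered by
 * bxe_init_func_obj() above: ecore_func_state_change() consumes the
 * ECORE_F_CMD_HW_INIT command and calls the init_hw_* callback that
 * corresponds to the load phase passed in load_code.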
9728 */ 9729 rc = ecore_func_state_change(sc, &func_params); 9730 9731 return (rc); 9732} 9733 9734static void 9735bxe_fill(struct bxe_softc *sc, 9736 uint32_t addr, 9737 int fill, 9738 uint32_t len) 9739{ 9740 uint32_t i; 9741 9742 if (!(len % 4) && !(addr % 4)) { 9743 for (i = 0; i < len; i += 4) { 9744 REG_WR(sc, (addr + i), fill); 9745 } 9746 } else { 9747 for (i = 0; i < len; i++) { 9748 REG_WR8(sc, (addr + i), fill); 9749 } 9750 } 9751} 9752 9753/* writes FP SP data to FW - data_size in dwords */ 9754static void 9755bxe_wr_fp_sb_data(struct bxe_softc *sc, 9756 int fw_sb_id, 9757 uint32_t *sb_data_p, 9758 uint32_t data_size) 9759{ 9760 int index; 9761 9762 for (index = 0; index < data_size; index++) { 9763 REG_WR(sc, 9764 (BAR_CSTRORM_INTMEM + 9765 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + 9766 (sizeof(uint32_t) * index)), 9767 *(sb_data_p + index)); 9768 } 9769} 9770 9771static void 9772bxe_zero_fp_sb(struct bxe_softc *sc, 9773 int fw_sb_id) 9774{ 9775 struct hc_status_block_data_e2 sb_data_e2; 9776 struct hc_status_block_data_e1x sb_data_e1x; 9777 uint32_t *sb_data_p; 9778 uint32_t data_size = 0; 9779 9780 if (!CHIP_IS_E1x(sc)) { 9781 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 9782 sb_data_e2.common.state = SB_DISABLED; 9783 sb_data_e2.common.p_func.vf_valid = FALSE; 9784 sb_data_p = (uint32_t *)&sb_data_e2; 9785 data_size = (sizeof(struct hc_status_block_data_e2) / 9786 sizeof(uint32_t)); 9787 } else { 9788 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); 9789 sb_data_e1x.common.state = SB_DISABLED; 9790 sb_data_e1x.common.p_func.vf_valid = FALSE; 9791 sb_data_p = (uint32_t *)&sb_data_e1x; 9792 data_size = (sizeof(struct hc_status_block_data_e1x) / 9793 sizeof(uint32_t)); 9794 } 9795 9796 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 9797 9798 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 9799 0, CSTORM_STATUS_BLOCK_SIZE); 9800 bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)), 9801 0, CSTORM_SYNC_BLOCK_SIZE); 9802} 9803 9804static void 9805bxe_wr_sp_sb_data(struct bxe_softc *sc, 9806 struct hc_sp_status_block_data *sp_sb_data) 9807{ 9808 int i; 9809 9810 for (i = 0; 9811 i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t)); 9812 i++) { 9813 REG_WR(sc, 9814 (BAR_CSTRORM_INTMEM + 9815 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) + 9816 (i * sizeof(uint32_t))), 9817 *((uint32_t *)sp_sb_data + i)); 9818 } 9819} 9820 9821static void 9822bxe_zero_sp_sb(struct bxe_softc *sc) 9823{ 9824 struct hc_sp_status_block_data sp_sb_data; 9825 9826 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 9827 9828 sp_sb_data.state = SB_DISABLED; 9829 sp_sb_data.p_func.vf_valid = FALSE; 9830 9831 bxe_wr_sp_sb_data(sc, &sp_sb_data); 9832 9833 bxe_fill(sc, 9834 (BAR_CSTRORM_INTMEM + 9835 CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))), 9836 0, CSTORM_SP_STATUS_BLOCK_SIZE); 9837 bxe_fill(sc, 9838 (BAR_CSTRORM_INTMEM + 9839 CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))), 9840 0, CSTORM_SP_SYNC_BLOCK_SIZE); 9841} 9842 9843static void 9844bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, 9845 int igu_sb_id, 9846 int igu_seg_id) 9847{ 9848 hc_sm->igu_sb_id = igu_sb_id; 9849 hc_sm->igu_seg_id = igu_seg_id; 9850 hc_sm->timer_value = 0xFF; 9851 hc_sm->time_to_expire = 0xFFFFFFFF; 9852} 9853 9854static void 9855bxe_map_sb_state_machines(struct hc_index_data *index_data) 9856{ 9857 /* zero out state machine indices */ 9858 9859 /* rx indices */ 9860 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags 
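/*
 * Every index is tagged with the state machine that drives it: the RX
 * CQ index is bound to SM_RX_ID and all TX CQ indices (including the
 * OOO queue) to SM_TX_ID, matching the two hc_status_block_sm entries
 * that bxe_init_sb() programs via bxe_setup_ndsb_state_machine().
 */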
&= ~HC_INDEX_DATA_SM_ID; 9861 9862 /* tx indices */ 9863 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; 9864 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; 9865 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; 9866 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; 9867 9868 /* map indices */ 9869 9870 /* rx indices */ 9871 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= 9872 (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9873 9874 /* tx indices */ 9875 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= 9876 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9877 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= 9878 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9879 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= 9880 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9881 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= 9882 (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); 9883} 9884 9885static void 9886bxe_init_sb(struct bxe_softc *sc, 9887 bus_addr_t busaddr, 9888 int vfid, 9889 uint8_t vf_valid, 9890 int fw_sb_id, 9891 int igu_sb_id) 9892{ 9893 struct hc_status_block_data_e2 sb_data_e2; 9894 struct hc_status_block_data_e1x sb_data_e1x; 9895 struct hc_status_block_sm *hc_sm_p; 9896 uint32_t *sb_data_p; 9897 int igu_seg_id; 9898 int data_size; 9899 9900 if (CHIP_INT_MODE_IS_BC(sc)) { 9901 igu_seg_id = HC_SEG_ACCESS_NORM; 9902 } else { 9903 igu_seg_id = IGU_SEG_ACCESS_NORM; 9904 } 9905 9906 bxe_zero_fp_sb(sc, fw_sb_id); 9907 9908 if (!CHIP_IS_E1x(sc)) { 9909 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); 9910 sb_data_e2.common.state = SB_ENABLED; 9911 sb_data_e2.common.p_func.pf_id = SC_FUNC(sc); 9912 sb_data_e2.common.p_func.vf_id = vfid; 9913 sb_data_e2.common.p_func.vf_valid = vf_valid; 9914 sb_data_e2.common.p_func.vnic_id = SC_VN(sc); 9915 sb_data_e2.common.same_igu_sb_1b = TRUE; 9916 sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr); 9917 sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr); 9918 hc_sm_p = sb_data_e2.common.state_machine; 9919 sb_data_p = (uint32_t *)&sb_data_e2; 9920 data_size = (sizeof(struct hc_status_block_data_e2) / 9921 sizeof(uint32_t)); 9922 bxe_map_sb_state_machines(sb_data_e2.index_data); 9923 } else { 9924 memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); 9925 sb_data_e1x.common.state = SB_ENABLED; 9926 sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc); 9927 sb_data_e1x.common.p_func.vf_id = 0xff; 9928 sb_data_e1x.common.p_func.vf_valid = FALSE; 9929 sb_data_e1x.common.p_func.vnic_id = SC_VN(sc); 9930 sb_data_e1x.common.same_igu_sb_1b = TRUE; 9931 sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr); 9932 sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr); 9933 hc_sm_p = sb_data_e1x.common.state_machine; 9934 sb_data_p = (uint32_t *)&sb_data_e1x; 9935 data_size = (sizeof(struct hc_status_block_data_e1x) / 9936 sizeof(uint32_t)); 9937 bxe_map_sb_state_machines(sb_data_e1x.index_data); 9938 } 9939 9940 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id); 9941 bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id); 9942 9943 BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id); 9944 9945 /* write indices to HW - PCI guarantees endianity of regpairs */ 9946 bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); 9947} 9948 9949static inline uint8_t 9950bxe_fp_qzone_id(struct bxe_fastpath *fp) 9951{ 9952 if (CHIP_IS_E1x(fp->sc)) { 9953 return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H); 9954 } else { 9955 return (fp->cl_id); 9956 } 9957} 9958 
9959static inline uint32_t 9960bxe_rx_ustorm_prods_offset(struct bxe_softc *sc, 9961 struct bxe_fastpath *fp) 9962{ 9963 uint32_t offset = BAR_USTRORM_INTMEM; 9964 9965#if 0 9966 if (IS_VF(sc)) { 9967 return (PXP_VF_ADDR_USDM_QUEUES_START + 9968 (sc->acquire_resp.resc.hw_qid[fp->index] * 9969 sizeof(struct ustorm_queue_zone_data))); 9970 } else 9971#endif 9972 if (!CHIP_IS_E1x(sc)) { 9973 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); 9974 } else { 9975 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id); 9976 } 9977 9978 return (offset); 9979} 9980 9981static void 9982bxe_init_eth_fp(struct bxe_softc *sc, 9983 int idx) 9984{ 9985 struct bxe_fastpath *fp = &sc->fp[idx]; 9986 uint32_t cids[ECORE_MULTI_TX_COS] = { 0 }; 9987 unsigned long q_type = 0; 9988 int cos; 9989 9990 fp->sc = sc; 9991 fp->index = idx; 9992 9993 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 9994 "bxe%d_fp%d_tx_lock", sc->unit, idx); 9995 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 9996 9997 snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name), 9998 "bxe%d_fp%d_rx_lock", sc->unit, idx); 9999 mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF); 10000 10001 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc)); 10002 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc)); 10003 10004 fp->cl_id = (CHIP_IS_E1x(sc)) ? 10005 (SC_L_ID(sc) + idx) : 10006 /* want client ID same as IGU SB ID for non-E1 */ 10007 fp->igu_sb_id; 10008 fp->cl_qzone_id = bxe_fp_qzone_id(fp); 10009 10010 /* setup sb indices */ 10011 if (!CHIP_IS_E1x(sc)) { 10012 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values; 10013 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index; 10014 } else { 10015 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values; 10016 fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index; 10017 } 10018 10019 /* init shortcut */ 10020 fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp); 10021 10022 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]; 10023 10024 /* 10025 * XXX If multiple CoS is ever supported then each fastpath structure 10026 * will need to maintain tx producer/consumer/dma/etc values *per* CoS. 
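* For now each entry of the cids[] array below is simply set to the same fastpath index, so all CoS values share a single connection id.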
10027 */ 10028 for (cos = 0; cos < sc->max_cos; cos++) { 10029 cids[cos] = idx; 10030 } 10031 fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]; 10032 10033 /* nothing more for a VF to do */ 10034 if (IS_VF(sc)) { 10035 return; 10036 } 10037 10038 bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE, 10039 fp->fw_sb_id, fp->igu_sb_id); 10040 10041 bxe_update_fp_sb_idx(fp); 10042 10043 /* Configure Queue State object */ 10044 bit_set(&q_type, ECORE_Q_TYPE_HAS_RX); 10045 bit_set(&q_type, ECORE_Q_TYPE_HAS_TX); 10046 10047 ecore_init_queue_obj(sc, 10048 &sc->sp_objs[idx].q_obj, 10049 fp->cl_id, 10050 cids, 10051 sc->max_cos, 10052 SC_FUNC(sc), 10053 BXE_SP(sc, q_rdata), 10054 BXE_SP_MAPPING(sc, q_rdata), 10055 q_type); 10056 10057 /* configure classification DBs */ 10058 ecore_init_mac_obj(sc, 10059 &sc->sp_objs[idx].mac_obj, 10060 fp->cl_id, 10061 idx, 10062 SC_FUNC(sc), 10063 BXE_SP(sc, mac_rdata), 10064 BXE_SP_MAPPING(sc, mac_rdata), 10065 ECORE_FILTER_MAC_PENDING, 10066 &sc->sp_state, 10067 ECORE_OBJ_TYPE_RX_TX, 10068 &sc->macs_pool); 10069 10070 BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n", 10071 idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id); 10072} 10073 10074static inline void 10075bxe_update_rx_prod(struct bxe_softc *sc, 10076 struct bxe_fastpath *fp, 10077 uint16_t rx_bd_prod, 10078 uint16_t rx_cq_prod, 10079 uint16_t rx_sge_prod) 10080{ 10081 struct ustorm_eth_rx_producers rx_prods = { 0 }; 10082 uint32_t i; 10083 10084 /* update producers */ 10085 rx_prods.bd_prod = rx_bd_prod; 10086 rx_prods.cqe_prod = rx_cq_prod; 10087 rx_prods.sge_prod = rx_sge_prod; 10088 10089 /* 10090 * Make sure that the BD and SGE data is updated before updating the 10091 * producers since FW might read the BD/SGE right after the producer 10092 * is updated. 10093 * This is only applicable for weak-ordered memory model archs such 10094 * as IA-64. The following barrier is also mandatory since the FW will 10095 assume BDs must have buffers. 10096 */ 10097 wmb(); 10098 10099 for (i = 0; i < (sizeof(rx_prods) / 4); i++) { 10100 REG_WR(sc, 10101 (fp->ustorm_rx_prods_offset + (i * 4)), 10102 ((uint32_t *)&rx_prods)[i]); 10103 } 10104 10105 wmb(); /* keep prod updates ordered */ 10106 10107 BLOGD(sc, DBG_RX, 10108 "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n", 10109 fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod); 10110} 10111 10112static void 10113bxe_init_rx_rings(struct bxe_softc *sc) 10114{ 10115 struct bxe_fastpath *fp; 10116 int i; 10117 10118 for (i = 0; i < sc->num_queues; i++) { 10119 fp = &sc->fp[i]; 10120 10121 fp->rx_bd_cons = 0; 10122 10123 /* 10124 * Activate the BD ring... 
10125 * Warning, this will generate an interrupt (to the TSTORM) 10126 * so this can only be done after the chip is initialized 10127 */ 10128 bxe_update_rx_prod(sc, fp, 10129 fp->rx_bd_prod, 10130 fp->rx_cq_prod, 10131 fp->rx_sge_prod); 10132 10133 if (i != 0) { 10134 continue; 10135 } 10136 10137 if (CHIP_IS_E1(sc)) { 10138 REG_WR(sc, 10139 (BAR_USTRORM_INTMEM + 10140 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))), 10141 U64_LO(fp->rcq_dma.paddr)); 10142 REG_WR(sc, 10143 (BAR_USTRORM_INTMEM + 10144 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4), 10145 U64_HI(fp->rcq_dma.paddr)); 10146 } 10147 } 10148} 10149 10150static void 10151bxe_init_tx_ring_one(struct bxe_fastpath *fp) 10152{ 10153 SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); 10154 fp->tx_db.data.zero_fill1 = 0; 10155 fp->tx_db.data.prod = 0; 10156 10157 fp->tx_pkt_prod = 0; 10158 fp->tx_pkt_cons = 0; 10159 fp->tx_bd_prod = 0; 10160 fp->tx_bd_cons = 0; 10161 fp->eth_q_stats.tx_pkts = 0; 10162} 10163 10164static inline void 10165bxe_init_tx_rings(struct bxe_softc *sc) 10166{ 10167 int i; 10168 10169 for (i = 0; i < sc->num_queues; i++) { 10170#if 0 10171 uint8_t cos; 10172 for (cos = 0; cos < sc->max_cos; cos++) { 10173 bxe_init_tx_ring_one(&sc->fp[i].txdata[cos]); 10174 } 10175#else 10176 bxe_init_tx_ring_one(&sc->fp[i]); 10177#endif 10178 } 10179} 10180 10181static void 10182bxe_init_def_sb(struct bxe_softc *sc) 10183{ 10184 struct host_sp_status_block *def_sb = sc->def_sb; 10185 bus_addr_t mapping = sc->def_sb_dma.paddr; 10186 int igu_sp_sb_index; 10187 int igu_seg_id; 10188 int port = SC_PORT(sc); 10189 int func = SC_FUNC(sc); 10190 int reg_offset, reg_offset_en5; 10191 uint64_t section; 10192 int index, sindex; 10193 struct hc_sp_status_block_data sp_sb_data; 10194 10195 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); 10196 10197 if (CHIP_INT_MODE_IS_BC(sc)) { 10198 igu_sp_sb_index = DEF_SB_IGU_ID; 10199 igu_seg_id = HC_SEG_ACCESS_DEF; 10200 } else { 10201 igu_sp_sb_index = sc->igu_dsb_id; 10202 igu_seg_id = IGU_SEG_ACCESS_DEF; 10203 } 10204 10205 /* attentions */ 10206 section = ((uint64_t)mapping + 10207 offsetof(struct host_sp_status_block, atten_status_block)); 10208 def_sb->atten_status_block.status_block_id = igu_sp_sb_index; 10209 sc->attn_state = 0; 10210 10211 reg_offset = (port) ? 10212 MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 10213 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; 10214 reg_offset_en5 = (port) ? 10215 MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : 10216 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0; 10217 10218 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 10219 /* take care of sig[0]..sig[4] */ 10220 for (sindex = 0; sindex < 4; sindex++) { 10221 sc->attn_group[index].sig[sindex] = 10222 REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index))); 10223 } 10224 10225 if (!CHIP_IS_E1x(sc)) { 10226 /* 10227 * enable5 is separate from the rest of the registers, 10228 * and the address skip is 4 and not 16 between the 10229 * different groups 10230 */ 10231 sc->attn_group[index].sig[4] = 10232 REG_RD(sc, (reg_offset_en5 + (0x4 * index))); 10233 } else { 10234 sc->attn_group[index].sig[4] = 0; 10235 } 10236 } 10237 10238 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10239 reg_offset = (port) ? 
10240 HC_REG_ATTN_MSG1_ADDR_L : 10241 HC_REG_ATTN_MSG0_ADDR_L; 10242 REG_WR(sc, reg_offset, U64_LO(section)); 10243 REG_WR(sc, (reg_offset + 4), U64_HI(section)); 10244 } else if (!CHIP_IS_E1x(sc)) { 10245 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); 10246 REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); 10247 } 10248 10249 section = ((uint64_t)mapping + 10250 offsetof(struct host_sp_status_block, sp_sb)); 10251 10252 bxe_zero_sp_sb(sc); 10253 10254 /* PCI guarantees endianity of regpair */ 10255 sp_sb_data.state = SB_ENABLED; 10256 sp_sb_data.host_sb_addr.lo = U64_LO(section); 10257 sp_sb_data.host_sb_addr.hi = U64_HI(section); 10258 sp_sb_data.igu_sb_id = igu_sp_sb_index; 10259 sp_sb_data.igu_seg_id = igu_seg_id; 10260 sp_sb_data.p_func.pf_id = func; 10261 sp_sb_data.p_func.vnic_id = SC_VN(sc); 10262 sp_sb_data.p_func.vf_id = 0xff; 10263 10264 bxe_wr_sp_sb_data(sc, &sp_sb_data); 10265 10266 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 10267} 10268 10269static void 10270bxe_init_sp_ring(struct bxe_softc *sc) 10271{ 10272 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING); 10273 sc->spq_prod_idx = 0; 10274 sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS]; 10275 sc->spq_prod_bd = sc->spq; 10276 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT); 10277} 10278 10279static void 10280bxe_init_eq_ring(struct bxe_softc *sc) 10281{ 10282 union event_ring_elem *elem; 10283 int i; 10284 10285 for (i = 1; i <= NUM_EQ_PAGES; i++) { 10286 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1]; 10287 10288 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr + 10289 BCM_PAGE_SIZE * 10290 (i % NUM_EQ_PAGES))); 10291 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr + 10292 BCM_PAGE_SIZE * 10293 (i % NUM_EQ_PAGES))); 10294 } 10295 10296 sc->eq_cons = 0; 10297 sc->eq_prod = NUM_EQ_DESC; 10298 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS]; 10299 10300 atomic_store_rel_long(&sc->eq_spq_left, 10301 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING), 10302 NUM_EQ_DESC) - 1)); 10303} 10304 10305static void 10306bxe_init_internal_common(struct bxe_softc *sc) 10307{ 10308 int i; 10309 10310 if (IS_MF_SI(sc)) { 10311 /* 10312 * In switch independent mode, the TSTORM needs to accept 10313 * packets that failed classification, since approximate match 10314 * mac addresses aren't written to NIG LLH. 10315 */ 10316 REG_WR8(sc, 10317 (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 10318 2); 10319 } else if (!CHIP_IS_E1(sc)) { /* 57710 doesn't support MF */ 10320 REG_WR8(sc, 10321 (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET), 10322 0); 10323 } 10324 10325 /* 10326 * Zero this manually as its initialization is currently missing 10327 * in the initTool. 10328 */ 10329 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) { 10330 REG_WR(sc, 10331 (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)), 10332 0); 10333 } 10334 10335 if (!CHIP_IS_E1x(sc)) { 10336 REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET), 10337 CHIP_INT_MODE_IS_BC(sc) ? 
HC_IGU_BC_MODE : HC_IGU_NBC_MODE); 10338 } 10339} 10340 10341static void 10342bxe_init_internal(struct bxe_softc *sc, 10343 uint32_t load_code) 10344{ 10345 switch (load_code) { 10346 case FW_MSG_CODE_DRV_LOAD_COMMON: 10347 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: 10348 bxe_init_internal_common(sc); 10349 /* no break */ 10350 10351 case FW_MSG_CODE_DRV_LOAD_PORT: 10352 /* nothing to do */ 10353 /* no break */ 10354 10355 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 10356 /* internal memory per function is initialized inside bxe_pf_init */ 10357 break; 10358 10359 default: 10360 BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code); 10361 break; 10362 } 10363} 10364 10365static void 10366storm_memset_func_cfg(struct bxe_softc *sc, 10367 struct tstorm_eth_function_common_config *tcfg, 10368 uint16_t abs_fid) 10369{ 10370 uint32_t addr; 10371 size_t size; 10372 10373 addr = (BAR_TSTRORM_INTMEM + 10374 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid)); 10375 size = sizeof(struct tstorm_eth_function_common_config); 10376 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg); 10377} 10378 10379static void 10380bxe_func_init(struct bxe_softc *sc, 10381 struct bxe_func_init_params *p) 10382{ 10383 struct tstorm_eth_function_common_config tcfg = { 0 }; 10384 10385 if (CHIP_IS_E1x(sc)) { 10386 storm_memset_func_cfg(sc, &tcfg, p->func_id); 10387 } 10388 10389 /* Enable the function in the FW */ 10390 storm_memset_vf_to_pf(sc, p->func_id, p->pf_id); 10391 storm_memset_func_en(sc, p->func_id, 1); 10392 10393 /* spq */ 10394 if (p->func_flgs & FUNC_FLG_SPQ) { 10395 storm_memset_spq_addr(sc, p->spq_map, p->func_id); 10396 REG_WR(sc, 10397 (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)), 10398 p->spq_prod); 10399 } 10400} 10401 10402/* 10403 * Calculates the sum of vn_min_rates. 10404 * It's needed for further normalizing of the min_rates. 10405 * Returns: 10406 * sum of vn_min_rates. 10407 * or 10408 * 0 - if all the min_rates are 0. 10409 * In the latter case the fairness algorithm should be deactivated. 10410 * If all min rates are not zero then those that are zeroes will be set to 1. 
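* Illustrative example: if two vnics are configured with MIN BW values of 25 and 75, the sum is 100 and each vnic's rate is later normalized against it; if every MIN BW is 0, the fairness algorithm is deactivated instead.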
10411 */ 10412static void 10413bxe_calc_vn_min(struct bxe_softc *sc, 10414 struct cmng_init_input *input) 10415{ 10416 uint32_t vn_cfg; 10417 uint32_t vn_min_rate; 10418 int all_zero = 1; 10419 int vn; 10420 10421 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 10422 vn_cfg = sc->devinfo.mf_info.mf_config[vn]; 10423 vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 10424 FUNC_MF_CFG_MIN_BW_SHIFT) * 100); 10425 10426 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { 10427 /* skip hidden VNs */ 10428 vn_min_rate = 0; 10429 } else if (!vn_min_rate) { 10430 /* If min rate is zero - set it to 100 */ 10431 vn_min_rate = DEF_MIN_RATE; 10432 } else { 10433 all_zero = 0; 10434 } 10435 10436 input->vnic_min_rate[vn] = vn_min_rate; 10437 } 10438 10439 /* if ETS or all min rates are zeros - disable fairness */ 10440 if (BXE_IS_ETS_ENABLED(sc)) { 10441 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 10442 BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n"); 10443 } else if (all_zero) { 10444 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 10445 BLOGD(sc, DBG_LOAD, 10446 "Fairness disabled (all MIN values are zeroes)\n"); 10447 } else { 10448 input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 10449 } 10450} 10451 10452static inline uint16_t 10453bxe_extract_max_cfg(struct bxe_softc *sc, 10454 uint32_t mf_cfg) 10455{ 10456 uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 10457 FUNC_MF_CFG_MAX_BW_SHIFT); 10458 10459 if (!max_cfg) { 10460 BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n"); 10461 max_cfg = 100; 10462 } 10463 10464 return (max_cfg); 10465} 10466 10467static void 10468bxe_calc_vn_max(struct bxe_softc *sc, 10469 int vn, 10470 struct cmng_init_input *input) 10471{ 10472 uint16_t vn_max_rate; 10473 uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn]; 10474 uint32_t max_cfg; 10475 10476 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { 10477 vn_max_rate = 0; 10478 } else { 10479 max_cfg = bxe_extract_max_cfg(sc, vn_cfg); 10480 10481 if (IS_MF_SI(sc)) { 10482 /* max_cfg in percents of linkspeed */ 10483 vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100); 10484 } else { /* SD modes */ 10485 /* max_cfg is absolute in 100Mb units */ 10486 vn_max_rate = (max_cfg * 100); 10487 } 10488 } 10489 10490 BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate); 10491 10492 input->vnic_max_rate[vn] = vn_max_rate; 10493} 10494 10495static void 10496bxe_cmng_fns_init(struct bxe_softc *sc, 10497 uint8_t read_cfg, 10498 uint8_t cmng_type) 10499{ 10500 struct cmng_init_input input; 10501 int vn; 10502 10503 memset(&input, 0, sizeof(struct cmng_init_input)); 10504 10505 input.port_rate = sc->link_vars.line_speed; 10506 10507 if (cmng_type == CMNG_FNS_MINMAX) { 10508 /* read mf conf from shmem */ 10509 if (read_cfg) { 10510 bxe_read_mf_cfg(sc); 10511 } 10512 10513 /* get VN min rate and enable fairness if not 0 */ 10514 bxe_calc_vn_min(sc, &input); 10515 10516 /* get VN max rate */ 10517 if (sc->port.pmf) { 10518 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 10519 bxe_calc_vn_max(sc, vn, &input); 10520 } 10521 } 10522 10523 /* always enable rate shaping and fairness */ 10524 input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; 10525 10526 ecore_init_cmng(&input, &sc->cmng); 10527 return; 10528 } 10529 10530 /* rate shaping and fairness are disabled */ 10531 BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n"); 10532} 10533 10534static int 10535bxe_get_cmng_fns_mode(struct bxe_softc *sc) 10536{ 10537 if (CHIP_REV_IS_SLOW(sc)) { 
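/* no congestion management on emulation/FPGA (slow) chip revisions */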
10538 return (CMNG_FNS_NONE); 10539 } 10540 10541 if (IS_MF(sc)) { 10542 return (CMNG_FNS_MINMAX); 10543 } 10544 10545 return (CMNG_FNS_NONE); 10546} 10547 10548static void 10549storm_memset_cmng(struct bxe_softc *sc, 10550 struct cmng_init *cmng, 10551 uint8_t port) 10552{ 10553 int vn; 10554 int func; 10555 uint32_t addr; 10556 size_t size; 10557 10558 addr = (BAR_XSTRORM_INTMEM + 10559 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port)); 10560 size = sizeof(struct cmng_struct_per_port); 10561 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port); 10562 10563 for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { 10564 func = func_by_vn(sc, vn); 10565 10566 addr = (BAR_XSTRORM_INTMEM + 10567 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func)); 10568 size = sizeof(struct rate_shaping_vars_per_vn); 10569 ecore_storm_memset_struct(sc, addr, size, 10570 (uint32_t *)&cmng->vnic.vnic_max_rate[vn]); 10571 10572 addr = (BAR_XSTRORM_INTMEM + 10573 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func)); 10574 size = sizeof(struct fairness_vars_per_vn); 10575 ecore_storm_memset_struct(sc, addr, size, 10576 (uint32_t *)&cmng->vnic.vnic_min_rate[vn]); 10577 } 10578} 10579 10580static void 10581bxe_pf_init(struct bxe_softc *sc) 10582{ 10583 struct bxe_func_init_params func_init = { 0 }; 10584 struct event_ring_data eq_data = { { 0 } }; 10585 uint16_t flags; 10586 10587 if (!CHIP_IS_E1x(sc)) { 10588 /* reset IGU PF statistics: MSIX + ATTN */ 10589 /* PF */ 10590 REG_WR(sc, 10591 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 10592 (BXE_IGU_STAS_MSG_VF_CNT * 4) + 10593 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)), 10594 0); 10595 /* ATTN */ 10596 REG_WR(sc, 10597 (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + 10598 (BXE_IGU_STAS_MSG_VF_CNT * 4) + 10599 (BXE_IGU_STAS_MSG_PF_CNT * 4) + 10600 ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)), 10601 0); 10602 } 10603 10604 /* function setup flags */ 10605 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); 10606 10607 /* 10608 * This flag is relevant for E1x only. 10609 * E2 doesn't have a TPA configuration at the function level. 10610 */ 10611 flags |= (sc->ifnet->if_capenable & IFCAP_LRO) ? FUNC_FLG_TPA : 0; 10612 10613 func_init.func_flgs = flags; 10614 func_init.pf_id = SC_FUNC(sc); 10615 func_init.func_id = SC_FUNC(sc); 10616 func_init.spq_map = sc->spq_dma.paddr; 10617 func_init.spq_prod = sc->spq_prod_idx; 10618 10619 bxe_func_init(sc, &func_init); 10620 10621 memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port)); 10622 10623 /* 10624 * Congestion management values depend on the link rate. 10625 * There is no active link so initial link rate is set to 10Gbps. 10626 * When the link comes up the congestion management values are 10627 * re-calculated according to the actual link rate. 10628 */ 10629 sc->link_vars.line_speed = SPEED_10000; 10630 bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc)); 10631 10632 /* Only the PMF sets the HW */ 10633 if (sc->port.pmf) { 10634 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); 10635 } 10636 10637 /* init Event Queue - PCI bus guarantees correct endianity */ 10638 eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr); 10639 eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr); 10640 eq_data.producer = sc->eq_prod; 10641 eq_data.index_id = HC_SP_INDEX_EQ_CONS; 10642 eq_data.sb_id = DEF_SB_ID; 10643 storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc)); 10644} 10645 10646static void 10647bxe_hc_int_enable(struct bxe_softc *sc) 10648{ 10649 int port = SC_PORT(sc); 10650 uint32_t addr = (port) ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 10651 uint32_t val = REG_RD(sc, addr); 10652 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; 10653 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && 10654 (sc->intr_count == 1)) ? TRUE : FALSE; 10655 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; 10656 10657 if (msix) { 10658 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10659 HC_CONFIG_0_REG_INT_LINE_EN_0); 10660 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10661 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10662 if (single_msix) { 10663 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; 10664 } 10665 } else if (msi) { 10666 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; 10667 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10668 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10669 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10670 } else { 10671 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10672 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10673 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10674 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10675 10676 if (!CHIP_IS_E1(sc)) { 10677 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", 10678 val, port, addr); 10679 10680 REG_WR(sc, addr, val); 10681 10682 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; 10683 } 10684 } 10685 10686 if (CHIP_IS_E1(sc)) { 10687 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF); 10688 } 10689 10690 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n", 10691 val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx"))); 10692 10693 REG_WR(sc, addr, val); 10694 10695 /* ensure that HC_CONFIG is written before leading/trailing edge config */ 10696 mb(); 10697 10698 if (!CHIP_IS_E1(sc)) { 10699 /* init leading/trailing edge */ 10700 if (IS_MF(sc)) { 10701 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 10702 if (sc->port.pmf) { 10703 /* enable nig and gpio3 attention */ 10704 val |= 0x1100; 10705 } 10706 } else { 10707 val = 0xffff; 10708 } 10709 10710 REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val); 10711 REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val); 10712 } 10713 10714 /* make sure that interrupts are indeed enabled from here on */ 10715 mb(); 10716} 10717 10718static void 10719bxe_igu_int_enable(struct bxe_softc *sc) 10720{ 10721 uint32_t val; 10722 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; 10723 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && 10724 (sc->intr_count == 1)) ? TRUE : FALSE; 10725 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; 10726 10727 val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 10728 10729 if (msix) { 10730 val &= ~(IGU_PF_CONF_INT_LINE_EN | 10731 IGU_PF_CONF_SINGLE_ISR_EN); 10732 val |= (IGU_PF_CONF_MSI_MSIX_EN | 10733 IGU_PF_CONF_ATTN_BIT_EN); 10734 if (single_msix) { 10735 val |= IGU_PF_CONF_SINGLE_ISR_EN; 10736 } 10737 } else if (msi) { 10738 val &= ~IGU_PF_CONF_INT_LINE_EN; 10739 val |= (IGU_PF_CONF_MSI_MSIX_EN | 10740 IGU_PF_CONF_ATTN_BIT_EN | 10741 IGU_PF_CONF_SINGLE_ISR_EN); 10742 } else { 10743 val &= ~IGU_PF_CONF_MSI_MSIX_EN; 10744 val |= (IGU_PF_CONF_INT_LINE_EN | 10745 IGU_PF_CONF_ATTN_BIT_EN | 10746 IGU_PF_CONF_SINGLE_ISR_EN); 10747 } 10748 10749 /* clean previous status - need to configure igu prior to ack */ 10750 if ((!msix) || single_msix) { 10751 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10752 bxe_ack_int(sc); 10753 } 10754 10755 val |= IGU_PF_CONF_FUNC_EN; 10756 10757 BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n", 10758 val, ((msix) ? "MSI-X" : ((msi) ? 
"MSI" : "INTx"))); 10759 10760 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10761 10762 mb(); 10763 10764 /* init leading/trailing edge */ 10765 if (IS_MF(sc)) { 10766 val = (0xee0f | (1 << (SC_VN(sc) + 4))); 10767 if (sc->port.pmf) { 10768 /* enable nig and gpio3 attention */ 10769 val |= 0x1100; 10770 } 10771 } else { 10772 val = 0xffff; 10773 } 10774 10775 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); 10776 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); 10777 10778 /* make sure that interrupts are indeed enabled from here on */ 10779 mb(); 10780} 10781 10782static void 10783bxe_int_enable(struct bxe_softc *sc) 10784{ 10785 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10786 bxe_hc_int_enable(sc); 10787 } else { 10788 bxe_igu_int_enable(sc); 10789 } 10790} 10791 10792static void 10793bxe_hc_int_disable(struct bxe_softc *sc) 10794{ 10795 int port = SC_PORT(sc); 10796 uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 10797 uint32_t val = REG_RD(sc, addr); 10798 10799 /* 10800 * In E1 we must use only PCI configuration space to disable MSI/MSIX 10801 * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC 10802 * block 10803 */ 10804 if (CHIP_IS_E1(sc)) { 10805 /* 10806 * Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the mask register 10807 * to prevent the HC from sending interrupts after we exit the function 10808 */ 10809 REG_WR(sc, (HC_REG_INT_MASK + port*4), 0); 10810 10811 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10812 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10813 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10814 } else { 10815 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 10816 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 10817 HC_CONFIG_0_REG_INT_LINE_EN_0 | 10818 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 10819 } 10820 10821 BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr); 10822 10823 /* flush all outstanding writes */ 10824 mb(); 10825 10826 REG_WR(sc, addr, val); 10827 if (REG_RD(sc, addr) != val) { 10828 BLOGE(sc, "proper val not read from HC IGU!\n"); 10829 } 10830} 10831 10832static void 10833bxe_igu_int_disable(struct bxe_softc *sc) 10834{ 10835 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 10836 10837 val &= ~(IGU_PF_CONF_MSI_MSIX_EN | 10838 IGU_PF_CONF_INT_LINE_EN | 10839 IGU_PF_CONF_ATTN_BIT_EN); 10840 10841 BLOGD(sc, DBG_INTR, "write %x to IGU\n", val); 10842 10843 /* flush all outstanding writes */ 10844 mb(); 10845 10846 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 10847 if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) { 10848 BLOGE(sc, "proper val not read from IGU!\n"); 10849 } 10850} 10851 10852static void 10853bxe_int_disable(struct bxe_softc *sc) 10854{ 10855 if (sc->devinfo.int_block == INT_BLOCK_HC) { 10856 bxe_hc_int_disable(sc); 10857 } else { 10858 bxe_igu_int_disable(sc); 10859 } 10860} 10861 10862static void 10863bxe_nic_init(struct bxe_softc *sc, 10864 int load_code) 10865{ 10866 int i; 10867 10868 for (i = 0; i < sc->num_queues; i++) { 10869 bxe_init_eth_fp(sc, i); 10870 } 10871 10872 rmb(); /* ensure status block indices were read */ 10873 10874 bxe_init_rx_rings(sc); 10875 bxe_init_tx_rings(sc); 10876 10877 if (IS_VF(sc)) { 10878 return; 10879 } 10880 10881 /* initialize MOD_ABS interrupts */ 10882 elink_init_mod_abs_int(sc, &sc->link_vars, 10883 sc->devinfo.chip_id, 10884 sc->devinfo.shmem_base, 10885 sc->devinfo.shmem2_base, 10886 SC_PORT(sc)); 10887 10888 bxe_init_def_sb(sc); 10889 bxe_update_dsb_idx(sc); 10890 bxe_init_sp_ring(sc); 10891 bxe_init_eq_ring(sc); 10892 bxe_init_internal(sc, load_code); 10893 bxe_pf_init(sc); 10894 bxe_stats_init(sc); 
10895 10896 /* flush all before enabling interrupts */ 10897 mb(); 10898 10899 bxe_int_enable(sc); 10900 10901 /* check for SPIO5 */ 10902 bxe_attn_int_deasserted0(sc, 10903 REG_RD(sc, 10904 (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + 10905 SC_PORT(sc)*4)) & 10906 AEU_INPUTS_ATTN_BITS_SPIO5); 10907} 10908 10909static inline void 10910bxe_init_objs(struct bxe_softc *sc) 10911{ 10912 /* mcast rules must be added to tx if tx switching is enabled */ 10913 ecore_obj_type o_type = 10914 (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX : 10915 ECORE_OBJ_TYPE_RX; 10916 10917 /* RX_MODE controlling object */ 10918 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj); 10919 10920 /* multicast configuration controlling object */ 10921 ecore_init_mcast_obj(sc, 10922 &sc->mcast_obj, 10923 sc->fp[0].cl_id, 10924 sc->fp[0].index, 10925 SC_FUNC(sc), 10926 SC_FUNC(sc), 10927 BXE_SP(sc, mcast_rdata), 10928 BXE_SP_MAPPING(sc, mcast_rdata), 10929 ECORE_FILTER_MCAST_PENDING, 10930 &sc->sp_state, 10931 o_type); 10932 10933 /* Setup CAM credit pools */ 10934 ecore_init_mac_credit_pool(sc, 10935 &sc->macs_pool, 10936 SC_FUNC(sc), 10937 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 10938 VNICS_PER_PATH(sc)); 10939 10940 ecore_init_vlan_credit_pool(sc, 10941 &sc->vlans_pool, 10942 SC_ABS_FUNC(sc) >> 1, 10943 CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : 10944 VNICS_PER_PATH(sc)); 10945 10946 /* RSS configuration object */ 10947 ecore_init_rss_config_obj(sc, 10948 &sc->rss_conf_obj, 10949 sc->fp[0].cl_id, 10950 sc->fp[0].index, 10951 SC_FUNC(sc), 10952 SC_FUNC(sc), 10953 BXE_SP(sc, rss_rdata), 10954 BXE_SP_MAPPING(sc, rss_rdata), 10955 ECORE_FILTER_RSS_CONF_PENDING, 10956 &sc->sp_state, ECORE_OBJ_TYPE_RX); 10957} 10958 10959/* 10960 * Initialize the function. This must be called before sending CLIENT_SETUP 10961 * for the first client. 
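* It sends the START command through the function state machine and waits here for its completion (RAMROD_COMP_WAIT is set below).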
10962 */ 10963static inline int 10964bxe_func_start(struct bxe_softc *sc) 10965{ 10966 struct ecore_func_state_params func_params = { NULL }; 10967 struct ecore_func_start_params *start_params = &func_params.params.start; 10968 10969 /* Prepare parameters for function state transitions */ 10970 bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); 10971 10972 func_params.f_obj = &sc->func_obj; 10973 func_params.cmd = ECORE_F_CMD_START; 10974 10975 /* Function parameters */ 10976 start_params->mf_mode = sc->devinfo.mf_info.mf_mode; 10977 start_params->sd_vlan_tag = OVLAN(sc); 10978 10979 if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { 10980 start_params->network_cos_mode = STATIC_COS; 10981 } else { /* CHIP_IS_E1X */ 10982 start_params->network_cos_mode = FW_WRR; 10983 } 10984 10985 start_params->gre_tunnel_mode = 0; 10986 start_params->gre_tunnel_rss = 0; 10987 10988 return (ecore_func_state_change(sc, &func_params)); 10989} 10990 10991static int 10992bxe_set_power_state(struct bxe_softc *sc, 10993 uint8_t state) 10994{ 10995 uint16_t pmcsr; 10996 10997 /* If there is no power capability, silently succeed */ 10998 if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) { 10999 BLOGW(sc, "No power capability\n"); 11000 return (0); 11001 } 11002 11003 pmcsr = pci_read_config(sc->dev, 11004 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 11005 2); 11006 11007 switch (state) { 11008 case PCI_PM_D0: 11009 pci_write_config(sc->dev, 11010 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 11011 ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2); 11012 11013 if (pmcsr & PCIM_PSTAT_DMASK) { 11014 /* delay required during transition out of D3hot */ 11015 DELAY(20000); 11016 } 11017 11018 break; 11019 11020 case PCI_PM_D3hot: 11021 /* XXX if there are other clients above don't shut down the power */ 11022 11023 /* don't shut down the power for emulation and FPGA */ 11024 if (CHIP_REV_IS_SLOW(sc)) { 11025 return (0); 11026 } 11027 11028 pmcsr &= ~PCIM_PSTAT_DMASK; 11029 pmcsr |= PCIM_PSTAT_D3; 11030 11031 if (sc->wol) { 11032 pmcsr |= PCIM_PSTAT_PMEENABLE; 11033 } 11034 11035 pci_write_config(sc->dev, 11036 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 11037 pmcsr, 4); 11038 11039 /* 11040 * No more memory access after this point until device is brought back 11041 * to D0 state. 
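* (While in D3hot the device responds to PCI configuration accesses only, so memory-mapped register reads and writes are not possible.)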
11042 */ 11043 break; 11044 11045 default: 11046 BLOGE(sc, "Can't support PCI power state = %d\n", state); 11047 return (-1); 11048 } 11049 11050 return (0); 11051} 11052 11053 11054/* return true if the lock was acquired successfully */ 11055static uint8_t 11056bxe_trylock_hw_lock(struct bxe_softc *sc, 11057 uint32_t resource) 11058{ 11059 uint32_t lock_status; 11060 uint32_t resource_bit = (1 << resource); 11061 int func = SC_FUNC(sc); 11062 uint32_t hw_lock_control_reg; 11063 11064 BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource); 11065 11066 /* Validating that the resource is within range */ 11067 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 11068 BLOGD(sc, DBG_LOAD, 11069 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", 11070 resource, HW_LOCK_MAX_RESOURCE_VALUE); 11071 return (FALSE); 11072 } 11073 11074 if (func <= 5) { 11075 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); 11076 } else { 11077 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); 11078 } 11079 11080 /* try to acquire the lock */ 11081 REG_WR(sc, hw_lock_control_reg + 4, resource_bit); 11082 lock_status = REG_RD(sc, hw_lock_control_reg); 11083 if (lock_status & resource_bit) { 11084 return (TRUE); 11085 } 11086 11087 BLOGE(sc, "Failed to get a resource lock 0x%x\n", resource); 11088 11089 return (FALSE); 11090} 11091 11092/* 11093 * Get the recovery leader resource id according to the engine this function 11094 * belongs to. Currently only 2 engines are supported. 11095 */ 11096static int 11097bxe_get_leader_lock_resource(struct bxe_softc *sc) 11098{ 11099 if (SC_PATH(sc)) { 11100 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1); 11101 } else { 11102 return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0); 11103 } 11104} 11105 11106/* try to acquire a leader lock for current engine */ 11107static uint8_t 11108bxe_trylock_leader_lock(struct bxe_softc *sc) 11109{ 11110 return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc))); 11111} 11112 11113static int 11114bxe_release_leader_lock(struct bxe_softc *sc) 11115{ 11116 return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc))); 11117} 11118 11119/* close gates #2, #3 and #4 */ 11120static void 11121bxe_set_234_gates(struct bxe_softc *sc, 11122 uint8_t close) 11123{ 11124 uint32_t val; 11125 11126 /* gates #2 and #4a are closed/opened for "not E1" only */ 11127 if (!CHIP_IS_E1(sc)) { 11128 /* #4 */ 11129 REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close); 11130 /* #2 */ 11131 REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); 11132 } 11133 11134 /* #3 */ 11135 if (CHIP_IS_E1x(sc)) { 11136 /* prevent interrupts from HC on both ports */ 11137 val = REG_RD(sc, HC_REG_CONFIG_1); 11138 REG_WR(sc, HC_REG_CONFIG_1, 11139 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : 11140 (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); 11141 11142 val = REG_RD(sc, HC_REG_CONFIG_0); 11143 REG_WR(sc, HC_REG_CONFIG_0, 11144 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : 11145 (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); 11146 } else { 11147 /* Prevent incoming interrupts in IGU */ 11148 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); 11149 11150 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, 11151 (!close) ? 11152 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : 11153 (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); 11154 } 11155 11156 BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n", 11157 close ? 
"closing" : "opening"); 11158 11159 wmb(); 11160} 11161 11162/* poll for the pending writes bit; it should get cleared in no more than 1s */ 11163static int 11164bxe_er_poll_igu_vq(struct bxe_softc *sc) 11165{ 11166 uint32_t cnt = 1000; 11167 uint32_t pend_bits = 0; 11168 11169 do { 11170 pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS); 11171 11172 if (pend_bits == 0) { 11173 break; 11174 } 11175 11176 DELAY(1000); 11177 } while (--cnt > 0); 11178 11179 if (cnt == 0) { 11180 BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits); 11181 return (-1); 11182 } 11183 11184 return (0); 11185} 11186 11187#define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */ 11188 11189static void 11190bxe_clp_reset_prep(struct bxe_softc *sc, 11191 uint32_t *magic_val) 11192{ 11193 /* Do some magic... */ 11194 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 11195 *magic_val = val & SHARED_MF_CLP_MAGIC; 11196 MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); 11197} 11198 11199/* restore the value of the 'magic' bit */ 11200static void 11201bxe_clp_reset_done(struct bxe_softc *sc, 11202 uint32_t magic_val) 11203{ 11204 /* Restore the 'magic' bit value... */ 11205 uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); 11206 MFCFG_WR(sc, shared_mf_config.clp_mb, 11207 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 11208} 11209 11210/* prepare for MCP reset, takes care of CLP configurations */ 11211static void 11212bxe_reset_mcp_prep(struct bxe_softc *sc, 11213 uint32_t *magic_val) 11214{ 11215 uint32_t shmem; 11216 uint32_t validity_offset; 11217 11218 /* set `magic' bit in order to save MF config */ 11219 if (!CHIP_IS_E1(sc)) { 11220 bxe_clp_reset_prep(sc, magic_val); 11221 } 11222 11223 /* get shmem offset */ 11224 shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 11225 validity_offset = 11226 offsetof(struct shmem_region, validity_map[SC_PORT(sc)]); 11227 11228 /* Clear validity map flags */ 11229 if (shmem > 0) { 11230 REG_WR(sc, shmem + validity_offset, 0); 11231 } 11232} 11233 11234#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ 11235#define MCP_ONE_TIMEOUT 100 /* 100 ms */ 11236 11237static void 11238bxe_mcp_wait_one(struct bxe_softc *sc) 11239{ 11240 /* special handling for emulation and FPGA (10 times longer) */ 11241 if (CHIP_REV_IS_SLOW(sc)) { 11242 DELAY((MCP_ONE_TIMEOUT*10) * 1000); 11243 } else { 11244 DELAY((MCP_ONE_TIMEOUT) * 1000); 11245 } 11246} 11247 11248/* initializes shmem_base and waits for the validity signature to appear */ 11249static int 11250bxe_init_shmem(struct bxe_softc *sc) 11251{ 11252 int cnt = 0; 11253 uint32_t val = 0; 11254 11255 do { 11256 sc->devinfo.shmem_base = 11257 sc->link_params.shmem_base = 11258 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 11259 11260 if (sc->devinfo.shmem_base) { 11261 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); 11262 if (val & SHR_MEM_VALIDITY_MB) 11263 return (0); 11264 } 11265 11266 bxe_mcp_wait_one(sc); 11267 11268 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); 11269 11270 BLOGE(sc, "BAD MCP validity signature\n"); 11271 11272 return (-1); 11273} 11274 11275static int 11276bxe_reset_mcp_comp(struct bxe_softc *sc, 11277 uint32_t magic_val) 11278{ 11279 int rc = bxe_init_shmem(sc); 11280 11281 /* Restore the `magic' bit value */ 11282 if (!CHIP_IS_E1(sc)) { 11283 bxe_clp_reset_done(sc, magic_val); 11284 } 11285 11286 return (rc); 11287} 11288 11289static void 11290bxe_pxp_prep(struct bxe_softc *sc) 11291{ 11292 if (!CHIP_IS_E1(sc)) { 11293 REG_WR(sc, PXP2_REG_RD_START_INIT, 0); 11294 REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0); 11295 
wmb(); 11296 } 11297} 11298 11299/* 11300 * Reset the whole chip except for: 11301 * - PCIE core 11302 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit) 11303 * - IGU 11304 * - MISC (including AEU) 11305 * - GRC 11306 * - RBCN, RBCP 11307 */ 11308static void 11309bxe_process_kill_chip_reset(struct bxe_softc *sc, 11310 uint8_t global) 11311{ 11312 uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; 11313 uint32_t global_bits2, stay_reset2; 11314 11315 /* 11316 * Bits that have to be set in reset_mask2 if we want to reset 'global' 11317 * (per chip) blocks. 11318 */ 11319 global_bits2 = 11320 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | 11321 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; 11322 11323 /* 11324 * Don't reset the following blocks. 11325 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be 11326 * reset, as in a 4-port device they might still be owned 11327 * by the MCP (there is only one leader per path). 11328 */ 11329 not_reset_mask1 = 11330 MISC_REGISTERS_RESET_REG_1_RST_HC | 11331 MISC_REGISTERS_RESET_REG_1_RST_PXPV | 11332 MISC_REGISTERS_RESET_REG_1_RST_PXP; 11333 11334 not_reset_mask2 = 11335 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | 11336 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | 11337 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | 11338 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | 11339 MISC_REGISTERS_RESET_REG_2_RST_RBCN | 11340 MISC_REGISTERS_RESET_REG_2_RST_GRC | 11341 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | 11342 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | 11343 MISC_REGISTERS_RESET_REG_2_RST_ATC | 11344 MISC_REGISTERS_RESET_REG_2_PGLC | 11345 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | 11346 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | 11347 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | 11348 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | 11349 MISC_REGISTERS_RESET_REG_2_UMAC0 | 11350 MISC_REGISTERS_RESET_REG_2_UMAC1; 11351 11352 /* 11353 * Keep the following blocks in reset: 11354 * - all xxMACs are handled by the elink code. 11355 */ 11356 stay_reset2 = 11357 MISC_REGISTERS_RESET_REG_2_XMAC | 11358 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; 11359 11360 /* Full reset masks according to the chip */ 11361 reset_mask1 = 0xffffffff; 11362 11363 if (CHIP_IS_E1(sc)) 11364 reset_mask2 = 0xffff; 11365 else if (CHIP_IS_E1H(sc)) 11366 reset_mask2 = 0x1ffff; 11367 else if (CHIP_IS_E2(sc)) 11368 reset_mask2 = 0xfffff; 11369 else /* CHIP_IS_E3 */ 11370 reset_mask2 = 0x3ffffff; 11371 11372 /* Don't reset global blocks unless we need to */ 11373 if (!global) 11374 reset_mask2 &= ~global_bits2; 11375 11376 /* 11377 * In case of attention in the QM, we need to reset PXP 11378 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM 11379 * because otherwise QM reset would release 'close the gates' shortly 11380 * before resetting the PXP, then the PSWRQ would send a write 11381 * request to PGLUE. Then when PXP is reset, PGLUE would try to 11382 * read the payload data from PSWWR, but PSWWR would not 11383 * respond. The write queue in PGLUE would get stuck, and DMAE commands 11384 * would not return. Therefore it's important to reset the second 11385 * reset register (containing the 11386 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the 11387 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM 11388 * bit). 
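* In short: the RESET_REG_2 bits are cleared before the RESET_REG_1 bits, which is exactly the order used by the register writes below.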
11389 */ 11390 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 11391 reset_mask2 & (~not_reset_mask2)); 11392 11393 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 11394 reset_mask1 & (~not_reset_mask1)); 11395 11396 mb(); 11397 wmb(); 11398 11399 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 11400 reset_mask2 & (~stay_reset2)); 11401 11402 mb(); 11403 wmb(); 11404 11405 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); 11406 wmb(); 11407} 11408 11409static int 11410bxe_process_kill(struct bxe_softc *sc, 11411 uint8_t global) 11412{ 11413 int cnt = 1000; 11414 uint32_t val = 0; 11415 uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; 11416 uint32_t tags_63_32 = 0; 11417 11418 /* Empty the Tetris buffer, wait for 1s */ 11419 do { 11420 sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT); 11421 blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT); 11422 port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0); 11423 port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1); 11424 pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2); 11425 if (CHIP_IS_E3(sc)) { 11426 tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32); 11427 } 11428 11429 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && 11430 ((port_is_idle_0 & 0x1) == 0x1) && 11431 ((port_is_idle_1 & 0x1) == 0x1) && 11432 (pgl_exp_rom2 == 0xffffffff) && 11433 (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff))) 11434 break; 11435 DELAY(1000); 11436 } while (cnt-- > 0); 11437 11438 if (cnt <= 0) { 11439 BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there " 11440 "are still outstanding read requests after 1s! " 11441 "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, " 11442 "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", 11443 sr_cnt, blk_cnt, port_is_idle_0, 11444 port_is_idle_1, pgl_exp_rom2); 11445 return (-1); 11446 } 11447 11448 mb(); 11449 11450 /* Close gates #2, #3 and #4 */ 11451 bxe_set_234_gates(sc, TRUE); 11452 11453 /* Poll for IGU VQs for 57712 and newer chips */ 11454 if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) { 11455 return (-1); 11456 } 11457 11458 /* XXX indicate that "process kill" is in progress to MCP */ 11459 11460 /* clear "unprepared" bit */ 11461 REG_WR(sc, MISC_REG_UNPREPARED, 0); 11462 mb(); 11463 11464 /* Make sure all is written to the chip before the reset */ 11465 wmb(); 11466 11467 /* 11468 * Wait for 1ms to empty GLUE and PCI-E core queues, 11469 * PSWHST, GRC and PSWRD Tetris buffer. 
11470 */ 11471 DELAY(1000); 11472 11473 /* Prepare for chip reset: */ 11474 /* MCP */ 11475 if (global) { 11476 bxe_reset_mcp_prep(sc, &val); 11477 } 11478 11479 /* PXP */ 11480 bxe_pxp_prep(sc); 11481 mb(); 11482 11483 /* reset the chip */ 11484 bxe_process_kill_chip_reset(sc, global); 11485 mb(); 11486 11487 /* clear errors in PGB */ 11488 if (!CHIP_IS_E1(sc)) 11489 REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); 11490 11491 /* Recover after reset: */ 11492 /* MCP */ 11493 if (global && bxe_reset_mcp_comp(sc, val)) { 11494 return (-1); 11495 } 11496 11497 /* XXX add resetting the NO_MCP mode DB here */ 11498 11499 /* Open the gates #2, #3 and #4 */ 11500 bxe_set_234_gates(sc, FALSE); 11501 11502 /* XXX 11503 * IGU/AEU preparation: bring back the AEU/IGU to a reset state 11504 * and re-enable attentions 11505 */ 11506 11507 return (0); 11508} 11509 11510static int 11511bxe_leader_reset(struct bxe_softc *sc) 11512{ 11513 int rc = 0; 11514 uint8_t global = bxe_reset_is_global(sc); 11515 uint32_t load_code; 11516 11517 /* 11518 * If not going to reset MCP, load "fake" driver to reset HW while 11519 * driver is owner of the HW. 11520 */ 11521 if (!global && !BXE_NOMCP(sc)) { 11522 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, 11523 DRV_MSG_CODE_LOAD_REQ_WITH_LFA); 11524 if (!load_code) { 11525 BLOGE(sc, "MCP response failure, aborting\n"); 11526 rc = -1; 11527 goto exit_leader_reset; 11528 } 11529 11530 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && 11531 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { 11532 BLOGE(sc, "MCP unexpected response, aborting\n"); 11533 rc = -1; 11534 goto exit_leader_reset2; 11535 } 11536 11537 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 11538 if (!load_code) { 11539 BLOGE(sc, "MCP response failure, aborting\n"); 11540 rc = -1; 11541 goto exit_leader_reset2; 11542 } 11543 } 11544 11545 /* try to recover after the failure */ 11546 if (bxe_process_kill(sc, global)) { 11547 BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc)); 11548 rc = -1; 11549 goto exit_leader_reset2; 11550 } 11551 11552 /* 11553 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver 11554 * state. 11555 */ 11556 bxe_set_reset_done(sc); 11557 if (global) { 11558 bxe_clear_reset_global(sc); 11559 } 11560 11561exit_leader_reset2: 11562 11563 /* unload "fake driver" if it was loaded */ 11564 if (!global && !BXE_NOMCP(sc)) { 11565 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 11566 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 11567 } 11568 11569exit_leader_reset: 11570 11571 sc->is_leader = 0; 11572 bxe_release_leader_lock(sc); 11573 11574 mb(); 11575 return (rc); 11576} 11577 11578/* 11579 * prepare INIT transition, parameters configured: 11580 * - HC configuration 11581 * - Queue's CDU context 11582 */ 11583static void 11584bxe_pf_q_prep_init(struct bxe_softc *sc, 11585 struct bxe_fastpath *fp, 11586 struct ecore_queue_init_params *init_params) 11587{ 11588 uint8_t cos; 11589 int cxt_index, cxt_offset; 11590 11591 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags); 11592 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags); 11593 11594 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags); 11595 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags); 11596 11597 /* HC rate */ 11598 init_params->rx.hc_rate = 11599 sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0; 11600 init_params->tx.hc_rate = 11601 sc->hc_tx_ticks ? 
(1000000 / sc->hc_tx_ticks) : 0; 11602 11603 /* FW SB ID */ 11604 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id; 11605 11606 /* CQ index among the SB indices */ 11607 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 11608 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; 11609 11610 /* set maximum number of COSs supported by this queue */ 11611 init_params->max_cos = sc->max_cos; 11612 11613 BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n", 11614 fp->index, init_params->max_cos); 11615 11616 /* set the context pointers queue object */ 11617 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { 11618 /* XXX change index/cid here if ever support multiple tx CoS */ 11619 /* fp->txdata[cos]->cid */ 11620 cxt_index = fp->index / ILT_PAGE_CIDS; 11621 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS); 11622 init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth; 11623 } 11624} 11625 11626/* set flags that are common for the Tx-only and not normal connections */ 11627static unsigned long 11628bxe_get_common_flags(struct bxe_softc *sc, 11629 struct bxe_fastpath *fp, 11630 uint8_t zero_stats) 11631{ 11632 unsigned long flags = 0; 11633 11634 /* PF driver will always initialize the Queue to an ACTIVE state */ 11635 bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags); 11636 11637 /* 11638 * tx only connections collect statistics (on the same index as the 11639 * parent connection). The statistics are zeroed when the parent 11640 * connection is initialized. 11641 */ 11642 11643 bxe_set_bit(ECORE_Q_FLG_STATS, &flags); 11644 if (zero_stats) { 11645 bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags); 11646 } 11647 11648 /* 11649 * tx only connections can support tx-switching, though their 11650 * CoS-ness doesn't survive the loopback 11651 */ 11652 if (sc->flags & BXE_TX_SWITCHING) { 11653 bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags); 11654 } 11655 11656 bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags); 11657 11658 return (flags); 11659} 11660 11661static unsigned long 11662bxe_get_q_flags(struct bxe_softc *sc, 11663 struct bxe_fastpath *fp, 11664 uint8_t leading) 11665{ 11666 unsigned long flags = 0; 11667 11668 if (IS_MF_SD(sc)) { 11669 bxe_set_bit(ECORE_Q_FLG_OV, &flags); 11670 } 11671 11672 if (sc->ifnet->if_capenable & IFCAP_LRO) { 11673 bxe_set_bit(ECORE_Q_FLG_TPA, &flags); 11674 bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags); 11675#if 0 11676 if (fp->mode == TPA_MODE_GRO) 11677 __set_bit(ECORE_Q_FLG_TPA_GRO, &flags); 11678#endif 11679 } 11680 11681 if (leading) { 11682 bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags); 11683 bxe_set_bit(ECORE_Q_FLG_MCAST, &flags); 11684 } 11685 11686 bxe_set_bit(ECORE_Q_FLG_VLAN, &flags); 11687 11688#if 0 11689 /* configure silent vlan removal */ 11690 if (IS_MF_AFEX(sc)) { 11691 bxe_set_bit(ECORE_Q_FLG_SILENT_VLAN_REM, &flags); 11692 } 11693#endif 11694 11695 /* merge with common flags */ 11696 return (flags | bxe_get_common_flags(sc, fp, TRUE)); 11697} 11698 11699static void 11700bxe_pf_q_prep_general(struct bxe_softc *sc, 11701 struct bxe_fastpath *fp, 11702 struct ecore_general_setup_params *gen_init, 11703 uint8_t cos) 11704{ 11705 gen_init->stat_id = bxe_stats_id(fp); 11706 gen_init->spcl_id = fp->cl_id; 11707 gen_init->mtu = sc->mtu; 11708 gen_init->cos = cos; 11709} 11710 11711static void 11712bxe_pf_rx_q_prep(struct bxe_softc *sc, 11713 struct bxe_fastpath *fp, 11714 struct rxq_pause_params *pause, 11715 struct ecore_rxq_setup_params *rxq_init) 11716{ 11717 uint8_t max_sge = 0; 11718 uint16_t sge_sz = 0; 
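/* max_sge, sge_sz and tpa_agg_size are only computed when LRO is enabled */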
11719 uint16_t tpa_agg_size = 0; 11720 11721 if (sc->ifnet->if_capenable & IFCAP_LRO) { 11722 pause->sge_th_lo = SGE_TH_LO(sc); 11723 pause->sge_th_hi = SGE_TH_HI(sc); 11724 11725 /* validate SGE ring has enough to cross high threshold */ 11726 if (sc->dropless_fc && 11727 (pause->sge_th_hi + FW_PREFETCH_CNT) > 11728 (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) { 11729 BLOGW(sc, "sge ring threshold limit\n"); 11730 } 11731 11732 /* minimum max_aggregation_size is 2*MTU (two full buffers) */ 11733 tpa_agg_size = (2 * sc->mtu); 11734 if (tpa_agg_size < sc->max_aggregation_size) { 11735 tpa_agg_size = sc->max_aggregation_size; 11736 } 11737 11738 max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT; 11739 max_sge = ((max_sge + PAGES_PER_SGE - 1) & 11740 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT; 11741 sge_sz = (uint16_t)min(SGE_PAGES, 0xffff); 11742 } 11743 11744 /* pause - not for e1 */ 11745 if (!CHIP_IS_E1(sc)) { 11746 pause->bd_th_lo = BD_TH_LO(sc); 11747 pause->bd_th_hi = BD_TH_HI(sc); 11748 11749 pause->rcq_th_lo = RCQ_TH_LO(sc); 11750 pause->rcq_th_hi = RCQ_TH_HI(sc); 11751 11752 /* validate rings have enough entries to cross high thresholds */ 11753 if (sc->dropless_fc && 11754 pause->bd_th_hi + FW_PREFETCH_CNT > 11755 sc->rx_ring_size) { 11756 BLOGW(sc, "rx bd ring threshold limit\n"); 11757 } 11758 11759 if (sc->dropless_fc && 11760 pause->rcq_th_hi + FW_PREFETCH_CNT > 11761 RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) { 11762 BLOGW(sc, "rcq ring threshold limit\n"); 11763 } 11764 11765 pause->pri_map = 1; 11766 } 11767 11768 /* rxq setup */ 11769 rxq_init->dscr_map = fp->rx_dma.paddr; 11770 rxq_init->sge_map = fp->rx_sge_dma.paddr; 11771 rxq_init->rcq_map = fp->rcq_dma.paddr; 11772 rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE); 11773 11774 /* 11775 * This should be the maximum number of data bytes that may be 11776 * placed on the BD (not including padding). 11777 */ 11778 rxq_init->buf_sz = (fp->rx_buf_size - 11779 IP_HEADER_ALIGNMENT_PADDING); 11780 11781 rxq_init->cl_qzone_id = fp->cl_qzone_id; 11782 rxq_init->tpa_agg_sz = tpa_agg_size; 11783 rxq_init->sge_buf_sz = sge_sz; 11784 rxq_init->max_sges_pkt = max_sge; 11785 rxq_init->rss_engine_id = SC_FUNC(sc); 11786 rxq_init->mcast_engine_id = SC_FUNC(sc); 11787 11788 /* 11789 * Maximum number of simultaneous TPA aggregations for this Queue. 11790 * For PF Clients it should be the maximum available number. 11791 * VF driver(s) may want to define it to a smaller value. 11792 */ 11793 rxq_init->max_tpa_queues = MAX_AGG_QS(sc); 11794 11795 rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT; 11796 rxq_init->fw_sb_id = fp->fw_sb_id; 11797 11798 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 11799 11800 /* 11801 * configure silent vlan removal 11802 * if multi function mode is afex, then mask default vlan 11803 */ 11804 if (IS_MF_AFEX(sc)) { 11805 rxq_init->silent_removal_value = 11806 sc->devinfo.mf_info.afex_def_vlan_tag; 11807 rxq_init->silent_removal_mask = EVL_VLID_MASK; 11808 } 11809} 11810 11811static void 11812bxe_pf_tx_q_prep(struct bxe_softc *sc, 11813 struct bxe_fastpath *fp, 11814 struct ecore_txq_setup_params *txq_init, 11815 uint8_t cos) 11816{ 11817 /* 11818 * XXX If multiple CoS is ever supported then each fastpath structure 11819 * will need to maintain tx producer/consumer/dma/etc values *per* CoS. 
 * fp->txdata[cos]->tx_dma.paddr; 11821 */ 11822 txq_init->dscr_map = fp->tx_dma.paddr; 11823 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; 11824 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; 11825 txq_init->fw_sb_id = fp->fw_sb_id; 11826 11827 /* 11828 * set the TSS leading client id for TX classification to the 11829 * leading RSS client id 11830 */ 11831 txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id); 11832} 11833 11834/* 11835 * This function performs 2 steps in a queue state machine: 11836 * 1) RESET->INIT 11837 * 2) INIT->SETUP 11838 */ 11839static int 11840bxe_setup_queue(struct bxe_softc *sc, 11841 struct bxe_fastpath *fp, 11842 uint8_t leading) 11843{ 11844 struct ecore_queue_state_params q_params = { NULL }; 11845 struct ecore_queue_setup_params *setup_params = 11846 &q_params.params.setup; 11847#if 0 11848 struct ecore_queue_setup_tx_only_params *tx_only_params = 11849 &q_params.params.tx_only; 11850 uint8_t tx_index; 11851#endif 11852 int rc; 11853 11854 BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index); 11855 11856 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); 11857 11858 q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj; 11859 11860 /* we want to wait for completion in this context */ 11861 bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 11862 11863 /* prepare the INIT parameters */ 11864 bxe_pf_q_prep_init(sc, fp, &q_params.params.init); 11865 11866 /* Set the command */ 11867 q_params.cmd = ECORE_Q_CMD_INIT; 11868 11869 /* Change the state to INIT */ 11870 rc = ecore_queue_state_change(sc, &q_params); 11871 if (rc) { 11872 BLOGE(sc, "Queue(%d) INIT failed\n", fp->index); 11873 return (rc); 11874 } 11875 11876 BLOGD(sc, DBG_LOAD, "init complete\n"); 11877 11878 /* now move the Queue to the SETUP state */ 11879 memset(setup_params, 0, sizeof(*setup_params)); 11880 11881 /* set Queue flags */ 11882 setup_params->flags = bxe_get_q_flags(sc, fp, leading); 11883 11884 /* set general SETUP parameters */ 11885 bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params, 11886 FIRST_TX_COS_INDEX); 11887 11888 bxe_pf_rx_q_prep(sc, fp, 11889 &setup_params->pause_params, 11890 &setup_params->rxq_params); 11891 11892 bxe_pf_tx_q_prep(sc, fp, 11893 &setup_params->txq_params, 11894 FIRST_TX_COS_INDEX); 11895 11896 /* Set the command */ 11897 q_params.cmd = ECORE_Q_CMD_SETUP; 11898 11899 /* change the state to SETUP */ 11900 rc = ecore_queue_state_change(sc, &q_params); 11901 if (rc) { 11902 BLOGE(sc, "Queue(%d) SETUP failed\n", fp->index); 11903 return (rc); 11904 } 11905 11906#if 0 11907 /* loop through the relevant tx-only indices */ 11908 for (tx_index = FIRST_TX_ONLY_COS_INDEX; 11909 tx_index < sc->max_cos; 11910 tx_index++) { 11911 /* prepare and send tx-only ramrod */ 11912 rc = bxe_setup_tx_only(sc, fp, &q_params, 11913 tx_only_params, tx_index, leading); 11914 if (rc) { 11915 BLOGE(sc, "Queue(%d.%d) TX_ONLY_SETUP failed\n", 11916 fp->index, tx_index); 11917 return (rc); 11918 } 11919 } 11920#endif 11921 11922 return (rc); 11923} 11924 11925static int 11926bxe_setup_leading(struct bxe_softc *sc) 11927{ 11928 return (bxe_setup_queue(sc, &sc->fp[0], TRUE)); 11929} 11930 11931static int 11932bxe_config_rss_pf(struct bxe_softc *sc, 11933 struct ecore_rss_config_obj *rss_obj, 11934 uint8_t config_hash) 11935{ 11936 struct ecore_config_rss_params params = { NULL }; 11937 int i; 11938 11939 /* 11940 * Although RSS is meaningless when there is a single HW queue we 11941 * still need it enabled in order to have HW Rx hash generated. 
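 * (The generated hash can then be handed up the stack, e.g. as an mbuf flowid.)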
11942 */ 11943 11944 params.rss_obj = rss_obj; 11945 11946 bxe_set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); 11947 11948 bxe_set_bit(ECORE_RSS_MODE_REGULAR, ¶ms.rss_flags); 11949 11950 /* RSS configuration */ 11951 bxe_set_bit(ECORE_RSS_IPV4, ¶ms.rss_flags); 11952 bxe_set_bit(ECORE_RSS_IPV4_TCP, ¶ms.rss_flags); 11953 bxe_set_bit(ECORE_RSS_IPV6, ¶ms.rss_flags); 11954 bxe_set_bit(ECORE_RSS_IPV6_TCP, ¶ms.rss_flags); 11955 if (rss_obj->udp_rss_v4) { 11956 bxe_set_bit(ECORE_RSS_IPV4_UDP, ¶ms.rss_flags); 11957 } 11958 if (rss_obj->udp_rss_v6) { 11959 bxe_set_bit(ECORE_RSS_IPV6_UDP, ¶ms.rss_flags); 11960 } 11961 11962 /* Hash bits */ 11963 params.rss_result_mask = MULTI_MASK; 11964 11965 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); 11966 11967 if (config_hash) { 11968 /* RSS keys */ 11969 for (i = 0; i < sizeof(params.rss_key) / 4; i++) { 11970 params.rss_key[i] = arc4random(); 11971 } 11972 11973 bxe_set_bit(ECORE_RSS_SET_SRCH, ¶ms.rss_flags); 11974 } 11975 11976 return (ecore_config_rss(sc, ¶ms)); 11977} 11978 11979static int 11980bxe_config_rss_eth(struct bxe_softc *sc, 11981 uint8_t config_hash) 11982{ 11983 return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash)); 11984} 11985 11986static int 11987bxe_init_rss_pf(struct bxe_softc *sc) 11988{ 11989 uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc); 11990 int i; 11991 11992 /* 11993 * Prepare the initial contents of the indirection table if 11994 * RSS is enabled 11995 */ 11996 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) { 11997 sc->rss_conf_obj.ind_table[i] = 11998 (sc->fp->cl_id + (i % num_eth_queues)); 11999 } 12000 12001 if (sc->udp_rss) { 12002 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1; 12003 } 12004 12005 /* 12006 * For 57710 and 57711 SEARCHER configuration (rss_keys) is 12007 * per-port, so if explicit configuration is needed, do it only 12008 * for a PMF. 12009 * 12010 * For 57712 and newer it's a per-function configuration. 12011 */ 12012 return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc))); 12013} 12014 12015static int 12016bxe_set_mac_one(struct bxe_softc *sc, 12017 uint8_t *mac, 12018 struct ecore_vlan_mac_obj *obj, 12019 uint8_t set, 12020 int mac_type, 12021 unsigned long *ramrod_flags) 12022{ 12023 struct ecore_vlan_mac_ramrod_params ramrod_param; 12024 int rc; 12025 12026 memset(&ramrod_param, 0, sizeof(ramrod_param)); 12027 12028 /* fill in general parameters */ 12029 ramrod_param.vlan_mac_obj = obj; 12030 ramrod_param.ramrod_flags = *ramrod_flags; 12031 12032 /* fill a user request section if needed */ 12033 if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) { 12034 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); 12035 12036 bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); 12037 12038 /* Set the command: ADD or DEL */ 12039 ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD : 12040 ECORE_VLAN_MAC_DEL; 12041 } 12042 12043 rc = ecore_config_vlan_mac(sc, &ramrod_param); 12044 12045 if (rc == ECORE_EXISTS) { 12046 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); 12047 /* do not treat adding same MAC as error */ 12048 rc = 0; 12049 } else if (rc < 0) { 12050 BLOGE(sc, "%s MAC failed (%d)\n", (set ? 
"Set" : "Delete"), rc); 12051 } 12052 12053 return (rc); 12054} 12055 12056static int 12057bxe_set_eth_mac(struct bxe_softc *sc, 12058 uint8_t set) 12059{ 12060 unsigned long ramrod_flags = 0; 12061 12062 BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n"); 12063 12064 bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 12065 12066 /* Eth MAC is set on RSS leading client (fp[0]) */ 12067 return (bxe_set_mac_one(sc, sc->link_params.mac_addr, 12068 &sc->sp_objs->mac_obj, 12069 set, ECORE_ETH_MAC, &ramrod_flags)); 12070} 12071 12072#if 0 12073static void 12074bxe_update_max_mf_config(struct bxe_softc *sc, 12075 uint32_t value) 12076{ 12077 /* load old values */ 12078 uint32_t mf_cfg = sc->devinfo.mf_info.mf_config[SC_VN(sc)]; 12079 12080 if (value != bxe_extract_max_cfg(sc, mf_cfg)) { 12081 /* leave all but MAX value */ 12082 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK; 12083 12084 /* set new MAX value */ 12085 mf_cfg |= ((value << FUNC_MF_CFG_MAX_BW_SHIFT) & 12086 FUNC_MF_CFG_MAX_BW_MASK); 12087 12088 bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW, mf_cfg); 12089 } 12090} 12091#endif 12092 12093static int 12094bxe_get_cur_phy_idx(struct bxe_softc *sc) 12095{ 12096 uint32_t sel_phy_idx = 0; 12097 12098 if (sc->link_params.num_phys <= 1) { 12099 return (ELINK_INT_PHY); 12100 } 12101 12102 if (sc->link_vars.link_up) { 12103 sel_phy_idx = ELINK_EXT_PHY1; 12104 /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */ 12105 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) && 12106 (sc->link_params.phy[ELINK_EXT_PHY2].supported & 12107 ELINK_SUPPORTED_FIBRE)) 12108 sel_phy_idx = ELINK_EXT_PHY2; 12109 } else { 12110 switch (elink_phy_selection(&sc->link_params)) { 12111 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 12112 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: 12113 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 12114 sel_phy_idx = ELINK_EXT_PHY1; 12115 break; 12116 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: 12117 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 12118 sel_phy_idx = ELINK_EXT_PHY2; 12119 break; 12120 } 12121 } 12122 12123 return (sel_phy_idx); 12124} 12125 12126static int 12127bxe_get_link_cfg_idx(struct bxe_softc *sc) 12128{ 12129 uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc); 12130 12131 /* 12132 * The selected activated PHY is always after swapping (in case PHY 12133 * swapping is enabled). 
So when swapping is enabled, we need to reverse 12134 * the configuration 12135 */ 12136 12137 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 12138 if (sel_phy_idx == ELINK_EXT_PHY1) 12139 sel_phy_idx = ELINK_EXT_PHY2; 12140 else if (sel_phy_idx == ELINK_EXT_PHY2) 12141 sel_phy_idx = ELINK_EXT_PHY1; 12142 } 12143 12144 return (ELINK_LINK_CONFIG_IDX(sel_phy_idx)); 12145} 12146 12147static void 12148bxe_set_requested_fc(struct bxe_softc *sc) 12149{ 12150 /* 12151 * Initialize link parameters structure variables 12152 * It is recommended to turn off RX FC for jumbo frames 12153 * for better performance 12154 */ 12155 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { 12156 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; 12157 } else { 12158 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; 12159 } 12160} 12161 12162static void 12163bxe_calc_fc_adv(struct bxe_softc *sc) 12164{ 12165 uint8_t cfg_idx = bxe_get_link_cfg_idx(sc); 12166 switch (sc->link_vars.ieee_fc & 12167 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 12168 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 12169 default: 12170 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 12171 ADVERTISED_Pause); 12172 break; 12173 12174 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 12175 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 12176 ADVERTISED_Pause); 12177 break; 12178 12179 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 12180 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; 12181 break; 12182 } 12183} 12184 12185static uint16_t 12186bxe_get_mf_speed(struct bxe_softc *sc) 12187{ 12188 uint16_t line_speed = sc->link_vars.line_speed; 12189 if (IS_MF(sc)) { 12190 uint16_t maxCfg = 12191 bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]); 12192 12193 /* calculate the current MAX line speed limit for the MF devices */ 12194 if (IS_MF_SI(sc)) { 12195 line_speed = (line_speed * maxCfg) / 100; 12196 } else { /* SD mode */ 12197 uint16_t vn_max_rate = maxCfg * 100; 12198 12199 if (vn_max_rate < line_speed) { 12200 line_speed = vn_max_rate; 12201 } 12202 } 12203 } 12204 12205 return (line_speed); 12206} 12207 12208static void 12209bxe_fill_report_data(struct bxe_softc *sc, 12210 struct bxe_link_report_data *data) 12211{ 12212 uint16_t line_speed = bxe_get_mf_speed(sc); 12213 12214 memset(data, 0, sizeof(*data)); 12215 12216 /* fill the report data with the effective line speed */ 12217 data->line_speed = line_speed; 12218 12219 /* Link is down */ 12220 if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) { 12221 bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags); 12222 } 12223 12224 /* Full DUPLEX */ 12225 if (sc->link_vars.duplex == DUPLEX_FULL) { 12226 bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags); 12227 } 12228 12229 /* Rx Flow Control is ON */ 12230 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) { 12231 bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags); 12232 } 12233 12234 /* Tx Flow Control is ON */ 12235 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { 12236 bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags); 12237 } 12238} 12239 12240/* report link status to OS, should be called under phy_lock */ 12241static void 12242bxe_link_report_locked(struct bxe_softc *sc) 12243{ 12244 struct bxe_link_report_data cur_data; 12245 12246 /* reread mf_cfg */ 12247 if (IS_PF(sc) && !CHIP_IS_E1(sc)) { 12248 bxe_read_mf_cfg(sc); 12249 } 12250 12251 /* Read the current link report info */ 12252 
bxe_fill_report_data(sc, &cur_data); 12253 12254 /* Don't report link down or exactly the same link status twice */ 12255 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) || 12256 (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 12257 &sc->last_reported_link.link_report_flags) && 12258 bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 12259 &cur_data.link_report_flags))) { 12260 return; 12261 } 12262 12263 sc->link_cnt++; 12264 12265 /* report new link params and remember the state for the next time */ 12266 memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data)); 12267 12268 if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, 12269 &cur_data.link_report_flags)) { 12270 if_link_state_change(sc->ifnet, LINK_STATE_DOWN); 12271 BLOGI(sc, "NIC Link is Down\n"); 12272 } else { 12273 const char *duplex; 12274 const char *flow; 12275 12276 if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX, 12277 &cur_data.link_report_flags)) { 12278 duplex = "full"; 12279 } else { 12280 duplex = "half"; 12281 } 12282 12283 /* 12284 * Handle the FC at the end so that only these flags could 12285 * possibly be set. This way we can easily check whether any FC 12286 * is enabled. 12287 */ 12288 if (cur_data.link_report_flags) { 12289 if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 12290 &cur_data.link_report_flags) && 12291 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 12292 &cur_data.link_report_flags)) { 12293 flow = "ON - receive & transmit"; 12294 } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 12295 &cur_data.link_report_flags) && 12296 !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 12297 &cur_data.link_report_flags)) { 12298 flow = "ON - receive"; 12299 } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, 12300 &cur_data.link_report_flags) && 12301 bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, 12302 &cur_data.link_report_flags)) { 12303 flow = "ON - transmit"; 12304 } else { 12305 flow = "none"; /* possible? 
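 * Seemingly not: once FULL_DUPLEX has been test-and-cleared above, only
 * the RX/TX FC bits can remain, so a non-zero flags word always matches
 * one of the branches above.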
*/ 12306 } 12307 } else { 12308 flow = "none"; 12309 } 12310 12311 if_link_state_change(sc->ifnet, LINK_STATE_UP); 12312 BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", 12313 cur_data.line_speed, duplex, flow); 12314 } 12315} 12316 12317static void 12318bxe_link_report(struct bxe_softc *sc) 12319{ 12320 BXE_PHY_LOCK(sc); 12321 bxe_link_report_locked(sc); 12322 BXE_PHY_UNLOCK(sc); 12323} 12324 12325static void 12326bxe_link_status_update(struct bxe_softc *sc) 12327{ 12328 if (sc->state != BXE_STATE_OPEN) { 12329 return; 12330 } 12331 12332#if 0 12333 /* read updated dcb configuration */ 12334 if (IS_PF(sc)) 12335 bxe_dcbx_pmf_update(sc); 12336#endif 12337 12338 if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) { 12339 elink_link_status_update(&sc->link_params, &sc->link_vars); 12340 } else { 12341 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half | 12342 ELINK_SUPPORTED_10baseT_Full | 12343 ELINK_SUPPORTED_100baseT_Half | 12344 ELINK_SUPPORTED_100baseT_Full | 12345 ELINK_SUPPORTED_1000baseT_Full | 12346 ELINK_SUPPORTED_2500baseX_Full | 12347 ELINK_SUPPORTED_10000baseT_Full | 12348 ELINK_SUPPORTED_TP | 12349 ELINK_SUPPORTED_FIBRE | 12350 ELINK_SUPPORTED_Autoneg | 12351 ELINK_SUPPORTED_Pause | 12352 ELINK_SUPPORTED_Asym_Pause); 12353 sc->port.advertising[0] = sc->port.supported[0]; 12354 12355 sc->link_params.sc = sc; 12356 sc->link_params.port = SC_PORT(sc); 12357 sc->link_params.req_duplex[0] = DUPLEX_FULL; 12358 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE; 12359 sc->link_params.req_line_speed[0] = SPEED_10000; 12360 sc->link_params.speed_cap_mask[0] = 0x7f0000; 12361 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G; 12362 12363 if (CHIP_REV_IS_FPGA(sc)) { 12364 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC; 12365 sc->link_vars.line_speed = ELINK_SPEED_1000; 12366 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 12367 LINK_STATUS_SPEED_AND_DUPLEX_1000TFD); 12368 } else { 12369 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC; 12370 sc->link_vars.line_speed = ELINK_SPEED_10000; 12371 sc->link_vars.link_status = (LINK_STATUS_LINK_UP | 12372 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); 12373 } 12374 12375 sc->link_vars.link_up = 1; 12376 12377 sc->link_vars.duplex = DUPLEX_FULL; 12378 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE; 12379 12380 if (IS_PF(sc)) { 12381 REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0); 12382 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12383 bxe_link_report(sc); 12384 } 12385 } 12386 12387 if (IS_PF(sc)) { 12388 if (sc->link_vars.link_up) { 12389 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12390 } else { 12391 bxe_stats_handle(sc, STATS_EVENT_STOP); 12392 } 12393 bxe_link_report(sc); 12394 } else { 12395 bxe_link_report(sc); 12396 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12397 } 12398} 12399 12400static int 12401bxe_initial_phy_init(struct bxe_softc *sc, 12402 int load_mode) 12403{ 12404 int rc, cfg_idx = bxe_get_link_cfg_idx(sc); 12405 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx]; 12406 struct elink_params *lp = &sc->link_params; 12407 12408 bxe_set_requested_fc(sc); 12409 12410 if (CHIP_REV_IS_SLOW(sc)) { 12411 uint32_t bond = CHIP_BOND_ID(sc); 12412 uint32_t feat = 0; 12413 12414 if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) { 12415 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 12416 } else if (bond & 0x4) { 12417 if (CHIP_IS_E3(sc)) { 12418 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC; 12419 } else { 12420 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; 12421 } 12422 } else if (bond & 0x8) { 12423 if 
(CHIP_IS_E3(sc)) { 12424 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC; 12425 } else { 12426 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; 12427 } 12428 } 12429 12430 /* disable EMAC for E3 and above */ 12431 if (bond & 0x2) { 12432 feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; 12433 } 12434 12435 sc->link_params.feature_config_flags |= feat; 12436 } 12437 12438 BXE_PHY_LOCK(sc); 12439 12440 if (load_mode == LOAD_DIAG) { 12441 lp->loopback_mode = ELINK_LOOPBACK_XGXS; 12442 /* Prefer doing PHY loopback at 10G speed, if possible */ 12443 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) { 12444 if (lp->speed_cap_mask[cfg_idx] & 12445 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 12446 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000; 12447 } else { 12448 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000; 12449 } 12450 } 12451 } 12452 12453 if (load_mode == LOAD_LOOPBACK_EXT) { 12454 lp->loopback_mode = ELINK_LOOPBACK_EXT; 12455 } 12456 12457 rc = elink_phy_init(&sc->link_params, &sc->link_vars); 12458 12459 BXE_PHY_UNLOCK(sc); 12460 12461 bxe_calc_fc_adv(sc); 12462 12463 if (sc->link_vars.link_up) { 12464 bxe_stats_handle(sc, STATS_EVENT_LINK_UP); 12465 bxe_link_report(sc); 12466 } 12467 12468 if (!CHIP_REV_IS_SLOW(sc)) { 12469 bxe_periodic_start(sc); 12470 } 12471 12472 sc->link_params.req_line_speed[cfg_idx] = req_line_speed; 12473 return (rc); 12474} 12475 12476/* must be called under IF_ADDR_LOCK */ 12477static int 12478bxe_init_mcast_macs_list(struct bxe_softc *sc, 12479 struct ecore_mcast_ramrod_params *p) 12480{ 12481 struct ifnet *ifp = sc->ifnet; 12482 int mc_count = 0; 12483 struct ifmultiaddr *ifma; 12484 struct ecore_mcast_list_elem *mc_mac; 12485 12486 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 12487 if (ifma->ifma_addr->sa_family != AF_LINK) { 12488 continue; 12489 } 12490 12491 mc_count++; 12492 } 12493 12494 ECORE_LIST_INIT(&p->mcast_list); 12495 p->mcast_list_len = 0; 12496 12497 if (!mc_count) { 12498 return (0); 12499 } 12500 12501 mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, 12502 (M_NOWAIT | M_ZERO)); 12503 if (!mc_mac) { 12504 BLOGE(sc, "Failed to allocate temp mcast list\n"); 12505 return (-1); 12506 } 12507 12508 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 12509 if (ifma->ifma_addr->sa_family != AF_LINK) { 12510 continue; 12511 } 12512 12513 mc_mac->mac = (uint8_t *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 12514 ECORE_LIST_PUSH_TAIL(&mc_mac->link, &p->mcast_list); 12515 12516 BLOGD(sc, DBG_LOAD, 12517 "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n", 12518 mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2], 12519 mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]); 12520 12521 mc_mac++; 12522 } 12523 12524 p->mcast_list_len = mc_count; 12525 12526 return (0); 12527} 12528 12529static void 12530bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p) 12531{ 12532 struct ecore_mcast_list_elem *mc_mac = 12533 ECORE_LIST_FIRST_ENTRY(&p->mcast_list, 12534 struct ecore_mcast_list_elem, 12535 link); 12536 12537 if (mc_mac) { 12538 /* only a single free as all mc_macs are in the same heap array */ 12539 free(mc_mac, M_DEVBUF); 12540 } 12541} 12542 12543static int 12544bxe_set_mc_list(struct bxe_softc *sc) 12545{ 12546 struct ecore_mcast_ramrod_params rparam = { NULL }; 12547 int rc = 0; 12548 12549 rparam.mcast_obj = &sc->mcast_obj; 12550 12551 BXE_MCAST_LOCK(sc); 12552 12553 /* first, clear all configured multicast MACs */ 12554 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); 12555 if (rc < 0) { 12556 BLOGE(sc, "Failed to clear 
multicast configuration: %d\n", rc); BXE_MCAST_UNLOCK(sc); 12557 return (rc); 12558 } 12559 12560 /* configure the new MAC list */ 12561 rc = bxe_init_mcast_macs_list(sc, &rparam); 12562 if (rc) { 12563 BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc); 12564 BXE_MCAST_UNLOCK(sc); 12565 return (rc); 12566 } 12567 12568 /* Now add the new MACs */ 12569 rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD); 12570 if (rc < 0) { 12571 BLOGE(sc, "Failed to set new mcast config (%d)\n", rc); 12572 } 12573 12574 bxe_free_mcast_macs_list(&rparam); 12575 12576 BXE_MCAST_UNLOCK(sc); 12577 12578 return (rc); 12579} 12580 12581static int 12582bxe_set_uc_list(struct bxe_softc *sc) 12583{ 12584 struct ifnet *ifp = sc->ifnet; 12585 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; 12586 struct ifaddr *ifa; 12587 unsigned long ramrod_flags = 0; 12588 int rc; 12589 12590#if __FreeBSD_version < 800000 12591 IF_ADDR_LOCK(ifp); 12592#else 12593 if_addr_rlock(ifp); 12594#endif 12595 12596 /* first schedule a cleanup of the old configuration */ 12597 rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE); 12598 if (rc < 0) { 12599 BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc); 12600#if __FreeBSD_version < 800000 12601 IF_ADDR_UNLOCK(ifp); 12602#else 12603 if_addr_runlock(ifp); 12604#endif 12605 return (rc); 12606 } 12607 12608 ifa = ifp->if_addr; 12609 while (ifa) { 12610 if (ifa->ifa_addr->sa_family != AF_LINK) { 12611 ifa = TAILQ_NEXT(ifa, ifa_link); 12612 continue; 12613 } 12614 12615 rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr), 12616 mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags); 12617 if (rc == -EEXIST) { 12618 BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); 12619 /* do not treat adding same MAC as an error */ 12620 rc = 0; 12621 } else if (rc < 0) { 12622 BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc); 12623#if __FreeBSD_version < 800000 12624 IF_ADDR_UNLOCK(ifp); 12625#else 12626 if_addr_runlock(ifp); 12627#endif 12628 return (rc); 12629 } 12630 12631 ifa = TAILQ_NEXT(ifa, ifa_link); 12632 } 12633 12634#if __FreeBSD_version < 800000 12635 IF_ADDR_UNLOCK(ifp); 12636#else 12637 if_addr_runlock(ifp); 12638#endif 12639 12640 /* Execute the pending commands */ 12641 bit_set(&ramrod_flags, RAMROD_CONT); 12642 return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */, 12643 ECORE_UC_LIST_MAC, &ramrod_flags)); 12644} 12645 12646static void 12647bxe_handle_rx_mode_tq(void *context, 12648 int pending) 12649{ 12650 struct bxe_softc *sc = (struct bxe_softc *)context; 12651 struct ifnet *ifp = sc->ifnet; 12652 uint32_t rx_mode = BXE_RX_MODE_NORMAL; 12653 12654 BXE_CORE_LOCK(sc); 12655 12656 if (sc->state != BXE_STATE_OPEN) { 12657 BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state); 12658 BXE_CORE_UNLOCK(sc); 12659 return; 12660 } 12661 12662 BLOGD(sc, DBG_SP, "ifp->if_flags=0x%x\n", ifp->if_flags); 12663 12664 if (ifp->if_flags & IFF_PROMISC) { 12665 rx_mode = BXE_RX_MODE_PROMISC; 12666 } else if ((ifp->if_flags & IFF_ALLMULTI) || 12667 ((ifp->if_amcount > BXE_MAX_MULTICAST) && 12668 CHIP_IS_E1(sc))) { 12669 rx_mode = BXE_RX_MODE_ALLMULTI; 12670 } else { 12671 if (IS_PF(sc)) { 12672 /* some multicasts */ 12673 if (bxe_set_mc_list(sc) < 0) { 12674 rx_mode = BXE_RX_MODE_ALLMULTI; 12675 } 12676 if (bxe_set_uc_list(sc) < 0) { 12677 rx_mode = BXE_RX_MODE_PROMISC; 12678 } 12679 } 12680#if 0 12681 else { 12682 /* 12683 * Configuring mcast to a VF involves sleeping (when we 12684 * wait for the PF's response). 
Since this function is 12685 * called from a non sleepable context we must schedule 12686 * a work item for this purpose 12687 */ 12688 bxe_set_bit(BXE_SP_RTNL_VFPF_MCAST, &sc->sp_rtnl_state); 12689 schedule_delayed_work(&sc->sp_rtnl_task, 0); 12690 } 12691#endif 12692 } 12693 12694 sc->rx_mode = rx_mode; 12695 12696 /* schedule the rx_mode command */ 12697 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { 12698 BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n"); 12699 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); 12700 BXE_CORE_UNLOCK(sc); 12701 return; 12702 } 12703 12704 if (IS_PF(sc)) { 12705 bxe_set_storm_rx_mode(sc); 12706 } 12707#if 0 12708 else { 12709 /* 12710 * Configuring mcast to a VF involves sleeping (when we 12711 * wait for the PF's response). Since this function is 12712 * called from a non sleepable context we must schedule 12713 * a work item for this purpose 12714 */ 12715 bxe_set_bit(BXE_SP_RTNL_VFPF_STORM_RX_MODE, &sc->sp_rtnl_state); 12716 schedule_delayed_work(&sc->sp_rtnl_task, 0); 12717 } 12718#endif 12719 12720 BXE_CORE_UNLOCK(sc); 12721} 12722 12723static void 12724bxe_set_rx_mode(struct bxe_softc *sc) 12725{ 12726 taskqueue_enqueue(sc->rx_mode_tq, &sc->rx_mode_tq_task); 12727} 12728 12729/* update flags in shmem */ 12730static void 12731bxe_update_drv_flags(struct bxe_softc *sc, 12732 uint32_t flags, 12733 uint32_t set) 12734{ 12735 uint32_t drv_flags; 12736 12737 if (SHMEM2_HAS(sc, drv_flags)) { 12738 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 12739 drv_flags = SHMEM2_RD(sc, drv_flags); 12740 12741 if (set) { 12742 SET_FLAGS(drv_flags, flags); 12743 } else { 12744 RESET_FLAGS(drv_flags, flags); 12745 } 12746 12747 SHMEM2_WR(sc, drv_flags, drv_flags); 12748 BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags); 12749 12750 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); 12751 } 12752} 12753 12754/* periodic timer callout routine, only runs when the interface is up */ 12755 12756static void 12757bxe_periodic_callout_func(void *xsc) 12758{ 12759 struct bxe_softc *sc = (struct bxe_softc *)xsc; 12760 int i; 12761 12762 if (!BXE_CORE_TRYLOCK(sc)) { 12763 /* just bail and try again next time */ 12764 12765 if ((sc->state == BXE_STATE_OPEN) && 12766 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { 12767 /* schedule the next periodic callout */ 12768 callout_reset(&sc->periodic_callout, hz, 12769 bxe_periodic_callout_func, sc); 12770 } 12771 12772 return; 12773 } 12774 12775 if ((sc->state != BXE_STATE_OPEN) || 12776 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) { 12777 BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state); 12778 BXE_CORE_UNLOCK(sc); 12779 return; 12780 } 12781 12782 /* Check for TX timeouts on any fastpath. */ 12783 FOR_EACH_QUEUE(sc, i) { 12784 if (bxe_watchdog(sc, &sc->fp[i]) != 0) { 12785 /* Ruh-Roh, chip was reset! */ 12786 break; 12787 } 12788 } 12789 12790 if (!CHIP_REV_IS_SLOW(sc)) { 12791 /* 12792 * This barrier is needed to ensure the ordering between the writing 12793 * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and 12794 * the reading here. 
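 * Without the barrier a stale sc->port.pmf of zero could be observed
 * here and the elink periodic work below skipped for this tick.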
12795 */ 12796 mb(); 12797 if (sc->port.pmf) { 12798 BXE_PHY_LOCK(sc); 12799 elink_period_func(&sc->link_params, &sc->link_vars); 12800 BXE_PHY_UNLOCK(sc); 12801 } 12802 } 12803 12804 if (IS_PF(sc) && !BXE_NOMCP(sc)) { 12805 int mb_idx = SC_FW_MB_IDX(sc); 12806 uint32_t drv_pulse; 12807 uint32_t mcp_pulse; 12808 12809 ++sc->fw_drv_pulse_wr_seq; 12810 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 12811 12812 drv_pulse = sc->fw_drv_pulse_wr_seq; 12813 bxe_drv_pulse(sc); 12814 12815 mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) & 12816 MCP_PULSE_SEQ_MASK); 12817 12818 /* 12819 * The delta between driver pulse and mcp response should 12820 * be 1 (before mcp response) or 0 (after mcp response). 12821 */ 12822 if ((drv_pulse != mcp_pulse) && 12823 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { 12824 /* someone lost a heartbeat... */ 12825 BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n", 12826 drv_pulse, mcp_pulse); 12827 } 12828 } 12829 12830 /* state is BXE_STATE_OPEN */ 12831 bxe_stats_handle(sc, STATS_EVENT_UPDATE); 12832 12833#if 0 12834 /* sample VF bulletin board for new posts from PF */ 12835 if (IS_VF(sc)) { 12836 bxe_sample_bulletin(sc); 12837 } 12838#endif 12839 12840 BXE_CORE_UNLOCK(sc); 12841 12842 if ((sc->state == BXE_STATE_OPEN) && 12843 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { 12844 /* schedule the next periodic callout */ 12845 callout_reset(&sc->periodic_callout, hz, 12846 bxe_periodic_callout_func, sc); 12847 } 12848} 12849 12850static void 12851bxe_periodic_start(struct bxe_softc *sc) 12852{ 12853 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO); 12854 callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); 12855} 12856 12857static void 12858bxe_periodic_stop(struct bxe_softc *sc) 12859{ 12860 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP); 12861 callout_drain(&sc->periodic_callout); 12862} 12863 12864/* start the controller */ 12865static __noinline int 12866bxe_nic_load(struct bxe_softc *sc, 12867 int load_mode) 12868{ 12869 uint32_t val; 12870 int load_code = 0; 12871 int i, rc = 0; 12872 12873 BXE_CORE_LOCK_ASSERT(sc); 12874 12875 BLOGD(sc, DBG_LOAD, "Starting NIC load...\n"); 12876 12877 sc->state = BXE_STATE_OPENING_WAITING_LOAD; 12878 12879 if (IS_PF(sc)) { 12880 /* must be called before memory allocation and HW init */ 12881 bxe_ilt_set_info(sc); 12882 } 12883 12884 sc->last_reported_link_state = LINK_STATE_UNKNOWN; 12885 12886 bxe_set_fp_rx_buf_size(sc); 12887 12888 if (bxe_alloc_fp_buffers(sc) != 0) { 12889 BLOGE(sc, "Failed to allocate fastpath memory\n"); 12890 sc->state = BXE_STATE_CLOSED; 12891 rc = ENOMEM; 12892 goto bxe_nic_load_error0; 12893 } 12894 12895 if (bxe_alloc_mem(sc) != 0) { 12896 sc->state = BXE_STATE_CLOSED; 12897 rc = ENOMEM; 12898 goto bxe_nic_load_error0; 12899 } 12900 12901 if (bxe_alloc_fw_stats_mem(sc) != 0) { 12902 sc->state = BXE_STATE_CLOSED; 12903 rc = ENOMEM; 12904 goto bxe_nic_load_error0; 12905 } 12906 12907 if (IS_PF(sc)) { 12908 /* set pf load just before approaching the MCP */ 12909 bxe_set_pf_load(sc); 12910 12911 /* if MCP exists send load request and analyze response */ 12912 if (!BXE_NOMCP(sc)) { 12913 /* attempt to load pf */ 12914 if (bxe_nic_load_request(sc, &load_code) != 0) { 12915 sc->state = BXE_STATE_CLOSED; 12916 rc = ENXIO; 12917 goto bxe_nic_load_error1; 12918 } 12919 12920 /* what did the MCP say? 
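 * bxe_nic_load_analyze_req() validates the response; on a mismatch we
 * ack the MCP with LOAD_DONE and unwind.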
 */ 12921 if (bxe_nic_load_analyze_req(sc, load_code) != 0) { 12922 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12923 sc->state = BXE_STATE_CLOSED; 12924 rc = ENXIO; 12925 goto bxe_nic_load_error2; 12926 } 12927 } else { 12928 BLOGI(sc, "Device has no MCP!\n"); 12929 load_code = bxe_nic_load_no_mcp(sc); 12930 } 12931 12932 /* mark PMF if applicable */ 12933 bxe_nic_load_pmf(sc, load_code); 12934 12935 /* Init Function state controlling object */ 12936 bxe_init_func_obj(sc); 12937 12938 /* Initialize HW */ 12939 if (bxe_init_hw(sc, load_code) != 0) { 12940 BLOGE(sc, "HW init failed\n"); 12941 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12942 sc->state = BXE_STATE_CLOSED; 12943 rc = ENXIO; 12944 goto bxe_nic_load_error2; 12945 } 12946 } 12947 12948 /* attach interrupts */ 12949 if (bxe_interrupt_attach(sc) != 0) { 12950 sc->state = BXE_STATE_CLOSED; 12951 rc = ENXIO; 12952 goto bxe_nic_load_error2; 12953 } 12954 12955 bxe_nic_init(sc, load_code); 12956 12957 /* Init per-function objects */ 12958 if (IS_PF(sc)) { 12959 bxe_init_objs(sc); 12960 // XXX bxe_iov_nic_init(sc); 12961 12962 /* set AFEX default VLAN tag to an invalid value */ 12963 sc->devinfo.mf_info.afex_def_vlan_tag = -1; 12964 // XXX bxe_nic_load_afex_dcc(sc, load_code); 12965 12966 sc->state = BXE_STATE_OPENING_WAITING_PORT; 12967 rc = bxe_func_start(sc); 12968 if (rc) { 12969 BLOGE(sc, "Function start failed!\n"); 12970 bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12971 sc->state = BXE_STATE_ERROR; 12972 goto bxe_nic_load_error3; 12973 } 12974 12975 /* send LOAD_DONE command to MCP */ 12976 if (!BXE_NOMCP(sc)) { 12977 load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); 12978 if (!load_code) { 12979 BLOGE(sc, "MCP response failure, aborting\n"); 12980 sc->state = BXE_STATE_ERROR; 12981 rc = ENXIO; 12982 goto bxe_nic_load_error3; 12983 } 12984 } 12985 12986 rc = bxe_setup_leading(sc); 12987 if (rc) { 12988 BLOGE(sc, "Setup leading failed!\n"); 12989 sc->state = BXE_STATE_ERROR; 12990 goto bxe_nic_load_error3; 12991 } 12992 12993 FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) { 12994 rc = bxe_setup_queue(sc, &sc->fp[i], FALSE); 12995 if (rc) { 12996 BLOGE(sc, "Queue(%d) setup failed\n", i); 12997 sc->state = BXE_STATE_ERROR; 12998 goto bxe_nic_load_error3; 12999 } 13000 } 13001 13002 rc = bxe_init_rss_pf(sc); 13003 if (rc) { 13004 BLOGE(sc, "PF RSS init failed\n"); 13005 sc->state = BXE_STATE_ERROR; 13006 goto bxe_nic_load_error3; 13007 } 13008 } 13009 /* XXX VF */ 13010#if 0 13011 else { /* VF */ 13012 FOR_EACH_ETH_QUEUE(sc, i) { 13013 rc = bxe_vfpf_setup_q(sc, i); 13014 if (rc) { 13015 BLOGE(sc, "Queue(%d) setup failed\n", i); 13016 sc->state = BXE_STATE_ERROR; 13017 goto bxe_nic_load_error3; 13018 } 13019 } 13020 } 13021#endif 13022 13023 /* now that Clients are configured we are ready to work */ 13024 sc->state = BXE_STATE_OPEN; 13025 13026 /* Configure a ucast MAC */ 13027 if (IS_PF(sc)) { 13028 rc = bxe_set_eth_mac(sc, TRUE); 13029 } 13030#if 0 13031 else { /* IS_VF(sc) */ 13032 rc = bxe_vfpf_set_mac(sc); 13033 } 13034#endif 13035 if (rc) { 13036 BLOGE(sc, "Setting Ethernet MAC failed\n"); 13037 sc->state = BXE_STATE_ERROR; 13038 goto bxe_nic_load_error3; 13039 } 13040 13041#if 0 13042 if (IS_PF(sc) && sc->pending_max) { 13043 /* for AFEX */ 13044 bxe_update_max_mf_config(sc, sc->pending_max); 13045 sc->pending_max = 0; 13046 } 13047#endif 13048 13049 if (sc->port.pmf) { 13050 rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN); 13051 if (rc) { 13052 sc->state = BXE_STATE_ERROR; 13053 goto bxe_nic_load_error3; 
13054 } 13055 } 13056 13057 sc->link_params.feature_config_flags &= 13058 ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN; 13059 13060 /* start fast path */ 13061 13062 /* Initialize Rx filter */ 13063 bxe_set_rx_mode(sc); 13064 13065 /* start the Tx */ 13066 switch (/* XXX load_mode */LOAD_OPEN) { 13067 case LOAD_NORMAL: 13068 case LOAD_OPEN: 13069 break; 13070 13071 case LOAD_DIAG: 13072 case LOAD_LOOPBACK_EXT: 13073 sc->state = BXE_STATE_DIAG; 13074 break; 13075 13076 default: 13077 break; 13078 } 13079 13080 if (sc->port.pmf) { 13081 bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0); 13082 } else { 13083 bxe_link_status_update(sc); 13084 } 13085 13086 /* start the periodic timer callout */ 13087 bxe_periodic_start(sc); 13088 13089 if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { 13090 /* mark driver is loaded in shmem2 */ 13091 val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); 13092 SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], 13093 (val | 13094 DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | 13095 DRV_FLAGS_CAPABILITIES_LOADED_L2)); 13096 } 13097 13098 /* wait for all pending SP commands to complete */ 13099 if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) { 13100 BLOGE(sc, "Timeout waiting for all SPs to complete!\n"); 13101 bxe_periodic_stop(sc); 13102 bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE); 13103 return (ENXIO); 13104 } 13105 13106#if 0 13107 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ 13108 if (sc->port.pmf && (sc->state != BXE_STATE_DIAG)) { 13109 bxe_dcbx_init(sc, FALSE); 13110 } 13111#endif 13112 13113 /* Tell the stack the driver is running! */ 13114 sc->ifnet->if_drv_flags = IFF_DRV_RUNNING; 13115 13116 BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n"); 13117 13118 return (0); 13119 13120bxe_nic_load_error3: 13121 13122 if (IS_PF(sc)) { 13123 bxe_int_disable_sync(sc, 1); 13124 13125 /* clean out queued objects */ 13126 bxe_squeeze_objects(sc); 13127 } 13128 13129 bxe_interrupt_detach(sc); 13130 13131bxe_nic_load_error2: 13132 13133 if (IS_PF(sc) && !BXE_NOMCP(sc)) { 13134 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 13135 bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); 13136 } 13137 13138 sc->port.pmf = 0; 13139 13140bxe_nic_load_error1: 13141 13142 /* clear pf_load status, as it was already set */ 13143 if (IS_PF(sc)) { 13144 bxe_clear_pf_load(sc); 13145 } 13146 13147bxe_nic_load_error0: 13148 13149 bxe_free_fw_stats_mem(sc); 13150 bxe_free_fp_buffers(sc); 13151 bxe_free_mem(sc); 13152 13153 return (rc); 13154} 13155 13156static int 13157bxe_init_locked(struct bxe_softc *sc) 13158{ 13159 int other_engine = SC_PATH(sc) ? 0 : 1; 13160 uint8_t other_load_status, load_status; 13161 uint8_t global = FALSE; 13162 int rc; 13163 13164 BXE_CORE_LOCK_ASSERT(sc); 13165 13166 /* check if the driver is already running */ 13167 if (sc->ifnet->if_drv_flags & IFF_DRV_RUNNING) { 13168 BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n"); 13169 return (0); 13170 } 13171 13172 bxe_set_power_state(sc, PCI_PM_D0); 13173 13174 /* 13175 * If parity occurred during the unload, then attentions and/or 13176 * RECOVERY_IN_PROGRESS may still be set. If so we want the first function 13177 * loaded on the current engine to complete the recovery. Parity recovery 13178 * is only relevant for PF driver. 
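 * (hence the IS_PF() check just below).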
13179 */ 13180 if (IS_PF(sc)) { 13181 other_load_status = bxe_get_load_status(sc, other_engine); 13182 load_status = bxe_get_load_status(sc, SC_PATH(sc)); 13183 13184 if (!bxe_reset_is_done(sc, SC_PATH(sc)) || 13185 bxe_chk_parity_attn(sc, &global, TRUE)) { 13186 do { 13187 /* 13188 * If there are attentions and they are in global blocks, set 13189 * the GLOBAL_RESET bit regardless of whether it will be this 13190 * function that will complete the recovery or not. 13191 */ 13192 if (global) { 13193 bxe_set_reset_global(sc); 13194 } 13195 13196 /* 13197 * Only the first function on the current engine should try 13198 * to recover in open. In case of attentions in global blocks 13199 * only the first in the chip should try to recover. 13200 */ 13201 if ((!load_status && (!global || !other_load_status)) && 13202 bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) { 13203 BLOGI(sc, "Recovered during init\n"); 13204 break; 13205 } 13206 13207 /* recovery has failed... */ 13208 bxe_set_power_state(sc, PCI_PM_D3hot); 13209 sc->recovery_state = BXE_RECOVERY_FAILED; 13210 13211 BLOGE(sc, "Recovery flow hasn't properly " 13212 "completed yet, try again later. " 13213 "If you still see this message after a " 13214 "few retries then power cycle is required.\n"); 13215 13216 rc = ENXIO; 13217 goto bxe_init_locked_done; 13218 } while (0); 13219 } 13220 } 13221 13222 sc->recovery_state = BXE_RECOVERY_DONE; 13223 13224 rc = bxe_nic_load(sc, LOAD_OPEN); 13225 13226bxe_init_locked_done: 13227 13228 if (rc) { 13229 /* Tell the stack the driver is NOT running! */ 13230 BLOGE(sc, "Initialization failed, " 13231 "stack notified driver is NOT running!\n"); 13232 sc->ifnet->if_drv_flags &= ~IFF_DRV_RUNNING; 13233 } 13234 13235 return (rc); 13236} 13237 13238static int 13239bxe_stop_locked(struct bxe_softc *sc) 13240{ 13241 BXE_CORE_LOCK_ASSERT(sc); 13242 return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE)); 13243} 13244 13245/* 13246 * Handles controller initialization when called from an unlocked routine. 13247 * ifconfig calls this function. 13248 * 13249 * Returns: 13250 * void 13251 */ 13252static void 13253bxe_init(void *xsc) 13254{ 13255 struct bxe_softc *sc = (struct bxe_softc *)xsc; 13256 13257 BXE_CORE_LOCK(sc); 13258 bxe_init_locked(sc); 13259 BXE_CORE_UNLOCK(sc); 13260} 13261 13262static int 13263bxe_init_ifnet(struct bxe_softc *sc) 13264{ 13265 struct ifnet *ifp; 13266 13267 /* ifconfig entrypoint for media type/status reporting */ 13268 ifmedia_init(&sc->ifmedia, IFM_IMASK, 13269 bxe_ifmedia_update, 13270 bxe_ifmedia_status); 13271 13272 /* set the default interface values */ 13273 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL); 13274 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL); 13275 ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO)); 13276 13277 sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? 
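 * Presumably this just mirrors the media selected via ifmedia_set()
 * above into ifm_media.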
*/ 13278 13279 /* allocate the ifnet structure */ 13280 if ((ifp = if_alloc(IFT_ETHER)) == NULL) { 13281 BLOGE(sc, "Interface allocation failed!\n"); 13282 return (ENXIO); 13283 } 13284 13285 ifp->if_softc = sc; 13286 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); 13287 ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 13288 ifp->if_ioctl = bxe_ioctl; 13289 ifp->if_start = bxe_tx_start; 13290#if __FreeBSD_version >= 800000 13291 ifp->if_transmit = bxe_tx_mq_start; 13292 ifp->if_qflush = bxe_mq_flush; 13293#endif 13294#ifdef FreeBSD8_0 13295 ifp->if_timer = 0; 13296#endif 13297 ifp->if_init = bxe_init; 13298 ifp->if_mtu = sc->mtu; 13299 ifp->if_hwassist = (CSUM_IP | 13300 CSUM_TCP | 13301 CSUM_UDP | 13302 CSUM_TSO | 13303 CSUM_TCP_IPV6 | 13304 CSUM_UDP_IPV6); 13305 ifp->if_capabilities = 13306#if __FreeBSD_version < 700000 13307 (IFCAP_VLAN_MTU | 13308 IFCAP_VLAN_HWTAGGING | 13309 IFCAP_HWCSUM | 13310 IFCAP_JUMBO_MTU | 13311 IFCAP_LRO); 13312#else 13313 (IFCAP_VLAN_MTU | 13314 IFCAP_VLAN_HWTAGGING | 13315 IFCAP_VLAN_HWTSO | 13316 IFCAP_VLAN_HWFILTER | 13317 IFCAP_VLAN_HWCSUM | 13318 IFCAP_HWCSUM | 13319 IFCAP_JUMBO_MTU | 13320 IFCAP_LRO | 13321 IFCAP_TSO4 | 13322 IFCAP_TSO6 | 13323 IFCAP_WOL_MAGIC); 13324#endif 13325 ifp->if_capenable = ifp->if_capabilities; 13326 ifp->if_capenable &= ~IFCAP_WOL_MAGIC; /* XXX not yet... */ 13327#if __FreeBSD_version < 1000025 13328 ifp->if_baudrate = 1000000000; 13329#else 13330 if_initbaudrate(ifp, IF_Gbps(10)); 13331#endif 13332 ifp->if_snd.ifq_drv_maxlen = sc->tx_ring_size; 13333 13334 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 13335 IFQ_SET_READY(&ifp->if_snd); 13336 13337 sc->ifnet = ifp; 13338 13339 /* attach to the Ethernet interface list */ 13340 ether_ifattach(ifp, sc->link_params.mac_addr); 13341 13342 return (0); 13343} 13344 13345static void 13346bxe_deallocate_bars(struct bxe_softc *sc) 13347{ 13348 int i; 13349 13350 for (i = 0; i < MAX_BARS; i++) { 13351 if (sc->bar[i].resource != NULL) { 13352 bus_release_resource(sc->dev, 13353 SYS_RES_MEMORY, 13354 sc->bar[i].rid, 13355 sc->bar[i].resource); 13356 BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n", 13357 i, PCIR_BAR(i)); 13358 } 13359 } 13360} 13361 13362static int 13363bxe_allocate_bars(struct bxe_softc *sc) 13364{ 13365 u_int flags; 13366 int i; 13367 13368 memset(sc->bar, 0, sizeof(sc->bar)); 13369 13370 for (i = 0; i < MAX_BARS; i++) { 13371 13372 /* memory resources reside at BARs 0, 2, 4 */ 13373 /* Run `pciconf -lb` to see mappings */ 13374 if ((i != 0) && (i != 2) && (i != 4)) { 13375 continue; 13376 } 13377 13378 sc->bar[i].rid = PCIR_BAR(i); 13379 13380 flags = RF_ACTIVE; 13381 if (i == 0) { 13382 flags |= RF_SHAREABLE; 13383 } 13384 13385 if ((sc->bar[i].resource = 13386 bus_alloc_resource_any(sc->dev, 13387 SYS_RES_MEMORY, 13388 &sc->bar[i].rid, 13389 flags)) == NULL) { 13390#if 0 13391 /* BAR4 doesn't exist for E1 */ 13392 BLOGE(sc, "PCI BAR%d [%02x] memory allocation failed\n", 13393 i, PCIR_BAR(i)); 13394#endif 13395 return (0); 13396 } 13397 13398 sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource); 13399 sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource); 13400 sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource); 13401 13402 BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%ld) -> %p\n", 13403 i, PCIR_BAR(i), 13404 (void *)rman_get_start(sc->bar[i].resource), 13405 (void *)rman_get_end(sc->bar[i].resource), 13406 rman_get_size(sc->bar[i].resource), 13407 (void *)sc->bar[i].kva); 13408 } 
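/*
 * Reaching here means every BAR probed above was mapped. If a BAR cannot
 * be allocated (e.g. BAR4, which does not exist on E1 devices per the
 * note above) the loop simply returns early instead.
 */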
13409 13410 return (0); 13411} 13412 13413static void 13414bxe_get_function_num(struct bxe_softc *sc) 13415{ 13416 uint32_t val = 0; 13417 13418 /* 13419 * Read the ME register to get the function number. The ME register 13420 * holds the relative-function number and absolute-function number. The 13421 * absolute-function number appears only in E2 and above. Before that 13422 * these bits always contained zero, therefore we cannot blindly use them. 13423 */ 13424 13425 val = REG_RD(sc, BAR_ME_REGISTER); 13426 13427 sc->pfunc_rel = 13428 (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT); 13429 sc->path_id = 13430 (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1; 13431 13432 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 13433 sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id); 13434 } else { 13435 sc->pfunc_abs = (sc->pfunc_rel | sc->path_id); 13436 } 13437 13438 BLOGD(sc, DBG_LOAD, 13439 "Relative function %d, Absolute function %d, Path %d\n", 13440 sc->pfunc_rel, sc->pfunc_abs, sc->path_id); 13441} 13442 13443static uint32_t 13444bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc) 13445{ 13446 uint32_t shmem2_size; 13447 uint32_t offset; 13448 uint32_t mf_cfg_offset_value; 13449 13450 /* Non 57712 */ 13451 offset = (SHMEM_RD(sc, func_mb) + 13452 (MAX_FUNC_NUM * sizeof(struct drv_func_mb))); 13453 13454 /* 57712 plus */ 13455 if (sc->devinfo.shmem2_base != 0) { 13456 shmem2_size = SHMEM2_RD(sc, size); 13457 if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) { 13458 mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr); 13459 if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) { 13460 offset = mf_cfg_offset_value; 13461 } 13462 } 13463 } 13464 13465 return (offset); 13466} 13467 13468static uint32_t 13469bxe_pcie_capability_read(struct bxe_softc *sc, 13470 int reg, 13471 int width) 13472{ 13473 int pcie_reg; 13474 13475 /* ensure PCIe capability is enabled */ 13476 if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) { 13477 if (pcie_reg != 0) { 13478 BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg); 13479 return (pci_read_config(sc->dev, (pcie_reg + reg), width)); 13480 } 13481 } 13482 13483 BLOGE(sc, "PCIe capability NOT FOUND!!!\n"); 13484 13485 return (0); 13486} 13487 13488static uint8_t 13489bxe_is_pcie_pending(struct bxe_softc *sc) 13490{ 13491 return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) & 13492 PCIM_EXP_STA_TRANSACTION_PND); 13493} 13494 13495/* 13496 * Walk the PCI capabilities list for the device to find what features are 13497 * supported. These capabilities may be enabled/disabled by firmware so it's 13498 * best to walk the list rather than make assumptions. 13499 */ 13500static void 13501bxe_probe_pci_caps(struct bxe_softc *sc) 13502{ 13503 uint16_t link_status; 13504 int reg; 13505 13506 /* check if PCI Power Management is enabled */ 13507 if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) { 13508 if (reg != 0) { 13509 BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg); 13510 13511 sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG; 13512 sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg; 13513 } 13514 } 13515 13516 link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2); 13517 13518 /* handle PCIe 2.0 workarounds for 57710 */ 13519 if (CHIP_IS_E1(sc)) { 13520 /* workaround for 57710 errata E4_57710_27462 */ 13521 sc->devinfo.pcie_link_speed = 13522 (REG_RD(sc, 0x3d04) & (1 << 24)) ? 
2 : 1; 13523 13524 /* workaround for 57710 errata E4_57710_27488 */ 13525 sc->devinfo.pcie_link_width = 13526 ((link_status & PCIM_LINK_STA_WIDTH) >> 4); 13527 if (sc->devinfo.pcie_link_speed > 1) { 13528 sc->devinfo.pcie_link_width = 13529 ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1; 13530 } 13531 } else { 13532 sc->devinfo.pcie_link_speed = 13533 (link_status & PCIM_LINK_STA_SPEED); 13534 sc->devinfo.pcie_link_width = 13535 ((link_status & PCIM_LINK_STA_WIDTH) >> 4); 13536 } 13537 13538 BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n", 13539 sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width); 13540 13541 sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG; 13542 sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg; 13543 13544 /* check if MSI capability is enabled */ 13545 if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) { 13546 if (reg != 0) { 13547 BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg); 13548 13549 sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG; 13550 sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg; 13551 } 13552 } 13553 13554 /* check if MSI-X capability is enabled */ 13555 if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) { 13556 if (reg != 0) { 13557 BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg); 13558 13559 sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG; 13560 sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg; 13561 } 13562 } 13563} 13564 13565static int 13566bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc) 13567{ 13568 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13569 uint32_t val; 13570 13571 /* get the outer vlan if we're in switch-dependent mode */ 13572 13573 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13574 mf_info->ext_id = (uint16_t)val; 13575 13576 mf_info->multi_vnics_mode = 1; 13577 13578 if (!VALID_OVLAN(mf_info->ext_id)) { 13579 BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id); 13580 return (1); 13581 } 13582 13583 /* get the capabilities */ 13584 if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == 13585 FUNC_MF_CFG_PROTOCOL_ISCSI) { 13586 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI; 13587 } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == 13588 FUNC_MF_CFG_PROTOCOL_FCOE) { 13589 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE; 13590 } else { 13591 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET; 13592 } 13593 13594 mf_info->vnics_per_port = 13595 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13596 13597 return (0); 13598} 13599 13600static uint32_t 13601bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc) 13602{ 13603 uint32_t retval = 0; 13604 uint32_t val; 13605 13606 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 13607 13608 if (val & MACP_FUNC_CFG_FLAGS_ENABLED) { 13609 if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) { 13610 retval |= MF_PROTO_SUPPORT_ETHERNET; 13611 } 13612 if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 13613 retval |= MF_PROTO_SUPPORT_ISCSI; 13614 } 13615 if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { 13616 retval |= MF_PROTO_SUPPORT_FCOE; 13617 } 13618 } 13619 13620 return (retval); 13621} 13622 13623static int 13624bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc) 13625{ 13626 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13627 uint32_t val; 13628 13629 /* 13630 * There is no outer vlan if we're in switch-independent mode. 13631 * If the mac is valid then assume multi-function. 
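 * (Any MACP_FUNC_CFG_FLAGS_* bit set in func_cfg implies multiple vnics.)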
13632 */ 13633 13634 val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); 13635 13636 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0); 13637 13638 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); 13639 13640 mf_info->vnics_per_port = 13641 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13642 13643 return (0); 13644} 13645 13646static int 13647bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc) 13648{ 13649 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13650 uint32_t e1hov_tag; 13651 uint32_t func_config; 13652 uint32_t niv_config; 13653 13654 mf_info->multi_vnics_mode = 1; 13655 13656 e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13657 func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 13658 niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config); 13659 13660 mf_info->ext_id = 13661 (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >> 13662 FUNC_MF_CFG_E1HOV_TAG_SHIFT); 13663 13664 mf_info->default_vlan = 13665 (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >> 13666 FUNC_MF_CFG_AFEX_VLAN_SHIFT); 13667 13668 mf_info->niv_allowed_priorities = 13669 (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> 13670 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT); 13671 13672 mf_info->niv_default_cos = 13673 (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> 13674 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT); 13675 13676 mf_info->afex_vlan_mode = 13677 ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> 13678 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT); 13679 13680 mf_info->niv_mba_enabled = 13681 ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >> 13682 FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT); 13683 13684 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); 13685 13686 mf_info->vnics_per_port = 13687 (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; 13688 13689 return (0); 13690} 13691 13692static int 13693bxe_check_valid_mf_cfg(struct bxe_softc *sc) 13694{ 13695 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13696 uint32_t mf_cfg1; 13697 uint32_t mf_cfg2; 13698 uint32_t ovlan1; 13699 uint32_t ovlan2; 13700 uint8_t i, j; 13701 13702 BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n", 13703 SC_PORT(sc)); 13704 BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n", 13705 mf_info->mf_config[SC_VN(sc)]); 13706 BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n", 13707 mf_info->multi_vnics_mode); 13708 BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n", 13709 mf_info->vnics_per_port); 13710 BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n", 13711 mf_info->ext_id); 13712 BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n", 13713 mf_info->min_bw[0], mf_info->min_bw[1], 13714 mf_info->min_bw[2], mf_info->min_bw[3]); 13715 BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n", 13716 mf_info->max_bw[0], mf_info->max_bw[1], 13717 mf_info->max_bw[2], mf_info->max_bw[3]); 13718 BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n", 13719 sc->mac_addr_str); 13720 13721 /* various MF mode sanity checks... 
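 * The checks below reject a hidden function, a vnics_per_port vs.
 * multi_vnics_mode mismatch, and (in SD mode) missing or duplicate
 * outer VLANs on a port.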
 */ 13722 13723 if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) { 13724 BLOGE(sc, "Enumerated function %d is marked as hidden\n", 13725 SC_PORT(sc)); 13726 return (1); 13727 } 13728 13729 if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) { 13730 BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n", 13731 mf_info->vnics_per_port, mf_info->multi_vnics_mode); 13732 return (1); 13733 } 13734 13735 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 13736 /* vnic id > 0 must have valid ovlan in switch-dependent mode */ 13737 if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) { 13738 BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n", 13739 SC_VN(sc), OVLAN(sc)); 13740 return (1); 13741 } 13742 13743 if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) { 13744 BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n", 13745 mf_info->multi_vnics_mode, OVLAN(sc)); 13746 return (1); 13747 } 13748 13749 /* 13750 * Verify all functions are either MF or SF mode. If MF, make 13751 * sure that all non-hidden functions have a valid ovlan. If SF, 13752 * make sure that all non-hidden functions have an invalid ovlan. 13753 */ 13754 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13755 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); 13756 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); 13757 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && 13758 (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) || 13759 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) { 13760 BLOGE(sc, "mf_mode=SD function %d MF config " 13761 "mismatch, multi_vnics_mode=%d ovlan=%d\n", 13762 i, mf_info->multi_vnics_mode, ovlan1); 13763 return (1); 13764 } 13765 } 13766 13767 /* Verify all funcs on the same port each have a different ovlan. */ 13768 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13769 mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); 13770 ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); 13771 /* iterate from the next function on the port to the max func */ 13772 for (j = i + 2; j < MAX_FUNC_NUM; j += 2) { 13773 mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config); 13774 ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag); 13775 if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && 13776 VALID_OVLAN(ovlan1) && 13777 !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) && 13778 VALID_OVLAN(ovlan2) && 13779 (ovlan1 == ovlan2)) { 13780 BLOGE(sc, "mf_mode=SD functions %d and %d " 13781 "have the same ovlan (%d)\n", 13782 i, j, ovlan1); 13783 return (1); 13784 } 13785 } 13786 } 13787 } /* MULTI_FUNCTION_SD */ 13788 13789 return (0); 13790} 13791 13792static int 13793bxe_get_mf_cfg_info(struct bxe_softc *sc) 13794{ 13795 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; 13796 uint32_t val, mac_upper; 13797 uint8_t i, vnic; 13798 13799 /* initialize mf_info defaults */ 13800 mf_info->vnics_per_port = 1; 13801 mf_info->multi_vnics_mode = FALSE; 13802 mf_info->path_has_ovlan = FALSE; 13803 mf_info->mf_mode = SINGLE_FUNCTION; 13804 13805 if (!CHIP_IS_MF_CAP(sc)) { 13806 return (0); 13807 } 13808 13809 if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) { 13810 BLOGE(sc, "Invalid mf_cfg_base!\n"); 13811 return (1); 13812 } 13813 13814 /* get the MF mode (switch dependent / independent / single-function) */ 13815 13816 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 13817 13818 switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) 13819 { 13820 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: 13821 13822 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 13823 13824 /* check for legal upper mac bytes */ 13825 if (mac_upper != 
FUNC_MF_CFG_UPPERMAC_DEFAULT) { 13826 mf_info->mf_mode = MULTI_FUNCTION_SI; 13827 } else { 13828 BLOGE(sc, "Invalid config for Switch Independent mode\n"); 13829 } 13830 13831 break; 13832 13833 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 13834 case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4: 13835 13836 /* get outer vlan configuration */ 13837 val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); 13838 13839 if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) != 13840 FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 13841 mf_info->mf_mode = MULTI_FUNCTION_SD; 13842 } else { 13843 BLOGE(sc, "Invalid config for Switch Dependent mode\n"); 13844 } 13845 13846 break; 13847 13848 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: 13849 13850 /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */ 13851 return (0); 13852 13853 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: 13854 13855 /* 13856 * Mark MF mode as NIV if MCP version includes NPAR-SD support 13857 * and the MAC address is valid. 13858 */ 13859 mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 13860 13861 if ((SHMEM2_HAS(sc, afex_driver_support)) && 13862 (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) { 13863 mf_info->mf_mode = MULTI_FUNCTION_AFEX; 13864 } else { 13865 BLOGE(sc, "Invalid config for AFEX mode\n"); 13866 } 13867 13868 break; 13869 13870 default: 13871 13872 BLOGE(sc, "Unknown MF mode (0x%08x)\n", 13873 (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)); 13874 13875 return (1); 13876 } 13877 13878 /* set path mf_mode (which could be different than function mf_mode) */ 13879 if (mf_info->mf_mode == MULTI_FUNCTION_SD) { 13880 mf_info->path_has_ovlan = TRUE; 13881 } else if (mf_info->mf_mode == SINGLE_FUNCTION) { 13882 /* 13883 * Decide on path multi vnics mode. If we're not in MF mode and in 13884 * 4-port mode, this is good enough to check vnic-0 of the other port 13885 * on the same path 13886 */ 13887 if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { 13888 uint8_t other_port = !(PORT_ID(sc) & 1); 13889 uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port)); 13890 13891 val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag); 13892 13893 mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 
1 : 0; 13894 } 13895 } 13896 13897 if (mf_info->mf_mode == SINGLE_FUNCTION) { 13898 /* invalid MF config */ 13899 if (SC_VN(sc) >= 1) { 13900 BLOGE(sc, "VNIC ID >= 1 in SF mode\n"); 13901 return (1); 13902 } 13903 13904 return (0); 13905 } 13906 13907 /* get the MF configuration */ 13908 mf_info->mf_config[SC_VN(sc)] = 13909 MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); 13910 13911 switch(mf_info->mf_mode) 13912 { 13913 case MULTI_FUNCTION_SD: 13914 13915 bxe_get_shmem_mf_cfg_info_sd(sc); 13916 break; 13917 13918 case MULTI_FUNCTION_SI: 13919 13920 bxe_get_shmem_mf_cfg_info_si(sc); 13921 break; 13922 13923 case MULTI_FUNCTION_AFEX: 13924 13925 bxe_get_shmem_mf_cfg_info_niv(sc); 13926 break; 13927 13928 default: 13929 13930 BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n", 13931 mf_info->mf_mode); 13932 return (1); 13933 } 13934 13935 /* get the congestion management parameters */ 13936 13937 vnic = 0; 13938 FOREACH_ABS_FUNC_IN_PORT(sc, i) { 13939 /* get min/max bw */ 13940 val = MFCFG_RD(sc, func_mf_config[i].config); 13941 mf_info->min_bw[vnic] = 13942 ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT); 13943 mf_info->max_bw[vnic] = 13944 ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT); 13945 vnic++; 13946 } 13947 13948 return (bxe_check_valid_mf_cfg(sc)); 13949} 13950 13951static int 13952bxe_get_shmem_info(struct bxe_softc *sc) 13953{ 13954 int port; 13955 uint32_t mac_hi, mac_lo, val; 13956 13957 port = SC_PORT(sc); 13958 mac_hi = mac_lo = 0; 13959 13960 sc->link_params.sc = sc; 13961 sc->link_params.port = port; 13962 13963 /* get the hardware config info */ 13964 sc->devinfo.hw_config = 13965 SHMEM_RD(sc, dev_info.shared_hw_config.config); 13966 sc->devinfo.hw_config2 = 13967 SHMEM_RD(sc, dev_info.shared_hw_config.config2); 13968 13969 sc->link_params.hw_led_mode = 13970 ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> 13971 SHARED_HW_CFG_LED_MODE_SHIFT); 13972 13973 /* get the port feature config */ 13974 sc->port.config = 13975 SHMEM_RD(sc, dev_info.port_feature_config[port].config); 13976 13977 /* get the link params */ 13978 sc->link_params.speed_cap_mask[0] = 13979 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask); 13980 sc->link_params.speed_cap_mask[1] = 13981 SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2); 13982 13983 /* get the lane config */ 13984 sc->link_params.lane_config = 13985 SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config); 13986 13987 /* get the link config */ 13988 val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config); 13989 sc->port.link_config[ELINK_INT_PHY] = val; 13990 sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK); 13991 sc->port.link_config[ELINK_EXT_PHY1] = 13992 SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2); 13993 13994 /* get the override preemphasis flag and enable it or turn it off */ 13995 val = SHMEM_RD(sc, dev_info.shared_feature_config.config); 13996 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) { 13997 sc->link_params.feature_config_flags |= 13998 ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 13999 } else { 14000 sc->link_params.feature_config_flags &= 14001 ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 14002 } 14003 14004 /* get the initial value of the link params */ 14005 sc->link_params.multi_phy_config = 14006 SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config); 14007 14008 /* get external phy info */ 14009 sc->port.ext_phy_config = 14010 SHMEM_RD(sc, 
dev_info.port_hw_config[port].external_phy_config); 14011 14012 /* get the multifunction configuration */ 14013 bxe_get_mf_cfg_info(sc); 14014 14015 /* get the mac address */ 14016 if (IS_MF(sc)) { 14017 mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); 14018 mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower); 14019 } else { 14020 mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper); 14021 mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower); 14022 } 14023 14024 if ((mac_lo == 0) && (mac_hi == 0)) { 14025 *sc->mac_addr_str = 0; 14026 BLOGE(sc, "No Ethernet address programmed!\n"); 14027 } else { 14028 sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8); 14029 sc->link_params.mac_addr[1] = (uint8_t)(mac_hi); 14030 sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24); 14031 sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16); 14032 sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8); 14033 sc->link_params.mac_addr[5] = (uint8_t)(mac_lo); 14034 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str), 14035 "%02x:%02x:%02x:%02x:%02x:%02x", 14036 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1], 14037 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3], 14038 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]); 14039 BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str); 14040 } 14041 14042#if 0 14043 if (!IS_MF(sc) && 14044 ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 14045 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE)) { 14046 sc->flags |= BXE_NO_ISCSI; 14047 } 14048 if (!IS_MF(sc) && 14049 ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 14050 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI)) { 14051 sc->flags |= BXE_NO_FCOE_FLAG; 14052 } 14053#endif 14054 14055 return (0); 14056} 14057 14058static void 14059bxe_get_tunable_params(struct bxe_softc *sc) 14060{ 14061 /* sanity checks */ 14062 14063 if ((bxe_interrupt_mode != INTR_MODE_INTX) && 14064 (bxe_interrupt_mode != INTR_MODE_MSI) && 14065 (bxe_interrupt_mode != INTR_MODE_MSIX)) { 14066 BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode); 14067 bxe_interrupt_mode = INTR_MODE_MSIX; 14068 } 14069 14070 if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) { 14071 BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count); 14072 bxe_queue_count = 0; 14073 } 14074 14075 if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) { 14076 if (bxe_max_rx_bufs == 0) { 14077 bxe_max_rx_bufs = RX_BD_USABLE; 14078 } else { 14079 BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs); 14080 bxe_max_rx_bufs = 2048; 14081 } 14082 } 14083 14084 if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) { 14085 BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks); 14086 bxe_hc_rx_ticks = 25; 14087 } 14088 14089 if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) { 14090 BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks); 14091 bxe_hc_tx_ticks = 50; 14092 } 14093 14094 if (bxe_max_aggregation_size == 0) { 14095 bxe_max_aggregation_size = TPA_AGG_SIZE; 14096 } 14097 14098 if (bxe_max_aggregation_size > 0xffff) { 14099 BLOGW(sc, "invalid max_aggregation_size (%d)\n", 14100 bxe_max_aggregation_size); 14101 bxe_max_aggregation_size = TPA_AGG_SIZE; 14102 } 14103 14104 if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) { 14105 BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs); 14106 bxe_mrrs = -1; 14107 } 14108 14109 if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) { 14110 BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen); 14111 
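 /* 0 defers to the hardware/NVRAM AutoGrEEEn default (see bxe_get_phy_info()) */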
bxe_autogreeen = 0; 14112 } 14113 14114 if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) { 14115 BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss); 14116 bxe_udp_rss = 0; 14117 } 14118 14119 /* pull in user settings */ 14120 14121 sc->interrupt_mode = bxe_interrupt_mode; 14122 sc->max_rx_bufs = bxe_max_rx_bufs; 14123 sc->hc_rx_ticks = bxe_hc_rx_ticks; 14124 sc->hc_tx_ticks = bxe_hc_tx_ticks; 14125 sc->max_aggregation_size = bxe_max_aggregation_size; 14126 sc->mrrs = bxe_mrrs; 14127 sc->autogreeen = bxe_autogreeen; 14128 sc->udp_rss = bxe_udp_rss; 14129 14130 if (bxe_interrupt_mode == INTR_MODE_INTX) { 14131 sc->num_queues = 1; 14132 } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */ 14133 sc->num_queues = 14134 min((bxe_queue_count ? bxe_queue_count : mp_ncpus), 14135 MAX_RSS_CHAINS); 14136 if (sc->num_queues > mp_ncpus) { 14137 sc->num_queues = mp_ncpus; 14138 } 14139 } 14140 14141 BLOGD(sc, DBG_LOAD, 14142 "User Config: " 14143 "debug=0x%lx " 14144 "interrupt_mode=%d " 14145 "queue_count=%d " 14146 "hc_rx_ticks=%d " 14147 "hc_tx_ticks=%d " 14148 "rx_budget=%d " 14149 "max_aggregation_size=%d " 14150 "mrrs=%d " 14151 "autogreeen=%d " 14152 "udp_rss=%d\n", 14153 bxe_debug, 14154 sc->interrupt_mode, 14155 sc->num_queues, 14156 sc->hc_rx_ticks, 14157 sc->hc_tx_ticks, 14158 bxe_rx_budget, 14159 sc->max_aggregation_size, 14160 sc->mrrs, 14161 sc->autogreeen, 14162 sc->udp_rss); 14163} 14164 14165static void 14166bxe_media_detect(struct bxe_softc *sc) 14167{ 14168 uint32_t phy_idx = bxe_get_cur_phy_idx(sc); 14169 switch (sc->link_params.phy[phy_idx].media_type) { 14170 case ELINK_ETH_PHY_SFPP_10G_FIBER: 14171 case ELINK_ETH_PHY_XFP_FIBER: 14172 BLOGI(sc, "Found 10Gb Fiber media.\n"); 14173 sc->media = IFM_10G_SR; 14174 break; 14175 case ELINK_ETH_PHY_SFP_1G_FIBER: 14176 BLOGI(sc, "Found 1Gb Fiber media.\n"); 14177 sc->media = IFM_1000_SX; 14178 break; 14179 case ELINK_ETH_PHY_KR: 14180 case ELINK_ETH_PHY_CX4: 14181 BLOGI(sc, "Found 10GBase-CX4 media.\n"); 14182 sc->media = IFM_10G_CX4; 14183 break; 14184 case ELINK_ETH_PHY_DA_TWINAX: 14185 BLOGI(sc, "Found 10Gb Twinax media.\n"); 14186 sc->media = IFM_10G_TWINAX; 14187 break; 14188 case ELINK_ETH_PHY_BASE_T: 14189 if (sc->link_params.speed_cap_mask[0] & 14190 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 14191 BLOGI(sc, "Found 10GBase-T media.\n"); 14192 sc->media = IFM_10G_T; 14193 } else { 14194 BLOGI(sc, "Found 1000Base-T media.\n"); 14195 sc->media = IFM_1000_T; 14196 } 14197 break; 14198 case ELINK_ETH_PHY_NOT_PRESENT: 14199 BLOGI(sc, "Media not present.\n"); 14200 sc->media = 0; 14201 break; 14202 case ELINK_ETH_PHY_UNSPECIFIED: 14203 default: 14204 BLOGI(sc, "Unknown media!\n"); 14205 sc->media = 0; 14206 break; 14207 } 14208} 14209 14210#define GET_FIELD(value, fname) \ 14211 (((value) & (fname##_MASK)) >> (fname##_SHIFT)) 14212#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) 14213#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) 14214 14215static int 14216bxe_get_igu_cam_info(struct bxe_softc *sc) 14217{ 14218 int pfid = SC_FUNC(sc); 14219 int igu_sb_id; 14220 uint32_t val; 14221 uint8_t fid, igu_sb_cnt = 0; 14222 14223 sc->igu_base_sb = 0xff; 14224 14225 if (CHIP_INT_MODE_IS_BC(sc)) { 14226 int vn = SC_VN(sc); 14227 igu_sb_cnt = sc->igu_sb_cnt; 14228 sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) * 14229 FP_SB_MAX_E1x); 14230 sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x + 14231 (CHIP_IS_MODE_4_PORT(sc) ? 
pfid : vn)); 14232 return (0); 14233 } 14234 14235 /* IGU in normal mode - read CAM */ 14236 for (igu_sb_id = 0; 14237 igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; 14238 igu_sb_id++) { 14239 val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); 14240 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) { 14241 continue; 14242 } 14243 fid = IGU_FID(val); 14244 if ((fid & IGU_FID_ENCODE_IS_PF)) { 14245 if ((fid & IGU_FID_PF_NUM_MASK) != pfid) { 14246 continue; 14247 } 14248 if (IGU_VEC(val) == 0) { 14249 /* default status block */ 14250 sc->igu_dsb_id = igu_sb_id; 14251 } else { 14252 if (sc->igu_base_sb == 0xff) { 14253 sc->igu_base_sb = igu_sb_id; 14254 } 14255 igu_sb_cnt++; 14256 } 14257 } 14258 } 14259 14260 /* 14261 * Due to the new PF resource allocation by MFW T7.4 and above, the number 14262 * of CAM entries may not be equal to the value advertised in 14263 * PCI. The driver should use the minimum of the two as the actual status 14264 * block count. 14265 */ 14266 sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt); 14267 14268 if (igu_sb_cnt == 0) { 14269 BLOGE(sc, "CAM configuration error\n"); 14270 return (-1); 14271 } 14272 14273 return (0); 14274} 14275 14276/* 14277 * Gather various information from the device config space, the device itself, 14278 * shmem, and the user input. 14279 */ 14280static int 14281bxe_get_device_info(struct bxe_softc *sc) 14282{ 14283 uint32_t val; 14284 int rc; 14285 14286 /* Get the data for the device */ 14287 sc->devinfo.vendor_id = pci_get_vendor(sc->dev); 14288 sc->devinfo.device_id = pci_get_device(sc->dev); 14289 sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev); 14290 sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev); 14291 14292 /* get the chip revision (chip metal comes from pci config space) */ 14293 sc->devinfo.chip_id = 14294 sc->link_params.chip_id = 14295 (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) | 14296 ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) | 14297 (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) | 14298 ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0)); 14299 14300 /* force 57811 according to MISC register */ 14301 if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { 14302 if (CHIP_IS_57810(sc)) { 14303 sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) | 14304 (sc->devinfo.chip_id & 0x0000ffff)); 14305 } else if (CHIP_IS_57810_MF(sc)) { 14306 sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) | 14307 (sc->devinfo.chip_id & 0x0000ffff)); 14308 } 14309 sc->devinfo.chip_id |= 0x1; 14310 } 14311 14312 BLOGD(sc, DBG_LOAD, 14313 "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n", 14314 sc->devinfo.chip_id, 14315 ((sc->devinfo.chip_id >> 16) & 0xffff), 14316 ((sc->devinfo.chip_id >> 12) & 0xf), 14317 ((sc->devinfo.chip_id >> 4) & 0xff), 14318 ((sc->devinfo.chip_id >> 0) & 0xf)); 14319 14320 val = (REG_RD(sc, 0x2874) & 0x55); 14321 if ((sc->devinfo.chip_id & 0x1) || 14322 (CHIP_IS_E1(sc) && val) || 14323 (CHIP_IS_E1H(sc) && (val == 0x55))) { 14324 sc->flags |= BXE_ONE_PORT_FLAG; 14325 BLOGD(sc, DBG_LOAD, "single port device\n"); 14326 } 14327 14328 /* set the doorbell size */ 14329 sc->doorbell_size = (1 << BXE_DB_SHIFT); 14330 14331 /* determine whether the device is in 2 port or 4 port mode */ 14332 sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/ 14333 if (CHIP_IS_E2E3(sc)) { 14334 /* 14335 * Read port4mode_en_ovwr[0]: 14336 * If 1, four port mode is in port4mode_en_ovwr[1]. 14337 * If 0, four port mode is in port4mode_en[0]. 
14338 */ 14339 val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); 14340 if (val & 1) { 14341 val = ((val >> 1) & 1); 14342 } else { 14343 val = REG_RD(sc, MISC_REG_PORT4MODE_EN); 14344 } 14345 14346 sc->devinfo.chip_port_mode = 14347 (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE; 14348 14349 BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2"); 14350 } 14351 14352 /* get the function and path info for the device */ 14353 bxe_get_function_num(sc); 14354 14355 /* get the shared memory base address */ 14356 sc->devinfo.shmem_base = 14357 sc->link_params.shmem_base = 14358 REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); 14359 sc->devinfo.shmem2_base = 14360 REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 : 14361 MISC_REG_GENERIC_CR_0)); 14362 14363 BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n", 14364 sc->devinfo.shmem_base, sc->devinfo.shmem2_base); 14365 14366 if (!sc->devinfo.shmem_base) { 14367 /* this should ONLY prevent upcoming shmem reads */ 14368 BLOGI(sc, "MCP not active\n"); 14369 sc->flags |= BXE_NO_MCP_FLAG; 14370 return (0); 14371 } 14372 14373 /* make sure the shared memory contents are valid */ 14374 val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); 14375 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != 14376 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) { 14377 BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val); 14378 return (0); 14379 } 14380 BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val); 14381 14382 /* get the bootcode version */ 14383 sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev); 14384 snprintf(sc->devinfo.bc_ver_str, 14385 sizeof(sc->devinfo.bc_ver_str), 14386 "%d.%d.%d", 14387 ((sc->devinfo.bc_ver >> 24) & 0xff), 14388 ((sc->devinfo.bc_ver >> 16) & 0xff), 14389 ((sc->devinfo.bc_ver >> 8) & 0xff)); 14390 BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str); 14391 14392 /* get the MF config shmem address */ 14393 sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc); 14394 BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base); 14395 14396 /* clean indirect addresses as they're not used */ 14397 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); 14398 if (IS_PF(sc)) { 14399 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0); 14400 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0); 14401 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0); 14402 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0); 14403 if (CHIP_IS_E1x(sc)) { 14404 REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0); 14405 REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0); 14406 REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0); 14407 REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0); 14408 } 14409 14410 /* 14411 * Enable internal target-read (in case we are probed after PF 14412 * FLR). Must be done prior to any BAR read access. 
Only for 14413 * 57712 and up 14414 */ 14415 if (!CHIP_IS_E1x(sc)) { 14416 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 14417 } 14418 } 14419 14420 /* get the nvram size */ 14421 val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4); 14422 sc->devinfo.flash_size = 14423 (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE)); 14424 BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size); 14425 14426 /* get PCI capabilities */ 14427 bxe_probe_pci_caps(sc); 14428 14429 bxe_set_power_state(sc, PCI_PM_D0); 14430 14431 /* get various configuration parameters from shmem */ 14432 bxe_get_shmem_info(sc); 14433 14434 if (sc->devinfo.pcie_msix_cap_reg != 0) { 14435 val = pci_read_config(sc->dev, 14436 (sc->devinfo.pcie_msix_cap_reg + 14437 PCIR_MSIX_CTRL), 14438 2); 14439 sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE); 14440 } else { 14441 sc->igu_sb_cnt = 1; 14442 } 14443 14444 sc->igu_base_addr = BAR_IGU_INTMEM; 14445 14446 /* initialize IGU parameters */ 14447 if (CHIP_IS_E1x(sc)) { 14448 sc->devinfo.int_block = INT_BLOCK_HC; 14449 sc->igu_dsb_id = DEF_SB_IGU_ID; 14450 sc->igu_base_sb = 0; 14451 } else { 14452 sc->devinfo.int_block = INT_BLOCK_IGU; 14453 14454 /* do not allow device reset during IGU info processing */ 14455 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 14456 14457 val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); 14458 14459 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 14460 int tout = 5000; 14461 14462 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n"); 14463 14464 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); 14465 REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val); 14466 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f); 14467 14468 while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) { 14469 tout--; 14470 DELAY(1000); 14471 } 14472 14473 if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) { 14474 BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n"); 14475 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 14476 return (-1); 14477 } 14478 } 14479 14480 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { 14481 BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n"); 14482 sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP; 14483 } else { 14484 BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n"); 14485 } 14486 14487 rc = bxe_get_igu_cam_info(sc); 14488 14489 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 14490 14491 if (rc) { 14492 return (rc); 14493 } 14494 } 14495 14496 /* 14497 * Get base FW non-default (fast path) status block ID. This value is 14498 * used to initialize the fw_sb_id saved on the fp/queue structure to 14499 * determine the id used by the FW. 14500 */ 14501 if (CHIP_IS_E1x(sc)) { 14502 sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc)); 14503 } else { 14504 /* 14505 * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of 14506 * the same queue are indicated on the same IGU SB). So we prefer 14507 * FW and IGU SBs to be the same value. 
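 * For example (illustrative values only): with igu_base_sb = 6 and
 * four queues, queue i would end up with fw_sb_id == igu_sb_id == 6 + i.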
14508 */ 14509 sc->base_fw_ndsb = sc->igu_base_sb; 14510 } 14511 14512 BLOGD(sc, DBG_LOAD, 14513 "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n", 14514 sc->igu_dsb_id, sc->igu_base_sb, 14515 sc->igu_sb_cnt, sc->base_fw_ndsb); 14516 14517 elink_phy_probe(&sc->link_params); 14518 14519 return (0); 14520} 14521 14522static void 14523bxe_link_settings_supported(struct bxe_softc *sc, 14524 uint32_t switch_cfg) 14525{ 14526 uint32_t cfg_size = 0; 14527 uint32_t idx; 14528 uint8_t port = SC_PORT(sc); 14529 14530 /* aggregation of supported attributes of all external phys */ 14531 sc->port.supported[0] = 0; 14532 sc->port.supported[1] = 0; 14533 14534 switch (sc->link_params.num_phys) { 14535 case 1: 14536 sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported; 14537 cfg_size = 1; 14538 break; 14539 case 2: 14540 sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported; 14541 cfg_size = 1; 14542 break; 14543 case 3: 14544 if (sc->link_params.multi_phy_config & 14545 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 14546 sc->port.supported[1] = 14547 sc->link_params.phy[ELINK_EXT_PHY1].supported; 14548 sc->port.supported[0] = 14549 sc->link_params.phy[ELINK_EXT_PHY2].supported; 14550 } else { 14551 sc->port.supported[0] = 14552 sc->link_params.phy[ELINK_EXT_PHY1].supported; 14553 sc->port.supported[1] = 14554 sc->link_params.phy[ELINK_EXT_PHY2].supported; 14555 } 14556 cfg_size = 2; 14557 break; 14558 } 14559 14560 if (!(sc->port.supported[0] || sc->port.supported[1])) { 14561 BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n", 14562 SHMEM_RD(sc, 14563 dev_info.port_hw_config[port].external_phy_config), 14564 SHMEM_RD(sc, 14565 dev_info.port_hw_config[port].external_phy_config2)); 14566 return; 14567 } 14568 14569 if (CHIP_IS_E3(sc)) 14570 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR); 14571 else { 14572 switch (switch_cfg) { 14573 case ELINK_SWITCH_CFG_1G: 14574 sc->port.phy_addr = 14575 REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); 14576 break; 14577 case ELINK_SWITCH_CFG_10G: 14578 sc->port.phy_addr = 14579 REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); 14580 break; 14581 default: 14582 BLOGE(sc, "Invalid switch config in link_config=0x%08x\n", 14583 sc->port.link_config[0]); 14584 return; 14585 } 14586 } 14587 14588 BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr); 14589 14590 /* mask what we support according to speed_cap_mask per configuration */ 14591 for (idx = 0; idx < cfg_size; idx++) { 14592 if (!(sc->link_params.speed_cap_mask[idx] & 14593 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) { 14594 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half; 14595 } 14596 14597 if (!(sc->link_params.speed_cap_mask[idx] & 14598 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) { 14599 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full; 14600 } 14601 14602 if (!(sc->link_params.speed_cap_mask[idx] & 14603 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) { 14604 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half; 14605 } 14606 14607 if (!(sc->link_params.speed_cap_mask[idx] & 14608 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) { 14609 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full; 14610 } 14611 14612 if (!(sc->link_params.speed_cap_mask[idx] & 14613 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) { 14614 sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full; 14615 } 14616 14617 if (!(sc->link_params.speed_cap_mask[idx] & 14618 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) { 14619 sc->port.supported[idx] &= 
~ELINK_SUPPORTED_2500baseX_Full; 14620 } 14621 14622 if (!(sc->link_params.speed_cap_mask[idx] & 14623 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { 14624 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full; 14625 } 14626 14627 if (!(sc->link_params.speed_cap_mask[idx] & 14628 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) { 14629 sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full; 14630 } 14631 } 14632 14633 BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n", 14634 sc->port.supported[0], sc->port.supported[1]); 14635} 14636 14637static void 14638bxe_link_settings_requested(struct bxe_softc *sc) 14639{ 14640 uint32_t link_config; 14641 uint32_t idx; 14642 uint32_t cfg_size = 0; 14643 14644 sc->port.advertising[0] = 0; 14645 sc->port.advertising[1] = 0; 14646 14647 switch (sc->link_params.num_phys) { 14648 case 1: 14649 case 2: 14650 cfg_size = 1; 14651 break; 14652 case 3: 14653 cfg_size = 2; 14654 break; 14655 } 14656 14657 for (idx = 0; idx < cfg_size; idx++) { 14658 sc->link_params.req_duplex[idx] = DUPLEX_FULL; 14659 link_config = sc->port.link_config[idx]; 14660 14661 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 14662 case PORT_FEATURE_LINK_SPEED_AUTO: 14663 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) { 14664 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; 14665 sc->port.advertising[idx] |= sc->port.supported[idx]; 14666 if (sc->link_params.phy[ELINK_EXT_PHY1].type == 14667 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) 14668 sc->port.advertising[idx] |= 14669 (ELINK_SUPPORTED_100baseT_Half | 14670 ELINK_SUPPORTED_100baseT_Full); 14671 } else { 14672 /* force 10G, no AN */ 14673 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; 14674 sc->port.advertising[idx] |= 14675 (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); 14676 continue; 14677 } 14678 break; 14679 14680 case PORT_FEATURE_LINK_SPEED_10M_FULL: 14681 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) { 14682 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; 14683 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full | 14684 ADVERTISED_TP); 14685 } else { 14686 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14687 "speed_cap_mask=0x%08x\n", 14688 link_config, sc->link_params.speed_cap_mask[idx]); 14689 return; 14690 } 14691 break; 14692 14693 case PORT_FEATURE_LINK_SPEED_10M_HALF: 14694 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) { 14695 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; 14696 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 14697 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half | 14698 ADVERTISED_TP); 14699 } else { 14700 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14701 "speed_cap_mask=0x%08x\n", 14702 link_config, sc->link_params.speed_cap_mask[idx]); 14703 return; 14704 } 14705 break; 14706 14707 case PORT_FEATURE_LINK_SPEED_100M_FULL: 14708 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) { 14709 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; 14710 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full | 14711 ADVERTISED_TP); 14712 } else { 14713 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14714 "speed_cap_mask=0x%08x\n", 14715 link_config, sc->link_params.speed_cap_mask[idx]); 14716 return; 14717 } 14718 break; 14719 14720 case PORT_FEATURE_LINK_SPEED_100M_HALF: 14721 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) { 14722 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; 14723 sc->link_params.req_duplex[idx] = DUPLEX_HALF; 14724 sc->port.advertising[idx] |= 
(ADVERTISED_100baseT_Half | 14725 ADVERTISED_TP); 14726 } else { 14727 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14728 "speed_cap_mask=0x%08x\n", 14729 link_config, sc->link_params.speed_cap_mask[idx]); 14730 return; 14731 } 14732 break; 14733 14734 case PORT_FEATURE_LINK_SPEED_1G: 14735 if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) { 14736 sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000; 14737 sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full | 14738 ADVERTISED_TP); 14739 } else { 14740 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14741 "speed_cap_mask=0x%08x\n", 14742 link_config, sc->link_params.speed_cap_mask[idx]); 14743 return; 14744 } 14745 break; 14746 14747 case PORT_FEATURE_LINK_SPEED_2_5G: 14748 if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) { 14749 sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500; 14750 sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full | 14751 ADVERTISED_TP); 14752 } else { 14753 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14754 "speed_cap_mask=0x%08x\n", 14755 link_config, sc->link_params.speed_cap_mask[idx]); 14756 return; 14757 } 14758 break; 14759 14760 case PORT_FEATURE_LINK_SPEED_10G_CX4: 14761 if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) { 14762 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; 14763 sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | 14764 ADVERTISED_FIBRE); 14765 } else { 14766 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14767 "speed_cap_mask=0x%08x\n", 14768 link_config, sc->link_params.speed_cap_mask[idx]); 14769 return; 14770 } 14771 break; 14772 14773 case PORT_FEATURE_LINK_SPEED_20G: 14774 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000; 14775 break; 14776 14777 default: 14778 BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 14779 "speed_cap_mask=0x%08x\n", 14780 link_config, sc->link_params.speed_cap_mask[idx]); 14781 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; 14782 sc->port.advertising[idx] = sc->port.supported[idx]; 14783 break; 14784 } 14785 14786 sc->link_params.req_flow_ctrl[idx] = 14787 (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); 14788 14789 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) { 14790 if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) { 14791 sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE; 14792 } else { 14793 bxe_set_requested_fc(sc); 14794 } 14795 } 14796 14797 BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d " 14798 "req_flow_ctrl=0x%x advertising=0x%x\n", 14799 sc->link_params.req_line_speed[idx], 14800 sc->link_params.req_duplex[idx], 14801 sc->link_params.req_flow_ctrl[idx], 14802 sc->port.advertising[idx]); 14803 } 14804} 14805 14806static void 14807bxe_get_phy_info(struct bxe_softc *sc) 14808{ 14809 uint8_t port = SC_PORT(sc); 14810 uint32_t config = sc->port.config; 14811 uint32_t eee_mode; 14812 14813 /* shmem data already read in bxe_get_shmem_info() */ 14814 14815 BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x " 14816 "link_config0=0x%08x\n", 14817 sc->link_params.lane_config, 14818 sc->link_params.speed_cap_mask[0], 14819 sc->port.link_config[0]); 14820 14821 bxe_link_settings_supported(sc, sc->link_params.switch_cfg); 14822 bxe_link_settings_requested(sc); 14823 14824 if (sc->autogreeen == AUTO_GREEN_FORCE_ON) { 14825 sc->link_params.feature_config_flags |= 14826 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14827 } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) { 14828 
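 /* the force-off tunable takes precedence over the NVRAM setting below */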
sc->link_params.feature_config_flags &= 14829 ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14830 } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) { 14831 sc->link_params.feature_config_flags |= 14832 ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; 14833 } 14834 14835 /* configure link feature according to nvram value */ 14836 eee_mode = 14837 (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) & 14838 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> 14839 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); 14840 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { 14841 sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI | 14842 ELINK_EEE_MODE_ENABLE_LPI | 14843 ELINK_EEE_MODE_OUTPUT_TIME); 14844 } else { 14845 sc->link_params.eee_mode = 0; 14846 } 14847 14848 /* get the media type */ 14849 bxe_media_detect(sc); 14850} 14851 14852static void 14853bxe_get_params(struct bxe_softc *sc) 14854{ 14855 /* get user tunable params */ 14856 bxe_get_tunable_params(sc); 14857 14858 /* select the RX and TX ring sizes */ 14859 sc->tx_ring_size = TX_BD_USABLE; 14860 sc->rx_ring_size = RX_BD_USABLE; 14861 14862 /* XXX disable WoL */ 14863 sc->wol = 0; 14864} 14865 14866static void 14867bxe_set_modes_bitmap(struct bxe_softc *sc) 14868{ 14869 uint32_t flags = 0; 14870 14871 if (CHIP_REV_IS_FPGA(sc)) { 14872 SET_FLAGS(flags, MODE_FPGA); 14873 } else if (CHIP_REV_IS_EMUL(sc)) { 14874 SET_FLAGS(flags, MODE_EMUL); 14875 } else { 14876 SET_FLAGS(flags, MODE_ASIC); 14877 } 14878 14879 if (CHIP_IS_MODE_4_PORT(sc)) { 14880 SET_FLAGS(flags, MODE_PORT4); 14881 } else { 14882 SET_FLAGS(flags, MODE_PORT2); 14883 } 14884 14885 if (CHIP_IS_E2(sc)) { 14886 SET_FLAGS(flags, MODE_E2); 14887 } else if (CHIP_IS_E3(sc)) { 14888 SET_FLAGS(flags, MODE_E3); 14889 if (CHIP_REV(sc) == CHIP_REV_Ax) { 14890 SET_FLAGS(flags, MODE_E3_A0); 14891 } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ { 14892 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); 14893 } 14894 } 14895 14896 if (IS_MF(sc)) { 14897 SET_FLAGS(flags, MODE_MF); 14898 switch (sc->devinfo.mf_info.mf_mode) { 14899 case MULTI_FUNCTION_SD: 14900 SET_FLAGS(flags, MODE_MF_SD); 14901 break; 14902 case MULTI_FUNCTION_SI: 14903 SET_FLAGS(flags, MODE_MF_SI); 14904 break; 14905 case MULTI_FUNCTION_AFEX: 14906 SET_FLAGS(flags, MODE_MF_AFEX); 14907 break; 14908 } 14909 } else { 14910 SET_FLAGS(flags, MODE_SF); 14911 } 14912 14913#if defined(__LITTLE_ENDIAN) 14914 SET_FLAGS(flags, MODE_LITTLE_ENDIAN); 14915#else /* __BIG_ENDIAN */ 14916 SET_FLAGS(flags, MODE_BIG_ENDIAN); 14917#endif 14918 14919 INIT_MODE_FLAGS(sc) = flags; 14920} 14921 14922static int 14923bxe_alloc_hsi_mem(struct bxe_softc *sc) 14924{ 14925 struct bxe_fastpath *fp; 14926 bus_addr_t busaddr; 14927 int max_agg_queues; 14928 int max_segments; 14929 bus_size_t max_size; 14930 bus_size_t max_seg_size; 14931 char buf[32]; 14932 int rc; 14933 int i, j; 14934 14935 /* XXX zero out all vars here and call bxe_alloc_hsi_mem on error */ 14936 14937 /* allocate the parent bus DMA tag */ 14938 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */ 14939 1, /* alignment */ 14940 0, /* boundary limit */ 14941 BUS_SPACE_MAXADDR, /* restricted low */ 14942 BUS_SPACE_MAXADDR, /* restricted hi */ 14943 NULL, /* addr filter() */ 14944 NULL, /* addr filter() arg */ 14945 BUS_SPACE_MAXSIZE_32BIT, /* max map size */ 14946 BUS_SPACE_UNRESTRICTED, /* num discontinuous */ 14947 BUS_SPACE_MAXSIZE_32BIT, /* max seg size */ 14948 0, /* flags */ 14949 NULL, /* lock() */ 14950 NULL, /* lock() arg */ 14951 &sc->parent_dma_tag); /* returned dma tag */ 14952 if (rc 
!= 0) { 14953 BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc); 14954 return (1); 14955 } 14956 14957 /************************/ 14958 /* DEFAULT STATUS BLOCK */ 14959 /************************/ 14960 14961 if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block), 14962 &sc->def_sb_dma, "default status block") != 0) { 14963 /* XXX */ 14964 bus_dma_tag_destroy(sc->parent_dma_tag); 14965 return (1); 14966 } 14967 14968 sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr; 14969 14970 /***************/ 14971 /* EVENT QUEUE */ 14972 /***************/ 14973 14974 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, 14975 &sc->eq_dma, "event queue") != 0) { 14976 /* XXX */ 14977 bxe_dma_free(sc, &sc->def_sb_dma); 14978 sc->def_sb = NULL; 14979 bus_dma_tag_destroy(sc->parent_dma_tag); 14980 return (1); 14981 } 14982 14983 sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr; 14984 14985 /*************/ 14986 /* SLOW PATH */ 14987 /*************/ 14988 14989 if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath), 14990 &sc->sp_dma, "slow path") != 0) { 14991 /* XXX */ 14992 bxe_dma_free(sc, &sc->eq_dma); 14993 sc->eq = NULL; 14994 bxe_dma_free(sc, &sc->def_sb_dma); 14995 sc->def_sb = NULL; 14996 bus_dma_tag_destroy(sc->parent_dma_tag); 14997 return (1); 14998 } 14999 15000 sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr; 15001 15002 /*******************/ 15003 /* SLOW PATH QUEUE */ 15004 /*******************/ 15005 15006 if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, 15007 &sc->spq_dma, "slow path queue") != 0) { 15008 /* XXX */ 15009 bxe_dma_free(sc, &sc->sp_dma); 15010 sc->sp = NULL; 15011 bxe_dma_free(sc, &sc->eq_dma); 15012 sc->eq = NULL; 15013 bxe_dma_free(sc, &sc->def_sb_dma); 15014 sc->def_sb = NULL; 15015 bus_dma_tag_destroy(sc->parent_dma_tag); 15016 return (1); 15017 } 15018 15019 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; 15020 15021 /***************************/ 15022 /* FW DECOMPRESSION BUFFER */ 15023 /***************************/ 15024 15025 if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, 15026 "fw decompression buffer") != 0) { 15027 /* XXX */ 15028 bxe_dma_free(sc, &sc->spq_dma); 15029 sc->spq = NULL; 15030 bxe_dma_free(sc, &sc->sp_dma); 15031 sc->sp = NULL; 15032 bxe_dma_free(sc, &sc->eq_dma); 15033 sc->eq = NULL; 15034 bxe_dma_free(sc, &sc->def_sb_dma); 15035 sc->def_sb = NULL; 15036 bus_dma_tag_destroy(sc->parent_dma_tag); 15037 return (1); 15038 } 15039 15040 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr; 15041 15042 if ((sc->gz_strm = 15043 malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) { 15044 /* XXX */ 15045 bxe_dma_free(sc, &sc->gz_buf_dma); 15046 sc->gz_buf = NULL; 15047 bxe_dma_free(sc, &sc->spq_dma); 15048 sc->spq = NULL; 15049 bxe_dma_free(sc, &sc->sp_dma); 15050 sc->sp = NULL; 15051 bxe_dma_free(sc, &sc->eq_dma); 15052 sc->eq = NULL; 15053 bxe_dma_free(sc, &sc->def_sb_dma); 15054 sc->def_sb = NULL; 15055 bus_dma_tag_destroy(sc->parent_dma_tag); 15056 return (1); 15057 } 15058 15059 /*************/ 15060 /* FASTPATHS */ 15061 /*************/ 15062 15063 /* allocate DMA memory for each fastpath structure */ 15064 for (i = 0; i < sc->num_queues; i++) { 15065 fp = &sc->fp[i]; 15066 fp->sc = sc; 15067 fp->index = i; 15068 15069 /*******************/ 15070 /* FP STATUS BLOCK */ 15071 /*******************/ 15072 15073 snprintf(buf, sizeof(buf), "fp %d status block", i); 15074 if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block), 15075 &fp->sb_dma, buf) != 0) { 15076 /* XXX unwind and free previous fastpath allocations */ 15077 BLOGE(sc, "Failed to alloc %s\n", 
buf); 15078 return (1); 15079 } else { 15080 if (CHIP_IS_E2E3(sc)) { 15081 fp->status_block.e2_sb = 15082 (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr; 15083 } else { 15084 fp->status_block.e1x_sb = 15085 (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr; 15086 } 15087 } 15088 15089 /******************/ 15090 /* FP TX BD CHAIN */ 15091 /******************/ 15092 15093 snprintf(buf, sizeof(buf), "fp %d tx bd chain", i); 15094 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES), 15095 &fp->tx_dma, buf) != 0) { 15096 /* XXX unwind and free previous fastpath allocations */ 15097 BLOGE(sc, "Failed to alloc %s\n", buf); 15098 return (1); 15099 } else { 15100 fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr; 15101 } 15102 15103 /* link together the tx bd chain pages */ 15104 for (j = 1; j <= TX_BD_NUM_PAGES; j++) { 15105 /* index into the tx bd chain array to last entry per page */ 15106 struct eth_tx_next_bd *tx_next_bd = 15107 &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd; 15108 /* point to the next page and wrap from last page */ 15109 busaddr = (fp->tx_dma.paddr + 15110 (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES))); 15111 tx_next_bd->addr_hi = htole32(U64_HI(busaddr)); 15112 tx_next_bd->addr_lo = htole32(U64_LO(busaddr)); 15113 } 15114 15115 /******************/ 15116 /* FP RX BD CHAIN */ 15117 /******************/ 15118 15119 snprintf(buf, sizeof(buf), "fp %d rx bd chain", i); 15120 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES), 15121 &fp->rx_dma, buf) != 0) { 15122 /* XXX unwind and free previous fastpath allocations */ 15123 BLOGE(sc, "Failed to alloc %s\n", buf); 15124 return (1); 15125 } else { 15126 fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr; 15127 } 15128 15129 /* link together the rx bd chain pages */ 15130 for (j = 1; j <= RX_BD_NUM_PAGES; j++) { 15131 /* index into the rx bd chain array to last entry per page */ 15132 struct eth_rx_bd *rx_bd = 15133 &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2]; 15134 /* point to the next page and wrap from last page */ 15135 busaddr = (fp->rx_dma.paddr + 15136 (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES))); 15137 rx_bd->addr_hi = htole32(U64_HI(busaddr)); 15138 rx_bd->addr_lo = htole32(U64_LO(busaddr)); 15139 } 15140 15141 /*******************/ 15142 /* FP RX RCQ CHAIN */ 15143 /*******************/ 15144 15145 snprintf(buf, sizeof(buf), "fp %d rcq chain", i); 15146 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES), 15147 &fp->rcq_dma, buf) != 0) { 15148 /* XXX unwind and free previous fastpath allocations */ 15149 BLOGE(sc, "Failed to alloc %s\n", buf); 15150 return (1); 15151 } else { 15152 fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr; 15153 } 15154 15155 /* link together the rcq chain pages */ 15156 for (j = 1; j <= RCQ_NUM_PAGES; j++) { 15157 /* index into the rcq chain array to last entry per page */ 15158 struct eth_rx_cqe_next_page *rx_cqe_next = 15159 (struct eth_rx_cqe_next_page *) 15160 &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1]; 15161 /* point to the next page and wrap from last page */ 15162 busaddr = (fp->rcq_dma.paddr + 15163 (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES))); 15164 rx_cqe_next->addr_hi = htole32(U64_HI(busaddr)); 15165 rx_cqe_next->addr_lo = htole32(U64_LO(busaddr)); 15166 } 15167 15168 /*******************/ 15169 /* FP RX SGE CHAIN */ 15170 /*******************/ 15171 15172 snprintf(buf, sizeof(buf), "fp %d sge chain", i); 15173 if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES), 15174 &fp->rx_sge_dma, buf) != 0) { 15175 /* XXX unwind and free previous fastpath allocations */ 
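 /*
 * As the XXX markers note, earlier fastpath allocations are not
 * unwound here; bxe_free_hsi_mem() NULL-checks every tag and map,
 * so it can be called to clean up a partially-initialized softc.
 */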
15176 BLOGE(sc, "Failed to alloc %s\n", buf); 15177 return (1); 15178 } else { 15179 fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr; 15180 } 15181 15182 /* link together the sge chain pages */ 15183 for (j = 1; j <= RX_SGE_NUM_PAGES; j++) { 15184 /* index into the rcq chain array to last entry per page */ 15185 struct eth_rx_sge *rx_sge = 15186 &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2]; 15187 /* point to the next page and wrap from last page */ 15188 busaddr = (fp->rx_sge_dma.paddr + 15189 (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES))); 15190 rx_sge->addr_hi = htole32(U64_HI(busaddr)); 15191 rx_sge->addr_lo = htole32(U64_LO(busaddr)); 15192 } 15193 15194 /***********************/ 15195 /* FP TX MBUF DMA MAPS */ 15196 /***********************/ 15197 15198 /* set required sizes before mapping to conserve resources */ 15199 if (sc->ifnet->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) { 15200 max_size = BXE_TSO_MAX_SIZE; 15201 max_segments = BXE_TSO_MAX_SEGMENTS; 15202 max_seg_size = BXE_TSO_MAX_SEG_SIZE; 15203 } else { 15204 max_size = (MCLBYTES * BXE_MAX_SEGMENTS); 15205 max_segments = BXE_MAX_SEGMENTS; 15206 max_seg_size = MCLBYTES; 15207 } 15208 15209 /* create a dma tag for the tx mbufs */ 15210 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15211 1, /* alignment */ 15212 0, /* boundary limit */ 15213 BUS_SPACE_MAXADDR, /* restricted low */ 15214 BUS_SPACE_MAXADDR, /* restricted hi */ 15215 NULL, /* addr filter() */ 15216 NULL, /* addr filter() arg */ 15217 max_size, /* max map size */ 15218 max_segments, /* num discontinuous */ 15219 max_seg_size, /* max seg size */ 15220 0, /* flags */ 15221 NULL, /* lock() */ 15222 NULL, /* lock() arg */ 15223 &fp->tx_mbuf_tag); /* returned dma tag */ 15224 if (rc != 0) { 15225 /* XXX unwind and free previous fastpath allocations */ 15226 BLOGE(sc, "Failed to create dma tag for " 15227 "'fp %d tx mbufs' (%d)\n", 15228 i, rc); 15229 return (1); 15230 } 15231 15232 /* create dma maps for each of the tx mbuf clusters */ 15233 for (j = 0; j < TX_BD_TOTAL; j++) { 15234 if (bus_dmamap_create(fp->tx_mbuf_tag, 15235 BUS_DMA_NOWAIT, 15236 &fp->tx_mbuf_chain[j].m_map)) { 15237 /* XXX unwind and free previous fastpath allocations */ 15238 BLOGE(sc, "Failed to create dma map for " 15239 "'fp %d tx mbuf %d' (%d)\n", 15240 i, j, rc); 15241 return (1); 15242 } 15243 } 15244 15245 /***********************/ 15246 /* FP RX MBUF DMA MAPS */ 15247 /***********************/ 15248 15249 /* create a dma tag for the rx mbufs */ 15250 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15251 1, /* alignment */ 15252 0, /* boundary limit */ 15253 BUS_SPACE_MAXADDR, /* restricted low */ 15254 BUS_SPACE_MAXADDR, /* restricted hi */ 15255 NULL, /* addr filter() */ 15256 NULL, /* addr filter() arg */ 15257 MJUM9BYTES, /* max map size */ 15258 1, /* num discontinuous */ 15259 MJUM9BYTES, /* max seg size */ 15260 0, /* flags */ 15261 NULL, /* lock() */ 15262 NULL, /* lock() arg */ 15263 &fp->rx_mbuf_tag); /* returned dma tag */ 15264 if (rc != 0) { 15265 /* XXX unwind and free previous fastpath allocations */ 15266 BLOGE(sc, "Failed to create dma tag for " 15267 "'fp %d rx mbufs' (%d)\n", 15268 i, rc); 15269 return (1); 15270 } 15271 15272 /* create dma maps for each of the rx mbuf clusters */ 15273 for (j = 0; j < RX_BD_TOTAL; j++) { 15274 if (bus_dmamap_create(fp->rx_mbuf_tag, 15275 BUS_DMA_NOWAIT, 15276 &fp->rx_mbuf_chain[j].m_map)) { 15277 /* XXX unwind and free previous fastpath allocations */ 15278 BLOGE(sc, "Failed to create dma map for " 
15279 "'fp %d rx mbuf %d' (%d)\n", 15280 i, j, rc); 15281 return (1); 15282 } 15283 } 15284 15285 /* create dma map for the spare rx mbuf cluster */ 15286 if (bus_dmamap_create(fp->rx_mbuf_tag, 15287 BUS_DMA_NOWAIT, 15288 &fp->rx_mbuf_spare_map)) { 15289 /* XXX unwind and free previous fastpath allocations */ 15290 BLOGE(sc, "Failed to create dma map for " 15291 "'fp %d spare rx mbuf' (%d)\n", 15292 i, rc); 15293 return (1); 15294 } 15295 15296 /***************************/ 15297 /* FP RX SGE MBUF DMA MAPS */ 15298 /***************************/ 15299 15300 /* create a dma tag for the rx sge mbufs */ 15301 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 15302 1, /* alignment */ 15303 0, /* boundary limit */ 15304 BUS_SPACE_MAXADDR, /* restricted low */ 15305 BUS_SPACE_MAXADDR, /* restricted hi */ 15306 NULL, /* addr filter() */ 15307 NULL, /* addr filter() arg */ 15308 BCM_PAGE_SIZE, /* max map size */ 15309 1, /* num discontinuous */ 15310 BCM_PAGE_SIZE, /* max seg size */ 15311 0, /* flags */ 15312 NULL, /* lock() */ 15313 NULL, /* lock() arg */ 15314 &fp->rx_sge_mbuf_tag); /* returned dma tag */ 15315 if (rc != 0) { 15316 /* XXX unwind and free previous fastpath allocations */ 15317 BLOGE(sc, "Failed to create dma tag for " 15318 "'fp %d rx sge mbufs' (%d)\n", 15319 i, rc); 15320 return (1); 15321 } 15322 15323 /* create dma maps for the rx sge mbuf clusters */ 15324 for (j = 0; j < RX_SGE_TOTAL; j++) { 15325 if (bus_dmamap_create(fp->rx_sge_mbuf_tag, 15326 BUS_DMA_NOWAIT, 15327 &fp->rx_sge_mbuf_chain[j].m_map)) { 15328 /* XXX unwind and free previous fastpath allocations */ 15329 BLOGE(sc, "Failed to create dma map for " 15330 "'fp %d rx sge mbuf %d' (%d)\n", 15331 i, j, rc); 15332 return (1); 15333 } 15334 } 15335 15336 /* create dma map for the spare rx sge mbuf cluster */ 15337 if (bus_dmamap_create(fp->rx_sge_mbuf_tag, 15338 BUS_DMA_NOWAIT, 15339 &fp->rx_sge_mbuf_spare_map)) { 15340 /* XXX unwind and free previous fastpath allocations */ 15341 BLOGE(sc, "Failed to create dma map for " 15342 "'fp %d spare rx sge mbuf' (%d)\n", 15343 i, rc); 15344 return (1); 15345 } 15346 15347 /***************************/ 15348 /* FP RX TPA MBUF DMA MAPS */ 15349 /***************************/ 15350 15351 /* create dma maps for the rx tpa mbuf clusters */ 15352 max_agg_queues = MAX_AGG_QS(sc); 15353 15354 for (j = 0; j < max_agg_queues; j++) { 15355 if (bus_dmamap_create(fp->rx_mbuf_tag, 15356 BUS_DMA_NOWAIT, 15357 &fp->rx_tpa_info[j].bd.m_map)) { 15358 /* XXX unwind and free previous fastpath allocations */ 15359 BLOGE(sc, "Failed to create dma map for " 15360 "'fp %d rx tpa mbuf %d' (%d)\n", 15361 i, j, rc); 15362 return (1); 15363 } 15364 } 15365 15366 /* create dma map for the spare rx tpa mbuf cluster */ 15367 if (bus_dmamap_create(fp->rx_mbuf_tag, 15368 BUS_DMA_NOWAIT, 15369 &fp->rx_tpa_info_mbuf_spare_map)) { 15370 /* XXX unwind and free previous fastpath allocations */ 15371 BLOGE(sc, "Failed to create dma map for " 15372 "'fp %d spare rx tpa mbuf' (%d)\n", 15373 i, rc); 15374 return (1); 15375 } 15376 15377 bxe_init_sge_ring_bit_mask(fp); 15378 } 15379 15380 return (0); 15381} 15382 15383static void 15384bxe_free_hsi_mem(struct bxe_softc *sc) 15385{ 15386 struct bxe_fastpath *fp; 15387 int max_agg_queues; 15388 int i, j; 15389 15390 if (sc->parent_dma_tag == NULL) { 15391 return; /* assume nothing was allocated */ 15392 } 15393 15394 for (i = 0; i < sc->num_queues; i++) { 15395 fp = &sc->fp[i]; 15396 15397 /*******************/ 15398 /* FP STATUS BLOCK */ 15399 
/*******************/ 15400 15401 bxe_dma_free(sc, &fp->sb_dma); 15402 memset(&fp->status_block, 0, sizeof(fp->status_block)); 15403 15404 /******************/ 15405 /* FP TX BD CHAIN */ 15406 /******************/ 15407 15408 bxe_dma_free(sc, &fp->tx_dma); 15409 fp->tx_chain = NULL; 15410 15411 /******************/ 15412 /* FP RX BD CHAIN */ 15413 /******************/ 15414 15415 bxe_dma_free(sc, &fp->rx_dma); 15416 fp->rx_chain = NULL; 15417 15418 /*******************/ 15419 /* FP RX RCQ CHAIN */ 15420 /*******************/ 15421 15422 bxe_dma_free(sc, &fp->rcq_dma); 15423 fp->rcq_chain = NULL; 15424 15425 /*******************/ 15426 /* FP RX SGE CHAIN */ 15427 /*******************/ 15428 15429 bxe_dma_free(sc, &fp->rx_sge_dma); 15430 fp->rx_sge_chain = NULL; 15431 15432 /***********************/ 15433 /* FP TX MBUF DMA MAPS */ 15434 /***********************/ 15435 15436 if (fp->tx_mbuf_tag != NULL) { 15437 for (j = 0; j < TX_BD_TOTAL; j++) { 15438 if (fp->tx_mbuf_chain[j].m_map != NULL) { 15439 bus_dmamap_unload(fp->tx_mbuf_tag, 15440 fp->tx_mbuf_chain[j].m_map); 15441 bus_dmamap_destroy(fp->tx_mbuf_tag, 15442 fp->tx_mbuf_chain[j].m_map); 15443 } 15444 } 15445 15446 bus_dma_tag_destroy(fp->tx_mbuf_tag); 15447 fp->tx_mbuf_tag = NULL; 15448 } 15449 15450 /***********************/ 15451 /* FP RX MBUF DMA MAPS */ 15452 /***********************/ 15453 15454 if (fp->rx_mbuf_tag != NULL) { 15455 for (j = 0; j < RX_BD_TOTAL; j++) { 15456 if (fp->rx_mbuf_chain[j].m_map != NULL) { 15457 bus_dmamap_unload(fp->rx_mbuf_tag, 15458 fp->rx_mbuf_chain[j].m_map); 15459 bus_dmamap_destroy(fp->rx_mbuf_tag, 15460 fp->rx_mbuf_chain[j].m_map); 15461 } 15462 } 15463 15464 if (fp->rx_mbuf_spare_map != NULL) { 15465 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); 15466 bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); 15467 } 15468 15469 /***************************/ 15470 /* FP RX TPA MBUF DMA MAPS */ 15471 /***************************/ 15472 15473 max_agg_queues = MAX_AGG_QS(sc); 15474 15475 for (j = 0; j < max_agg_queues; j++) { 15476 if (fp->rx_tpa_info[j].bd.m_map != NULL) { 15477 bus_dmamap_unload(fp->rx_mbuf_tag, 15478 fp->rx_tpa_info[j].bd.m_map); 15479 bus_dmamap_destroy(fp->rx_mbuf_tag, 15480 fp->rx_tpa_info[j].bd.m_map); 15481 } 15482 } 15483 15484 if (fp->rx_tpa_info_mbuf_spare_map != NULL) { 15485 bus_dmamap_unload(fp->rx_mbuf_tag, 15486 fp->rx_tpa_info_mbuf_spare_map); 15487 bus_dmamap_destroy(fp->rx_mbuf_tag, 15488 fp->rx_tpa_info_mbuf_spare_map); 15489 } 15490 15491 bus_dma_tag_destroy(fp->rx_mbuf_tag); 15492 fp->rx_mbuf_tag = NULL; 15493 } 15494 15495 /***************************/ 15496 /* FP RX SGE MBUF DMA MAPS */ 15497 /***************************/ 15498 15499 if (fp->rx_sge_mbuf_tag != NULL) { 15500 for (j = 0; j < RX_SGE_TOTAL; j++) { 15501 if (fp->rx_sge_mbuf_chain[j].m_map != NULL) { 15502 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 15503 fp->rx_sge_mbuf_chain[j].m_map); 15504 bus_dmamap_destroy(fp->rx_sge_mbuf_tag, 15505 fp->rx_sge_mbuf_chain[j].m_map); 15506 } 15507 } 15508 15509 if (fp->rx_sge_mbuf_spare_map != NULL) { 15510 bus_dmamap_unload(fp->rx_sge_mbuf_tag, 15511 fp->rx_sge_mbuf_spare_map); 15512 bus_dmamap_destroy(fp->rx_sge_mbuf_tag, 15513 fp->rx_sge_mbuf_spare_map); 15514 } 15515 15516 bus_dma_tag_destroy(fp->rx_sge_mbuf_tag); 15517 fp->rx_sge_mbuf_tag = NULL; 15518 } 15519 } 15520 15521 /***************************/ 15522 /* FW DECOMPRESSION BUFFER */ 15523 /***************************/ 15524 15525 bxe_dma_free(sc, &sc->gz_buf_dma); 15526 sc->gz_buf = NULL; 
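 /* release the decompression stream state allocated in bxe_alloc_hsi_mem() */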
15527 free(sc->gz_strm, M_DEVBUF); 15528 sc->gz_strm = NULL; 15529 15530 /*******************/ 15531 /* SLOW PATH QUEUE */ 15532 /*******************/ 15533 15534 bxe_dma_free(sc, &sc->spq_dma); 15535 sc->spq = NULL; 15536 15537 /*************/ 15538 /* SLOW PATH */ 15539 /*************/ 15540 15541 bxe_dma_free(sc, &sc->sp_dma); 15542 sc->sp = NULL; 15543 15544 /***************/ 15545 /* EVENT QUEUE */ 15546 /***************/ 15547 15548 bxe_dma_free(sc, &sc->eq_dma); 15549 sc->eq = NULL; 15550 15551 /************************/ 15552 /* DEFAULT STATUS BLOCK */ 15553 /************************/ 15554 15555 bxe_dma_free(sc, &sc->def_sb_dma); 15556 sc->def_sb = NULL; 15557 15558 bus_dma_tag_destroy(sc->parent_dma_tag); 15559 sc->parent_dma_tag = NULL; 15560} 15561 15562/* 15563 * A DMAE transaction from the previous driver may have been in progress when 15564 * the pre-boot stage ended and boot began. This would invalidate the addresses 15565 * of the transaction, resulting in the was-error bit being set in the PCI glue 15566 * and causing all hw-to-host PCIe transactions to time out. If this happened, 15567 * we want to clear the pglueb interrupt that detected it, i.e. the was-error bit. 15568 */ 15569static void 15570bxe_prev_interrupted_dmae(struct bxe_softc *sc) 15571{ 15572 uint32_t val; 15573 15574 if (!CHIP_IS_E1x(sc)) { 15575 val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS); 15576 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { 15577 BLOGD(sc, DBG_LOAD, 15578 "Clearing 'was-error' bit that was set in pglueb\n"); 15579 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc)); 15580 } 15581 } 15582} 15583 15584static int 15585bxe_prev_mcp_done(struct bxe_softc *sc) 15586{ 15587 uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 15588 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); 15589 if (!rc) { 15590 BLOGE(sc, "MCP response failure, aborting\n"); 15591 return (-1); 15592 } 15593 15594 return (0); 15595} 15596 15597static struct bxe_prev_list_node * 15598bxe_prev_path_get_entry(struct bxe_softc *sc) 15599{ 15600 struct bxe_prev_list_node *tmp; 15601 15602 LIST_FOREACH(tmp, &bxe_prev_list, node) { 15603 if ((sc->pcie_bus == tmp->bus) && 15604 (sc->pcie_device == tmp->slot) && 15605 (SC_PATH(sc) == tmp->path)) { 15606 return (tmp); 15607 } 15608 } 15609 15610 return (NULL); 15611} 15612 15613static uint8_t 15614bxe_prev_is_path_marked(struct bxe_softc *sc) 15615{ 15616 struct bxe_prev_list_node *tmp; 15617 int rc = FALSE; 15618 15619 mtx_lock(&bxe_prev_mtx); 15620 15621 tmp = bxe_prev_path_get_entry(sc); 15622 if (tmp) { 15623 if (tmp->aer) { 15624 BLOGD(sc, DBG_LOAD, 15625 "Path %d/%d/%d was marked by AER\n", 15626 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15627 } else { 15628 rc = TRUE; 15629 BLOGD(sc, DBG_LOAD, 15630 "Path %d/%d/%d was already cleaned from previous drivers\n", 15631 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15632 } 15633 } 15634 15635 mtx_unlock(&bxe_prev_mtx); 15636 15637 return (rc); 15638} 15639 15640static int 15641bxe_prev_mark_path(struct bxe_softc *sc, 15642 uint8_t after_undi) 15643{ 15644 struct bxe_prev_list_node *tmp; 15645 15646 mtx_lock(&bxe_prev_mtx); 15647 15648 /* Check whether the entry for this path already exists */ 15649 tmp = bxe_prev_path_get_entry(sc); 15650 if (tmp) { 15651 if (!tmp->aer) { 15652 BLOGD(sc, DBG_LOAD, 15653 "Re-marking AER in path %d/%d/%d\n", 15654 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15655 } else { 15656 BLOGD(sc, DBG_LOAD, 15657 "Removing AER indication from path %d/%d/%d\n", 15658 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15659 tmp->aer 
= 0; 15660 } 15661 15662 mtx_unlock(&bxe_prev_mtx); 15663 return (0); 15664 } 15665 15666 mtx_unlock(&bxe_prev_mtx); 15667 15668 /* Create an entry for this path and add it */ 15669 tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF, 15670 (M_NOWAIT | M_ZERO)); 15671 if (!tmp) { 15672 BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n"); 15673 return (-1); 15674 } 15675 15676 tmp->bus = sc->pcie_bus; 15677 tmp->slot = sc->pcie_device; 15678 tmp->path = SC_PATH(sc); 15679 tmp->aer = 0; 15680 tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0; 15681 15682 mtx_lock(&bxe_prev_mtx); 15683 15684 BLOGD(sc, DBG_LOAD, 15685 "Marked path %d/%d/%d - finished previous unload\n", 15686 sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); 15687 LIST_INSERT_HEAD(&bxe_prev_list, tmp, node); 15688 15689 mtx_unlock(&bxe_prev_mtx); 15690 15691 return (0); 15692} 15693 15694static int 15695bxe_do_flr(struct bxe_softc *sc) 15696{ 15697 int i; 15698 15699 /* only E2 and onwards support FLR */ 15700 if (CHIP_IS_E1x(sc)) { 15701 BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n"); 15702 return (-1); 15703 } 15704 15705 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ 15706 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { 15707 BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n", 15708 sc->devinfo.bc_ver); 15709 return (-1); 15710 } 15711 15712 /* Wait for Transaction Pending bit clean */ 15713 for (i = 0; i < 4; i++) { 15714 if (i) { 15715 DELAY(((1 << (i - 1)) * 100) * 1000); 15716 } 15717 15718 if (!bxe_is_pcie_pending(sc)) { 15719 goto clear; 15720 } 15721 } 15722 15723 BLOGE(sc, "PCIE transaction is not cleared, " 15724 "proceeding with reset anyway\n"); 15725 15726clear: 15727 15728 BLOGD(sc, DBG_LOAD, "Initiating FLR\n"); 15729 bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0); 15730 15731 return (0); 15732} 15733 15734struct bxe_mac_vals { 15735 uint32_t xmac_addr; 15736 uint32_t xmac_val; 15737 uint32_t emac_addr; 15738 uint32_t emac_val; 15739 uint32_t umac_addr; 15740 uint32_t umac_val; 15741 uint32_t bmac_addr; 15742 uint32_t bmac_val[2]; 15743}; 15744 15745static void 15746bxe_prev_unload_close_mac(struct bxe_softc *sc, 15747 struct bxe_mac_vals *vals) 15748{ 15749 uint32_t val, base_addr, offset, mask, reset_reg; 15750 uint8_t mac_stopped = FALSE; 15751 uint8_t port = SC_PORT(sc); 15752 uint32_t wb_data[2]; 15753 15754 /* reset addresses as they also mark which values were changed */ 15755 vals->bmac_addr = 0; 15756 vals->umac_addr = 0; 15757 vals->xmac_addr = 0; 15758 vals->emac_addr = 0; 15759 15760 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2); 15761 15762 if (!CHIP_IS_E3(sc)) { 15763 val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); 15764 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; 15765 if ((mask & reset_reg) && val) { 15766 BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n"); 15767 base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM 15768 : NIG_REG_INGRESS_BMAC0_MEM; 15769 offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL 15770 : BIGMAC_REGISTER_BMAC_CONTROL; 15771 15772 /* 15773 * use rd/wr since we cannot use dmae. This is safe 15774 * since MCP won't access the bus due to the request 15775 * to unload, and no function on the path can be 15776 * loaded at this time. 
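 *
 * Note: the BMAC control register is a 64-bit wide-bus register (hence
 * the "wb_" naming), so it is read and written here as two adjacent
 * 32-bit dwords at 'offset' and 'offset + 0x4' rather than through a
 * single DMAE transaction.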
15777 */ 15778 wb_data[0] = REG_RD(sc, base_addr + offset); 15779 wb_data[1] = REG_RD(sc, base_addr + offset + 0x4); 15780 vals->bmac_addr = base_addr + offset; 15781 vals->bmac_val[0] = wb_data[0]; 15782 vals->bmac_val[1] = wb_data[1]; 15783 wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE; 15784 REG_WR(sc, vals->bmac_addr, wb_data[0]); 15785 REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]); 15786 } 15787 15788 BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n"); 15789 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4; 15790 vals->emac_val = REG_RD(sc, vals->emac_addr); 15791 REG_WR(sc, vals->emac_addr, 0); 15792 mac_stopped = TRUE; 15793 } else { 15794 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { 15795 BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n"); 15796 base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; 15797 val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI); 15798 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1)); 15799 REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1)); 15800 vals->xmac_addr = base_addr + XMAC_REG_CTRL; 15801 vals->xmac_val = REG_RD(sc, vals->xmac_addr); 15802 REG_WR(sc, vals->xmac_addr, 0); 15803 mac_stopped = TRUE; 15804 } 15805 15806 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; 15807 if (mask & reset_reg) { 15808 BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n"); 15809 base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0; 15810 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; 15811 vals->umac_val = REG_RD(sc, vals->umac_addr); 15812 REG_WR(sc, vals->umac_addr, 0); 15813 mac_stopped = TRUE; 15814 } 15815 } 15816 15817 if (mac_stopped) { 15818 DELAY(20000); 15819 } 15820} 15821 15822#define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) 15823#define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff) 15824#define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) 15825#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) 15826 15827static void 15828bxe_prev_unload_undi_inc(struct bxe_softc *sc, 15829 uint8_t port, 15830 uint8_t inc) 15831{ 15832 uint16_t rcq, bd; 15833 uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port)); 15834 15835 rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc; 15836 bd = BXE_PREV_UNDI_BD(tmp_reg) + inc; 15837 15838 tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd); 15839 REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg); 15840 15841 BLOGD(sc, DBG_LOAD, 15842 "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", 15843 port, bd, rcq); 15844} 15845 15846static int 15847bxe_prev_unload_common(struct bxe_softc *sc) 15848{ 15849 uint32_t reset_reg, tmp_reg = 0, rc; 15850 uint8_t prev_undi = FALSE; 15851 struct bxe_mac_vals mac_vals; 15852 uint32_t timer_count = 1000; 15853 uint32_t prev_brb; 15854 15855 /* 15856 * It is possible a previous function received 'common' answer, 15857 * but hasn't loaded yet, therefore creating a scenario of 15858 * multiple functions receiving 'common' on the same path. 
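 * Only the first function to get here must perform the full cleanup
 * below; any function arriving later will find the path already marked
 * (bxe_prev_is_path_marked) and simply completes the MCP handshake via
 * bxe_prev_mcp_done.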
15859 */ 15860 BLOGD(sc, DBG_LOAD, "Common unload Flow\n"); 15861 15862 memset(&mac_vals, 0, sizeof(mac_vals)); 15863 15864 if (bxe_prev_is_path_marked(sc)) { 15865 return (bxe_prev_mcp_done(sc)); 15866 } 15867 15868 reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1); 15869 15870 /* Reset should be performed after BRB is emptied */ 15871 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 15872 /* Close the MAC Rx to prevent BRB from filling up */ 15873 bxe_prev_unload_close_mac(sc, &mac_vals); 15874 15875 /* close LLH filters towards the BRB */ 15876 elink_set_rx_filter(&sc->link_params, 0); 15877 15878 /* 15879 * Check if the UNDI driver was previously loaded. 15880 * UNDI driver initializes CID offset for normal bell to 0x7 15881 */ 15882 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { 15883 tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST); 15884 if (tmp_reg == 0x7) { 15885 BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n"); 15886 prev_undi = TRUE; 15887 /* clear the UNDI indication */ 15888 REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0); 15889 /* clear possible idle check errors */ 15890 REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0); 15891 } 15892 } 15893 15894 /* wait until BRB is empty */ 15895 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 15896 while (timer_count) { 15897 prev_brb = tmp_reg; 15898 15899 tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); 15900 if (!tmp_reg) { 15901 break; 15902 } 15903 15904 BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg); 15905 15906 /* reset timer as long as BRB actually gets emptied */ 15907 if (prev_brb > tmp_reg) { 15908 timer_count = 1000; 15909 } else { 15910 timer_count--; 15911 } 15912 15913 /* If UNDI resides in memory, manually increment it */ 15914 if (prev_undi) { 15915 bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1); 15916 } 15917 15918 DELAY(10); 15919 } 15920 15921 if (!timer_count) { 15922 BLOGE(sc, "Failed to empty BRB\n"); 15923 } 15924 } 15925 15926 /* No packets are in the pipeline, path is ready for reset */ 15927 bxe_reset_common(sc); 15928 15929 if (mac_vals.xmac_addr) { 15930 REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val); 15931 } 15932 if (mac_vals.umac_addr) { 15933 REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val); 15934 } 15935 if (mac_vals.emac_addr) { 15936 REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val); 15937 } 15938 if (mac_vals.bmac_addr) { 15939 REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]); 15940 REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); 15941 } 15942 15943 rc = bxe_prev_mark_path(sc, prev_undi); 15944 if (rc) { 15945 bxe_prev_mcp_done(sc); 15946 return (rc); 15947 } 15948 15949 return (bxe_prev_mcp_done(sc)); 15950} 15951 15952static int 15953bxe_prev_unload_uncommon(struct bxe_softc *sc) 15954{ 15955 int rc; 15956 15957 BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n"); 15958 15959 /* Test if previous unload process was already finished for this path */ 15960 if (bxe_prev_is_path_marked(sc)) { 15961 return (bxe_prev_mcp_done(sc)); 15962 } 15963 15964 BLOGD(sc, DBG_LOAD, "Path is unmarked\n"); 15965 15966 /* 15967 * If function has FLR capabilities, and existing FW version matches 15968 * the one required, then FLR will be sufficient to clean any residue 15969 * left by previous driver 15970 */ 15971 rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION); 15972 if (!rc) { 15973 /* fw version is good */ 15974 BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n"); 15975 rc = bxe_do_flr(sc); 15976 } 15977 15978 if (!rc) { 15979 /* FLR was performed */ 15980 BLOGD(sc, DBG_LOAD, 
"FLR successful\n"); 15981 return (0); 15982 } 15983 15984 BLOGD(sc, DBG_LOAD, "Could not FLR\n"); 15985 15986 /* Close the MCP request, return failure*/ 15987 rc = bxe_prev_mcp_done(sc); 15988 if (!rc) { 15989 rc = BXE_PREV_WAIT_NEEDED; 15990 } 15991 15992 return (rc); 15993} 15994 15995static int 15996bxe_prev_unload(struct bxe_softc *sc) 15997{ 15998 int time_counter = 10; 15999 uint32_t fw, hw_lock_reg, hw_lock_val; 16000 uint32_t rc = 0; 16001 16002 /* 16003 * Clear HW from errors which may have resulted from an interrupted 16004 * DMAE transaction. 16005 */ 16006 bxe_prev_interrupted_dmae(sc); 16007 16008 /* Release previously held locks */ 16009 hw_lock_reg = 16010 (SC_FUNC(sc) <= 5) ? 16011 (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) : 16012 (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8); 16013 16014 hw_lock_val = (REG_RD(sc, hw_lock_reg)); 16015 if (hw_lock_val) { 16016 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { 16017 BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n"); 16018 REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, 16019 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc))); 16020 } 16021 BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n"); 16022 REG_WR(sc, hw_lock_reg, 0xffffffff); 16023 } else { 16024 BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n"); 16025 } 16026 16027 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) { 16028 BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n"); 16029 REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0); 16030 } 16031 16032 do { 16033 /* Lock MCP using an unload request */ 16034 fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); 16035 if (!fw) { 16036 BLOGE(sc, "MCP response failure, aborting\n"); 16037 rc = -1; 16038 break; 16039 } 16040 16041 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { 16042 rc = bxe_prev_unload_common(sc); 16043 break; 16044 } 16045 16046 /* non-common reply from MCP night require looping */ 16047 rc = bxe_prev_unload_uncommon(sc); 16048 if (rc != BXE_PREV_WAIT_NEEDED) { 16049 break; 16050 } 16051 16052 DELAY(20000); 16053 } while (--time_counter); 16054 16055 if (!time_counter || rc) { 16056 BLOGE(sc, "Failed to unload previous driver!\n"); 16057 rc = -1; 16058 } 16059 16060 return (rc); 16061} 16062 16063void 16064bxe_dcbx_set_state(struct bxe_softc *sc, 16065 uint8_t dcb_on, 16066 uint32_t dcbx_enabled) 16067{ 16068 if (!CHIP_IS_E1x(sc)) { 16069 sc->dcb_state = dcb_on; 16070 sc->dcbx_enabled = dcbx_enabled; 16071 } else { 16072 sc->dcb_state = FALSE; 16073 sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID; 16074 } 16075 BLOGD(sc, DBG_LOAD, 16076 "DCB state [%s:%s]\n", 16077 dcb_on ? "ON" : "OFF", 16078 (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" : 16079 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" : 16080 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ? 
16081 "on-chip with negotiation" : "invalid"); 16082} 16083 16084/* must be called after sriov-enable */ 16085static int 16086bxe_set_qm_cid_count(struct bxe_softc *sc) 16087{ 16088 int cid_count = BXE_L2_MAX_CID(sc); 16089 16090 if (IS_SRIOV(sc)) { 16091 cid_count += BXE_VF_CIDS; 16092 } 16093 16094 if (CNIC_SUPPORT(sc)) { 16095 cid_count += CNIC_CID_MAX; 16096 } 16097 16098 return (roundup(cid_count, QM_CID_ROUND)); 16099} 16100 16101static void 16102bxe_init_multi_cos(struct bxe_softc *sc) 16103{ 16104 int pri, cos; 16105 16106 uint32_t pri_map = 0; /* XXX change to user config */ 16107 16108 for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) { 16109 cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4)); 16110 if (cos < sc->max_cos) { 16111 sc->prio_to_cos[pri] = cos; 16112 } else { 16113 BLOGW(sc, "Invalid COS %d for priority %d " 16114 "(max COS is %d), setting to 0\n", 16115 cos, pri, (sc->max_cos - 1)); 16116 sc->prio_to_cos[pri] = 0; 16117 } 16118 } 16119} 16120 16121static int 16122bxe_sysctl_state(SYSCTL_HANDLER_ARGS) 16123{ 16124 struct bxe_softc *sc; 16125 int error, result; 16126 16127 result = 0; 16128 error = sysctl_handle_int(oidp, &result, 0, req); 16129 16130 if (error || !req->newptr) { 16131 return (error); 16132 } 16133 16134 if (result == 1) { 16135 sc = (struct bxe_softc *)arg1; 16136 BLOGI(sc, "... dumping driver state ...\n"); 16137 /* XXX */ 16138 } 16139 16140 return (error); 16141} 16142 16143static int 16144bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS) 16145{ 16146 struct bxe_softc *sc = (struct bxe_softc *)arg1; 16147 uint32_t *eth_stats = (uint32_t *)&sc->eth_stats; 16148 uint32_t *offset; 16149 uint64_t value = 0; 16150 int index = (int)arg2; 16151 16152 if (index >= BXE_NUM_ETH_STATS) { 16153 BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index); 16154 return (-1); 16155 } 16156 16157 offset = (eth_stats + bxe_eth_stats_arr[index].offset); 16158 16159 switch (bxe_eth_stats_arr[index].size) { 16160 case 4: 16161 value = (uint64_t)*offset; 16162 break; 16163 case 8: 16164 value = HILO_U64(*offset, *(offset + 1)); 16165 break; 16166 default: 16167 BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n", 16168 index, bxe_eth_stats_arr[index].size); 16169 return (-1); 16170 } 16171 16172 return (sysctl_handle_64(oidp, &value, 0, req)); 16173} 16174 16175static int 16176bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS) 16177{ 16178 struct bxe_softc *sc = (struct bxe_softc *)arg1; 16179 uint32_t *eth_stats; 16180 uint32_t *offset; 16181 uint64_t value = 0; 16182 uint32_t q_stat = (uint32_t)arg2; 16183 uint32_t fp_index = ((q_stat >> 16) & 0xffff); 16184 uint32_t index = (q_stat & 0xffff); 16185 16186 eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats; 16187 16188 if (index >= BXE_NUM_ETH_Q_STATS) { 16189 BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index); 16190 return (-1); 16191 } 16192 16193 offset = (eth_stats + bxe_eth_q_stats_arr[index].offset); 16194 16195 switch (bxe_eth_q_stats_arr[index].size) { 16196 case 4: 16197 value = (uint64_t)*offset; 16198 break; 16199 case 8: 16200 value = HILO_U64(*offset, *(offset + 1)); 16201 break; 16202 default: 16203 BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n", 16204 index, bxe_eth_q_stats_arr[index].size); 16205 return (-1); 16206 } 16207 16208 return (sysctl_handle_64(oidp, &value, 0, req)); 16209} 16210 16211static void 16212bxe_add_sysctls(struct bxe_softc *sc) 16213{ 16214 struct sysctl_ctx_list *ctx; 16215 struct sysctl_oid_list *children; 16216 struct sysctl_oid *queue_top, *queue; 16217 struct 
sysctl_oid_list *queue_top_children, *queue_children; 16218 char queue_num_buf[32]; 16219 uint32_t q_stat; 16220 int i, j; 16221 16222 ctx = device_get_sysctl_ctx(sc->dev); 16223 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 16224 16225 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version", 16226 CTLFLAG_RD, BXE_DRIVER_VERSION, 0, 16227 "version"); 16228 16229 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version", 16230 CTLFLAG_RD, sc->devinfo.bc_ver_str, 0, 16231 "bootcode version"); 16232 16233 snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d", 16234 BCM_5710_FW_MAJOR_VERSION, 16235 BCM_5710_FW_MINOR_VERSION, 16236 BCM_5710_FW_REVISION_VERSION, 16237 BCM_5710_FW_ENGINEERING_VERSION); 16238 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version", 16239 CTLFLAG_RD, sc->fw_ver_str, 0, 16240 "firmware version"); 16241 16242 snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s", 16243 ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" : 16244 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" : 16245 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" : 16246 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" : 16247 "Unknown")); 16248 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode", 16249 CTLFLAG_RD, sc->mf_mode_str, 0, 16250 "multifunction mode"); 16251 16252 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics", 16253 CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0, 16254 "multifunction vnics per port"); 16255 16256 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr", 16257 CTLFLAG_RD, sc->mac_addr_str, 0, 16258 "mac address"); 16259 16260 snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d", 16261 ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" : 16262 (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" : 16263 (sc->devinfo.pcie_link_speed == 4) ? 
"8.0GT/s" : 16264 "???GT/s"), 16265 sc->devinfo.pcie_link_width); 16266 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link", 16267 CTLFLAG_RD, sc->pci_link_str, 0, 16268 "pci link status"); 16269 16270 sc->debug = bxe_debug; 16271 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug", 16272 CTLFLAG_RW, &sc->debug, 16273 "debug logging mode"); 16274 16275 sc->rx_budget = bxe_rx_budget; 16276 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget", 16277 CTLFLAG_RW, &sc->rx_budget, 0, 16278 "rx processing budget"); 16279 16280 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state", 16281 CTLTYPE_UINT | CTLFLAG_RW, sc, 0, 16282 bxe_sysctl_state, "IU", "dump driver state"); 16283 16284 for (i = 0; i < BXE_NUM_ETH_STATS; i++) { 16285 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 16286 bxe_eth_stats_arr[i].string, 16287 CTLTYPE_U64 | CTLFLAG_RD, sc, i, 16288 bxe_sysctl_eth_stat, "LU", 16289 bxe_eth_stats_arr[i].string); 16290 } 16291 16292 /* add a new parent node for all queues "dev.bxe.#.queue" */ 16293 queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue", 16294 CTLFLAG_RD, NULL, "queue"); 16295 queue_top_children = SYSCTL_CHILDREN(queue_top); 16296 16297 for (i = 0; i < sc->num_queues; i++) { 16298 /* add a new parent node for a single queue "dev.bxe.#.queue.#" */ 16299 snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i); 16300 queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO, 16301 queue_num_buf, CTLFLAG_RD, NULL, 16302 "single queue"); 16303 queue_children = SYSCTL_CHILDREN(queue); 16304 16305 for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) { 16306 q_stat = ((i << 16) | j); 16307 SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO, 16308 bxe_eth_q_stats_arr[j].string, 16309 CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat, 16310 bxe_sysctl_eth_q_stat, "LU", 16311 bxe_eth_q_stats_arr[j].string); 16312 } 16313 } 16314} 16315 16316/* 16317 * Device attach function. 16318 * 16319 * Allocates device resources, performs secondary chip identification, and 16320 * initializes driver instance variables. This function is called from driver 16321 * load after a successful probe. 
16322 * 16323 * Returns: 16324 * 0 = Success, >0 = Failure 16325 */ 16326static int 16327bxe_attach(device_t dev) 16328{ 16329 struct bxe_softc *sc; 16330 16331 sc = device_get_softc(dev); 16332 16333 BLOGD(sc, DBG_LOAD, "Starting attach...\n"); 16334 16335 sc->state = BXE_STATE_CLOSED; 16336 16337 sc->dev = dev; 16338 sc->unit = device_get_unit(dev); 16339 16340 BLOGD(sc, DBG_LOAD, "softc = %p\n", sc); 16341 16342 sc->pcie_bus = pci_get_bus(dev); 16343 sc->pcie_device = pci_get_slot(dev); 16344 sc->pcie_func = pci_get_function(dev); 16345 16346 /* enable bus master capability */ 16347 pci_enable_busmaster(dev); 16348 16349 /* get the BARs */ 16350 if (bxe_allocate_bars(sc) != 0) { 16351 return (ENXIO); 16352 } 16353 16354 /* initialize the mutexes */ 16355 bxe_init_mutexes(sc); 16356 16357 /* prepare the periodic callout */ 16358 callout_init(&sc->periodic_callout, 0); 16359 16360 /* prepare the chip taskqueue */ 16361 sc->chip_tq_flags = CHIP_TQ_NONE; 16362 snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name), 16363 "bxe%d_chip_tq", sc->unit); 16364 TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc); 16365 sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT, 16366 taskqueue_thread_enqueue, 16367 &sc->chip_tq); 16368 taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */ 16369 "%s", sc->chip_tq_name); 16370 16371 /* get device info and set params */ 16372 if (bxe_get_device_info(sc) != 0) { 16373 BLOGE(sc, "getting device info\n"); 16374 bxe_deallocate_bars(sc); 16375 pci_disable_busmaster(dev); 16376 return (ENXIO); 16377 } 16378 16379 /* get final misc params */ 16380 bxe_get_params(sc); 16381 16382 /* set the default MTU (changed via ifconfig) */ 16383 sc->mtu = ETHERMTU; 16384 16385 bxe_set_modes_bitmap(sc); 16386 16387 /* XXX 16388 * If in AFEX mode and the function is configured for FCoE 16389 * then bail... no L2 allowed. 
16390 */ 16391 16392 /* get phy settings from shmem and 'and' against admin settings */ 16393 bxe_get_phy_info(sc); 16394 16395 /* initialize the FreeBSD ifnet interface */ 16396 if (bxe_init_ifnet(sc) != 0) { 16397 bxe_release_mutexes(sc); 16398 bxe_deallocate_bars(sc); 16399 pci_disable_busmaster(dev); 16400 return (ENXIO); 16401 } 16402 16403 /* allocate device interrupts */ 16404 if (bxe_interrupt_alloc(sc) != 0) { 16405 if (sc->ifnet != NULL) { 16406 ether_ifdetach(sc->ifnet); 16407 } 16408 ifmedia_removeall(&sc->ifmedia); 16409 bxe_release_mutexes(sc); 16410 bxe_deallocate_bars(sc); 16411 pci_disable_busmaster(dev); 16412 return (ENXIO); 16413 } 16414 16415 /* allocate ilt */ 16416 if (bxe_alloc_ilt_mem(sc) != 0) { 16417 bxe_interrupt_free(sc); 16418 if (sc->ifnet != NULL) { 16419 ether_ifdetach(sc->ifnet); 16420 } 16421 ifmedia_removeall(&sc->ifmedia); 16422 bxe_release_mutexes(sc); 16423 bxe_deallocate_bars(sc); 16424 pci_disable_busmaster(dev); 16425 return (ENXIO); 16426 } 16427 16428 /* allocate the host hardware/software hsi structures */ 16429 if (bxe_alloc_hsi_mem(sc) != 0) { 16430 bxe_free_ilt_mem(sc); 16431 bxe_interrupt_free(sc); 16432 if (sc->ifnet != NULL) { 16433 ether_ifdetach(sc->ifnet); 16434 } 16435 ifmedia_removeall(&sc->ifmedia); 16436 bxe_release_mutexes(sc); 16437 bxe_deallocate_bars(sc); 16438 pci_disable_busmaster(dev); 16439 return (ENXIO); 16440 } 16441 16442 /* need to reset chip if UNDI was active */ 16443 if (IS_PF(sc) && !BXE_NOMCP(sc)) { 16444 /* init fw_seq */ 16445 sc->fw_seq = 16446 (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & 16447 DRV_MSG_SEQ_NUMBER_MASK); 16448 BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq); 16449 bxe_prev_unload(sc); 16450 } 16451 16452#if 1 16453 /* XXX */ 16454 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); 16455#else 16456 if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) && 16457 SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) && 16458 SHMEM2_RD(sc, dcbx_lldp_params_offset) && 16459 SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) { 16460 bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON); 16461 bxe_dcbx_init_params(sc); 16462 } else { 16463 bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); 16464 } 16465#endif 16466 16467 /* calculate qm_cid_count */ 16468 sc->qm_cid_count = bxe_set_qm_cid_count(sc); 16469 BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count); 16470 16471 sc->max_cos = 1; 16472 bxe_init_multi_cos(sc); 16473 16474 bxe_add_sysctls(sc); 16475 16476 return (0); 16477} 16478 16479/* 16480 * Device detach function. 16481 * 16482 * Stops the controller, resets the controller, and releases resources. 
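 * Detach is refused with EBUSY while VLAN interfaces still hang off
 * this ifnet (if_vlantrunk != NULL).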
16483 * 16484 * Returns: 16485 * 0 = Success, >0 = Failure 16486 */ 16487static int 16488bxe_detach(device_t dev) 16489{ 16490 struct bxe_softc *sc; 16491 struct ifnet *ifp; 16492 16493 sc = device_get_softc(dev); 16494 16495 BLOGD(sc, DBG_LOAD, "Starting detach...\n"); 16496 16497 ifp = sc->ifnet; 16498 if (ifp != NULL && ifp->if_vlantrunk != NULL) { 16499 BLOGE(sc, "Cannot detach while VLANs are in use.\n"); 16500 return(EBUSY); 16501 } 16502 16503 /* stop the periodic callout */ 16504 bxe_periodic_stop(sc); 16505 16506 /* stop the chip taskqueue */ 16507 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE); 16508 if (sc->chip_tq) { 16509 taskqueue_drain(sc->chip_tq, &sc->chip_tq_task); 16510 taskqueue_free(sc->chip_tq); 16511 sc->chip_tq = NULL; 16512 } 16513 16514 /* stop and reset the controller if it was open */ 16515 if (sc->state != BXE_STATE_CLOSED) { 16516 BXE_CORE_LOCK(sc); 16517 bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE); 16518 BXE_CORE_UNLOCK(sc); 16519 } 16520 16521 /* release the network interface */ 16522 if (ifp != NULL) { 16523 ether_ifdetach(ifp); 16524 } 16525 ifmedia_removeall(&sc->ifmedia); 16526 16527 /* XXX do the following based on driver state... */ 16528 16529 /* free the host hardware/software hsi structures */ 16530 bxe_free_hsi_mem(sc); 16531 16532 /* free ilt */ 16533 bxe_free_ilt_mem(sc); 16534 16535 /* release the interrupts */ 16536 bxe_interrupt_free(sc); 16537 16538 /* Release the mutexes*/ 16539 bxe_release_mutexes(sc); 16540 16541 /* Release the PCIe BAR mapped memory */ 16542 bxe_deallocate_bars(sc); 16543 16544 /* Release the FreeBSD interface. */ 16545 if (sc->ifnet != NULL) { 16546 if_free(sc->ifnet); 16547 } 16548 16549 pci_disable_busmaster(dev); 16550 16551 return (0); 16552} 16553 16554/* 16555 * Device shutdown function. 16556 * 16557 * Stops and resets the controller. 16558 * 16559 * Returns: 16560 * Nothing 16561 */ 16562static int 16563bxe_shutdown(device_t dev) 16564{ 16565 struct bxe_softc *sc; 16566 16567 sc = device_get_softc(dev); 16568 16569 BLOGD(sc, DBG_LOAD, "Starting shutdown...\n"); 16570 16571 /* stop the periodic callout */ 16572 bxe_periodic_stop(sc); 16573 16574 BXE_CORE_LOCK(sc); 16575 bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE); 16576 BXE_CORE_UNLOCK(sc); 16577 16578 return (0); 16579} 16580 16581void 16582bxe_igu_ack_sb(struct bxe_softc *sc, 16583 uint8_t igu_sb_id, 16584 uint8_t segment, 16585 uint16_t index, 16586 uint8_t op, 16587 uint8_t update) 16588{ 16589 uint32_t igu_addr = sc->igu_base_addr; 16590 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; 16591 bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr); 16592} 16593 16594static void 16595bxe_igu_clear_sb_gen(struct bxe_softc *sc, 16596 uint8_t func, 16597 uint8_t idu_sb_id, 16598 uint8_t is_pf) 16599{ 16600 uint32_t data, ctl, cnt = 100; 16601 uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 16602 uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 16603 uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; 16604 uint32_t sb_bit = 1 << (idu_sb_id%32); 16605 uint32_t func_encode = func | (is_pf ? 
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; 16606 uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; 16607 16608 /* Not supported in BC mode */ 16609 if (CHIP_INT_MODE_IS_BC(sc)) { 16610 return; 16611 } 16612 16613 data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << 16614 IGU_REGULAR_CLEANUP_TYPE_SHIFT) | 16615 IGU_REGULAR_CLEANUP_SET | 16616 IGU_REGULAR_BCLEANUP); 16617 16618 ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) | 16619 (func_encode << IGU_CTRL_REG_FID_SHIFT) | 16620 (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT)); 16621 16622 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 16623 data, igu_addr_data); 16624 REG_WR(sc, igu_addr_data, data); 16625 16626 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 16627 BUS_SPACE_BARRIER_WRITE); 16628 mb(); 16629 16630 BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 16631 ctl, igu_addr_ctl); 16632 REG_WR(sc, igu_addr_ctl, ctl); 16633 16634 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, 16635 BUS_SPACE_BARRIER_WRITE); 16636 mb(); 16637 16638 /* wait for clean up to finish */ 16639 while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { 16640 DELAY(20000); 16641 } 16642 16643 if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) { 16644 BLOGD(sc, DBG_LOAD, 16645 "Unable to finish IGU cleanup: " 16646 "idu_sb_id %d offset %d bit %d (cnt %d)\n", 16647 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); 16648 } 16649} 16650 16651static void 16652bxe_igu_clear_sb(struct bxe_softc *sc, 16653 uint8_t idu_sb_id) 16654{ 16655 bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/); 16656} 16657 16658 16659 16660 16661 16662 16663 16664/*******************/ 16665/* ECORE CALLBACKS */ 16666/*******************/ 16667 16668static void 16669bxe_reset_common(struct bxe_softc *sc) 16670{ 16671 uint32_t val = 0x1400; 16672 16673 /* reset_common */ 16674 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f); 16675 16676 if (CHIP_IS_E3(sc)) { 16677 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 16678 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 16679 } 16680 16681 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val); 16682} 16683 16684static void 16685bxe_common_init_phy(struct bxe_softc *sc) 16686{ 16687 uint32_t shmem_base[2]; 16688 uint32_t shmem2_base[2]; 16689 16690 /* Avoid common init in case MFW supports LFA */ 16691 if (SHMEM2_RD(sc, size) > 16692 (uint32_t)offsetof(struct shmem2_region, 16693 lfa_host_addr[SC_PORT(sc)])) { 16694 return; 16695 } 16696 16697 shmem_base[0] = sc->devinfo.shmem_base; 16698 shmem2_base[0] = sc->devinfo.shmem2_base; 16699 16700 if (!CHIP_IS_E1x(sc)) { 16701 shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr); 16702 shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr); 16703 } 16704 16705 BXE_PHY_LOCK(sc); 16706 elink_common_init_phy(sc, shmem_base, shmem2_base, 16707 sc->devinfo.chip_id, 0); 16708 BXE_PHY_UNLOCK(sc); 16709} 16710 16711static void 16712bxe_pf_disable(struct bxe_softc *sc) 16713{ 16714 uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); 16715 16716 val &= ~IGU_PF_CONF_FUNC_EN; 16717 16718 REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); 16719 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 16720 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); 16721} 16722 16723static void 16724bxe_init_pxp(struct bxe_softc *sc) 16725{ 16726 uint16_t devctl; 16727 int r_order, w_order; 16728 16729 devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2); 16730 16731 BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl); 16732 
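    /*
     * In the PCIe Device Control register, bits 7:5 encode the max
     * payload size and bits 14:12 the max read request size, each as a
     * power-of-2 multiple of 128 bytes (illustrative decode:
     * bytes = 128 << field, so 0 -> 128B, 2 -> 512B, 5 -> 4KB). The
     * raw encoded field values, not byte counts, are what gets passed
     * to ecore_init_pxp_arb() below.
     */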
16733 w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5); 16734 16735 if (sc->mrrs == -1) { 16736 r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12); 16737 } else { 16738 BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs); 16739 r_order = sc->mrrs; 16740 } 16741 16742 ecore_init_pxp_arb(sc, r_order, w_order); 16743} 16744 16745static uint32_t 16746bxe_get_pretend_reg(struct bxe_softc *sc) 16747{ 16748 uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; 16749 uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); 16750 return (base + (SC_ABS_FUNC(sc)) * stride); 16751} 16752 16753/* 16754 * Called only on E1H or E2. 16755 * When pretending to be PF, the pretend value is the function number 0..7. 16756 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID 16757 * combination. 16758 */ 16759static int 16760bxe_pretend_func(struct bxe_softc *sc, 16761 uint16_t pretend_func_val) 16762{ 16763 uint32_t pretend_reg; 16764 16765 if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) { 16766 return (-1); 16767 } 16768 16769 /* get my own pretend register */ 16770 pretend_reg = bxe_get_pretend_reg(sc); 16771 REG_WR(sc, pretend_reg, pretend_func_val); 16772 REG_RD(sc, pretend_reg); 16773 return (0); 16774} 16775 16776static void 16777bxe_iov_init_dmae(struct bxe_softc *sc) 16778{ 16779 return; 16780#if 0 16781 BLOGD(sc, DBG_LOAD, "SRIOV is %s\n", IS_SRIOV(sc) ? "ON" : "OFF"); 16782 16783 if (!IS_SRIOV(sc)) { 16784 return; 16785 } 16786 16787 REG_WR(sc, DMAE_REG_BACKWARD_COMP_EN, 0); 16788#endif 16789} 16790 16791#if 0 16792static int 16793bxe_iov_init_ilt(struct bxe_softc *sc, 16794 uint16_t line) 16795{ 16796 return (line); 16797#if 0 16798 int i; 16799 struct ecore_ilt* ilt = sc->ilt; 16800 16801 if (!IS_SRIOV(sc)) { 16802 return (line); 16803 } 16804 16805 /* set vfs ilt lines */ 16806 for (i = 0; i < BXE_VF_CIDS/ILT_PAGE_CIDS ; i++) { 16807 struct hw_dma *hw_cxt = SC_VF_CXT_PAGE(sc,i); 16808 ilt->lines[line+i].page = hw_cxt->addr; 16809 ilt->lines[line+i].page_mapping = hw_cxt->mapping; 16810 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ 16811 } 16812 return (line+i); 16813#endif 16814} 16815#endif 16816 16817static void 16818bxe_iov_init_dq(struct bxe_softc *sc) 16819{ 16820 return; 16821#if 0 16822 if (!IS_SRIOV(sc)) { 16823 return; 16824 } 16825 16826 /* Set the DQ such that the CID reflect the abs_vfid */ 16827 REG_WR(sc, DORQ_REG_VF_NORM_VF_BASE, 0); 16828 REG_WR(sc, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS)); 16829 16830 /* 16831 * Set VFs starting CID. If its > 0 the preceding CIDs are belong to 16832 * the PF L2 queues 16833 */ 16834 REG_WR(sc, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID); 16835 16836 /* The VF window size is the log2 of the max number of CIDs per VF */ 16837 REG_WR(sc, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND); 16838 16839 /* 16840 * The VF doorbell size 0 - *B, 4 - 128B. We set it here to match 16841 * the Pf doorbell size although the 2 are independent. 
16842 */ 16843 REG_WR(sc, DORQ_REG_VF_NORM_CID_OFST, 16844 BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT); 16845 16846 /* 16847 * No security checks for now - 16848 * configure single rule (out of 16) mask = 0x1, value = 0x0, 16849 * CID range 0 - 0x1ffff 16850 */ 16851 REG_WR(sc, DORQ_REG_VF_TYPE_MASK_0, 1); 16852 REG_WR(sc, DORQ_REG_VF_TYPE_VALUE_0, 0); 16853 REG_WR(sc, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); 16854 REG_WR(sc, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); 16855 16856 /* set the number of VF alllowed doorbells to the full DQ range */ 16857 REG_WR(sc, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000); 16858 16859 /* set the VF doorbell threshold */ 16860 REG_WR(sc, DORQ_REG_VF_USAGE_CT_LIMIT, 4); 16861#endif 16862} 16863 16864/* send a NIG loopback debug packet */ 16865static void 16866bxe_lb_pckt(struct bxe_softc *sc) 16867{ 16868 uint32_t wb_write[3]; 16869 16870 /* Ethernet source and destination addresses */ 16871 wb_write[0] = 0x55555555; 16872 wb_write[1] = 0x55555555; 16873 wb_write[2] = 0x20; /* SOP */ 16874 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 16875 16876 /* NON-IP protocol */ 16877 wb_write[0] = 0x09000000; 16878 wb_write[1] = 0x55555555; 16879 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ 16880 REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 16881} 16882 16883/* 16884 * Some of the internal memories are not directly readable from the driver. 16885 * To test them we send debug packets. 16886 */ 16887static int 16888bxe_int_mem_test(struct bxe_softc *sc) 16889{ 16890 int factor; 16891 int count, i; 16892 uint32_t val = 0; 16893 16894 if (CHIP_REV_IS_FPGA(sc)) { 16895 factor = 120; 16896 } else if (CHIP_REV_IS_EMUL(sc)) { 16897 factor = 200; 16898 } else { 16899 factor = 1; 16900 } 16901 16902 /* disable inputs of parser neighbor blocks */ 16903 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); 16904 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); 16905 REG_WR(sc, CFC_REG_DEBUG0, 0x1); 16906 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); 16907 16908 /* write 0 to parser credits for CFC search request */ 16909 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 16910 16911 /* send Ethernet packet */ 16912 bxe_lb_pckt(sc); 16913 16914 /* TODO do i reset NIG statistic? 
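 * (The test below injects debug packets via the NIG loopback
 * (bxe_lb_pckt) and then polls the NIG byte counter and the PRS packet
 * counter, so a stale NIG statistic would skew the expected counts.)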
*/ 16915 /* Wait until NIG register shows 1 packet of size 0x10 */ 16916 count = 1000 * factor; 16917 while (count) { 16918 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); 16919 val = *BXE_SP(sc, wb_data[0]); 16920 if (val == 0x10) { 16921 break; 16922 } 16923 16924 DELAY(10000); 16925 count--; 16926 } 16927 16928 if (val != 0x10) { 16929 BLOGE(sc, "NIG timeout val=0x%x\n", val); 16930 return (-1); 16931 } 16932 16933 /* wait until PRS register shows 1 packet */ 16934 count = (1000 * factor); 16935 while (count) { 16936 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); 16937 if (val == 1) { 16938 break; 16939 } 16940 16941 DELAY(10000); 16942 count--; 16943 } 16944 16945 if (val != 0x1) { 16946 BLOGE(sc, "PRS timeout val=0x%x\n", val); 16947 return (-2); 16948 } 16949 16950 /* Reset and init BRB, PRS */ 16951 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 16952 DELAY(50000); 16953 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 16954 DELAY(50000); 16955 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 16956 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 16957 16958 /* Disable inputs of parser neighbor blocks */ 16959 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); 16960 REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); 16961 REG_WR(sc, CFC_REG_DEBUG0, 0x1); 16962 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); 16963 16964 /* Write 0 to parser credits for CFC search request */ 16965 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 16966 16967 /* send 10 Ethernet packets */ 16968 for (i = 0; i < 10; i++) { 16969 bxe_lb_pckt(sc); 16970 } 16971 16972 /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */ 16973 count = (1000 * factor); 16974 while (count) { 16975 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); 16976 val = *BXE_SP(sc, wb_data[0]); 16977 if (val == 0xb0) { 16978 break; 16979 } 16980 16981 DELAY(10000); 16982 count--; 16983 } 16984 16985 if (val != 0xb0) { 16986 BLOGE(sc, "NIG timeout val=0x%x\n", val); 16987 return (-3); 16988 } 16989 16990 /* Wait until PRS register shows 2 packets */ 16991 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); 16992 if (val != 2) { 16993 BLOGE(sc, "PRS timeout val=0x%x\n", val); 16994 } 16995 16996 /* Write 1 to parser credits for CFC search request */ 16997 REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); 16998 16999 /* Wait until PRS register shows 3 packets */ 17000 DELAY(10000 * factor); 17001 17002 /* Wait until NIG register shows 1 packet of size 0x10 */ 17003 val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); 17004 if (val != 3) { 17005 BLOGE(sc, "PRS timeout val=0x%x\n", val); 17006 } 17007 17008 /* clear NIG EOP FIFO */ 17009 for (i = 0; i < 11; i++) { 17010 REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO); 17011 } 17012 17013 val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY); 17014 if (val != 1) { 17015 BLOGE(sc, "clear of NIG failed\n"); 17016 return (-4); 17017 } 17018 17019 /* Reset and init BRB, PRS, NIG */ 17020 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 17021 DELAY(50000); 17022 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 17023 DELAY(50000); 17024 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 17025 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 17026 if (!CNIC_SUPPORT(sc)) { 17027 /* set NIC mode */ 17028 REG_WR(sc, PRS_REG_NIC_MODE, 1); 17029 } 17030 17031 /* Enable inputs of parser neighbor blocks */ 17032 REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff); 17033 REG_WR(sc, TCM_REG_PRS_IFEN, 0x1); 17034 REG_WR(sc, CFC_REG_DEBUG0, 0x0); 17035 REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1); 17036 17037 return (0); 
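    /*
     * (Failure codes above: -1 = NIG byte count never reached 0x10,
     * -2 = PRS did not show the first packet, -3 = NIG never reached
     * 0xb0 after 10 more packets, -4 = the NIG EOP FIFO did not drain.)
     */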
17038} 17039 17040static void 17041bxe_setup_fan_failure_detection(struct bxe_softc *sc) 17042{ 17043 int is_required; 17044 uint32_t val; 17045 int port; 17046 17047 is_required = 0; 17048 val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & 17049 SHARED_HW_CFG_FAN_FAILURE_MASK); 17050 17051 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { 17052 is_required = 1; 17053 } 17054 /* 17055 * The fan failure mechanism is usually related to the PHY type since 17056 * the power consumption of the board is affected by the PHY. Currently, 17057 * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 17058 */ 17059 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { 17060 for (port = PORT_0; port < PORT_MAX; port++) { 17061 is_required |= elink_fan_failure_det_req(sc, 17062 sc->devinfo.shmem_base, 17063 sc->devinfo.shmem2_base, 17064 port); 17065 } 17066 } 17067 17068 BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required); 17069 17070 if (is_required == 0) { 17071 return; 17072 } 17073 17074 /* Fan failure is indicated by SPIO 5 */ 17075 bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); 17076 17077 /* set to active low mode */ 17078 val = REG_RD(sc, MISC_REG_SPIO_INT); 17079 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); 17080 REG_WR(sc, MISC_REG_SPIO_INT, val); 17081 17082 /* enable interrupt to signal the IGU */ 17083 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 17084 val |= MISC_SPIO_SPIO5; 17085 REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); 17086} 17087 17088static void 17089bxe_enable_blocks_attention(struct bxe_softc *sc) 17090{ 17091 uint32_t val; 17092 17093 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); 17094 if (!CHIP_IS_E1x(sc)) { 17095 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40); 17096 } else { 17097 REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0); 17098 } 17099 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 17100 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 17101 /* 17102 * mask read length error interrupts in brb for parser 17103 * (parsing unit and 'checksum and crc' unit) 17104 * these errors are legal (PU reads fixed length and CAC can cause 17105 * read length error on truncated packets) 17106 */ 17107 REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00); 17108 REG_WR(sc, QM_REG_QM_INT_MASK, 0); 17109 REG_WR(sc, TM_REG_TM_INT_MASK, 0); 17110 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0); 17111 REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0); 17112 REG_WR(sc, XCM_REG_XCM_INT_MASK, 0); 17113/* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */ 17114/* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */ 17115 REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0); 17116 REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0); 17117 REG_WR(sc, UCM_REG_UCM_INT_MASK, 0); 17118/* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */ 17119/* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */ 17120 REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); 17121 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0); 17122 REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0); 17123 REG_WR(sc, CCM_REG_CCM_INT_MASK, 0); 17124/* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */ 17125/* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */ 17126 17127 val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | 17128 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 17129 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN); 17130 if (!CHIP_IS_E1x(sc)) { 17131 val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | 17132 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED); 17133 } 17134 REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val); 17135 17136 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0); 17137 REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0); 17138 REG_WR(sc, TCM_REG_TCM_INT_MASK, 0); 17139/* 
REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */ 17140 17141 if (!CHIP_IS_E1x(sc)) { 17142 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ 17143 REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); 17144 } 17145 17146 REG_WR(sc, CDU_REG_CDU_INT_MASK, 0); 17147 REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0); 17148/* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */ 17149 REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ 17150} 17151 17152/** 17153 * bxe_init_hw_common - initialize the HW at the COMMON phase. 17154 * 17155 * @sc: driver handle 17156 */ 17157static int 17158bxe_init_hw_common(struct bxe_softc *sc) 17159{ 17160 uint8_t abs_func_id; 17161 uint32_t val; 17162 17163 BLOGD(sc, DBG_LOAD, "starting common init for func %d\n", 17164 SC_ABS_FUNC(sc)); 17165 17166 /* 17167 * take the RESET lock to protect undi_unload flow from accessing 17168 * registers while we are resetting the chip 17169 */ 17170 bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 17171 17172 bxe_reset_common(sc); 17173 17174 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff); 17175 17176 val = 0xfffc; 17177 if (CHIP_IS_E3(sc)) { 17178 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 17179 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 17180 } 17181 17182 REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val); 17183 17184 bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); 17185 17186 ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON); 17187 BLOGD(sc, DBG_LOAD, "after misc block init\n"); 17188 17189 if (!CHIP_IS_E1x(sc)) { 17190 /* 17191 * 4-port mode or 2-port mode we need to turn off master-enable for 17192 * everyone. After that we turn it back on for self. So, we disregard 17193 * multi-function, and always disable all functions on the given path, 17194 * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1 17195 */ 17196 for (abs_func_id = SC_PATH(sc); 17197 abs_func_id < (E2_FUNC_MAX * 2); 17198 abs_func_id += 2) { 17199 if (abs_func_id == SC_ABS_FUNC(sc)) { 17200 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 17201 continue; 17202 } 17203 17204 bxe_pretend_func(sc, abs_func_id); 17205 17206 /* clear pf enable */ 17207 bxe_pf_disable(sc); 17208 17209 bxe_pretend_func(sc, SC_ABS_FUNC(sc)); 17210 } 17211 } 17212 17213 BLOGD(sc, DBG_LOAD, "after pf disable\n"); 17214 17215 ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON); 17216 17217 if (CHIP_IS_E1(sc)) { 17218 /* 17219 * enable HW interrupt from PXP on USDM overflow 17220 * bit 16 on INT_MASK_0 17221 */ 17222 REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); 17223 } 17224 17225 ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON); 17226 bxe_init_pxp(sc); 17227 17228#ifdef __BIG_ENDIAN 17229 REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1); 17230 REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1); 17231 REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1); 17232 REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1); 17233 REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1); 17234 /* make sure this value is 0 */ 17235 REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0); 17236 17237 //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1); 17238 REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1); 17239 REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1); 17240 REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1); 17241 REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 17242#endif 17243 17244 ecore_ilt_init_page_size(sc, INITOP_SET); 17245 17246 if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) { 17247 REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1); 17248 } 17249 17250 /* let the HW do it's magic... 
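 * (the 100ms delay below gives PXP time to settle; completion is then
 * verified explicitly via PXP2_REG_RQ_CFG_DONE and
 * PXP2_REG_RD_INIT_DONE)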
*/ 17251 DELAY(100000); 17252 17253 /* finish PXP init */ 17254 val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE); 17255 if (val != 1) { 17256 BLOGE(sc, "PXP2 CFG failed\n"); 17257 return (-1); 17258 } 17259 val = REG_RD(sc, PXP2_REG_RD_INIT_DONE); 17260 if (val != 1) { 17261 BLOGE(sc, "PXP2 RD_INIT failed\n"); 17262 return (-1); 17263 } 17264 17265 BLOGD(sc, DBG_LOAD, "after pxp init\n"); 17266 17267 /* 17268 * Timer bug workaround for E2 only. We need to set the entire ILT to have 17269 * entries with value "0" and valid bit on. This needs to be done by the 17270 * first PF that is loaded in a path (i.e. common phase) 17271 */ 17272 if (!CHIP_IS_E1x(sc)) { 17273/* 17274 * In E2 there is a bug in the timers block that can cause function 6 / 7 17275 * (i.e. vnic3) to start even if it is marked as "scan-off". 17276 * This occurs when a different function (func2,3) is being marked 17277 * as "scan-off". Real-life scenario for example: if a driver is being 17278 * load-unloaded while func6,7 are down. This will cause the timer to access 17279 * the ilt, translate to a logical address and send a request to read/write. 17280 * Since the ilt for the function that is down is not valid, this will cause 17281 * a translation error which is unrecoverable. 17282 * The workaround is intended to make sure that when this happens nothing 17283 * fatal will occur. The workaround: 17284 * 1. First PF driver which loads on a path will: 17285 * a. After taking the chip out of reset, by using pretend, 17286 * it will write "0" to the following registers of 17287 * the other vnics. 17288 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 17289 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); 17290 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); 17291 * And for itself it will write '1' to 17292 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable 17293 * dmae-operations (writing to pram for example.) 17294 * note: can be done for only function 6,7 but cleaner this 17295 * way. 17296 * b. Write zero+valid to the entire ILT. 17297 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of 17298 * VNIC3 (of that port). The range allocated will be the 17299 * entire ILT. This is needed to prevent ILT range error. 17300 * 2. Any PF driver load flow: 17301 * a. ILT update with the physical addresses of the allocated 17302 * logical pages. 17303 * b. Wait 20msec. - note that this timeout is needed to make 17304 * sure there are no requests in one of the PXP internal 17305 * queues with "old" ILT addresses. 17306 * c. PF enable in the PGLC. 17307 * d. Clear the was_error of the PF in the PGLC. (could have 17308 * occurred while driver was down) 17309 * e. PF enable in the CFC (WEAK + STRONG) 17310 * f. Timers scan enable 17311 * 3. PF driver unload flow: 17312 * a. Clear the Timers scan_en. 17313 * b. Polling for scan_on=0 for that PF. 17314 * c. Clear the PF enable bit in the PXP. 17315 * d. Clear the PF enable in the CFC (WEAK + STRONG) 17316 * e. Write zero+valid to all ILT entries (The valid bit must 17317 * stay set) 17318 * f. If this is VNIC 3 of a port then also init 17319 * first_timers_ilt_entry to zero and last_timers_ilt_entry 17320 * to the last entry in the ILT. 17321 * 17322 * Notes: 17323 * Currently the PF error in the PGLC is non-recoverable. 17324 * In the future there will be a recovery routine for this error. 17325 * Currently attention is masked. 17326 * Having an MCP lock on the load/unload process does not guarantee that 17327 * there is no Timer disable during Func6/7 enable.
This is because the 17328 * Timers scan is currently being cleared by the MCP on FLR. 17329 * Step 2.d can be done only for PF6/7 and the driver can also check if 17330 * there is an error before clearing it. But the flow above is simpler and 17331 * more general. 17332 * All ILT entries are written with zero+valid and not just PF6/7 17333 * ILT entries since in the future the ILT entry allocation for 17334 * PFs might be dynamic. 17335 */ 17336 struct ilt_client_info ilt_cli; 17337 struct ecore_ilt ilt; 17338 17339 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 17340 memset(&ilt, 0, sizeof(struct ecore_ilt)); 17341 17342 /* initialize dummy TM client */ 17343 ilt_cli.start = 0; 17344 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 17345 ilt_cli.client_num = ILT_CLIENT_TM; 17346 17347 /* 17348 * Step 1: set zeroes to all ilt page entries with valid bit on 17349 * Step 2: set the timers first/last ilt entry to point 17350 * to the entire range to prevent ILT range error for 3rd/4th 17351 * vnic (this code assumes existence of the vnic) 17352 * 17353 * both steps performed by call to ecore_ilt_client_init_op() 17354 * with dummy TM client 17355 * 17356 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT 17357 * and its sibling are split registers 17358 */ 17359 17360 bxe_pretend_func(sc, (SC_PATH(sc) + 6)); 17361 ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR); 17362 bxe_pretend_func(sc, SC_ABS_FUNC(sc)); 17363 17364 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN); 17365 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN); 17366 REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); 17367 } 17368 17369 REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0); 17370 REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0); 17371 17372 if (!CHIP_IS_E1x(sc)) { 17373 int factor = CHIP_REV_IS_EMUL(sc) ? 1000 : 17374 (CHIP_REV_IS_FPGA(sc) ? 400 : 0); 17375 17376 ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON); 17377 ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON); 17378 17379 /* let the HW do its magic...
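 * (ATC init is polled below via ATC_REG_ATC_INIT_DONE; 'factor' extends
 * the poll count on emulation/FPGA platforms, while production silicon
 * gets a single 200ms check)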
*/ 17380 do { 17381 DELAY(200000); 17382 val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); 17383 } while (factor-- && (val != 1)); 17384 17385 if (val != 1) { 17386 BLOGE(sc, "ATC_INIT failed\n"); 17387 return (-1); 17388 } 17389 } 17390 17391 BLOGD(sc, DBG_LOAD, "after pglue and atc init\n"); 17392 17393 ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON); 17394 17395 bxe_iov_init_dmae(sc); 17396 17397 /* clean the DMAE memory */ 17398 sc->dmae_ready = 1; 17399 ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1); 17400 17401 ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON); 17402 17403 ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON); 17404 17405 ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON); 17406 17407 ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON); 17408 17409 bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3); 17410 bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3); 17411 bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3); 17412 bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3); 17413 17414 ecore_init_block(sc, BLOCK_QM, PHASE_COMMON); 17415 17416 /* QM queues pointers table */ 17417 ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); 17418 17419 /* soft reset pulse */ 17420 REG_WR(sc, QM_REG_SOFT_RESET, 1); 17421 REG_WR(sc, QM_REG_SOFT_RESET, 0); 17422 17423 if (CNIC_SUPPORT(sc)) 17424 ecore_init_block(sc, BLOCK_TM, PHASE_COMMON); 17425 17426 ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON); 17427 REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT); 17428 if (!CHIP_REV_IS_SLOW(sc)) { 17429 /* enable hw interrupt from doorbell Q */ 17430 REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); 17431 } 17432 17433 ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); 17434 17435 ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); 17436 REG_WR(sc, PRS_REG_A_PRSU_20, 0xf); 17437 17438 if (!CHIP_IS_E1(sc)) { 17439 REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); 17440 } 17441 17442 if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) { 17443 if (IS_MF_AFEX(sc)) { 17444 /* 17445 * configure that AFEX and VLAN headers must be 17446 * received in AFEX mode 17447 */ 17448 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE); 17449 REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA); 17450 REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6); 17451 REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926); 17452 REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4); 17453 } else { 17454 /* 17455 * Bit-map indicating which L2 hdrs may appear 17456 * after the basic Ethernet header 17457 */ 17458 REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 17459 sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6); 17460 } 17461 } 17462 17463 ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON); 17464 ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON); 17465 ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON); 17466 ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON); 17467 17468 if (!CHIP_IS_E1x(sc)) { 17469 /* reset VFC memories */ 17470 REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 17471 VFC_MEMORIES_RST_REG_CAM_RST | 17472 VFC_MEMORIES_RST_REG_RAM_RST); 17473 REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 17474 VFC_MEMORIES_RST_REG_CAM_RST | 17475 VFC_MEMORIES_RST_REG_RAM_RST); 17476 17477 DELAY(20000); 17478 } 17479 17480 ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON); 17481 ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON); 17482 ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON); 17483 ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON); 17484 17485 /* sync semi rtc */ 17486 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 17487 0x80000000); 17488 REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 17489 0x80000000); 17490 17491 ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON); 17492 ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON); 17493 ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON); 17494 17495 if (!CHIP_IS_E1x(sc)) { 17496 if (IS_MF_AFEX(sc)) { 17497 /* 17498 * configure that AFEX and VLAN headers must be 17499 * sent in AFEX mode 17500 */ 17501 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE); 17502 REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA); 17503 REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6); 17504 REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926); 17505 REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4); 17506 } else { 17507 REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 17508 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); 17509 } 17510 } 17511 17512 REG_WR(sc, SRC_REG_SOFT_RST, 1); 17513 17514 ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON); 17515 17516 if (CNIC_SUPPORT(sc)) { 17517 REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672); 17518 REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 17519 REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b); 17520 REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a); 17521 REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116); 17522 REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 17523 REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf); 17524 REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 17525 REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f); 17526 REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7); 17527 } 17528 REG_WR(sc, SRC_REG_SOFT_RST, 0); 17529 17530 if (sizeof(union cdu_context) != 1024) { 17531 /* we currently assume that a context is 1024 bytes */ 17532 BLOGE(sc, "please adjust the size of cdu_context(%ld)\n", 17533 (long)sizeof(union cdu_context)); 17534 } 17535 17536 ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON); 17537 val = (4 << 24) + (0 << 12) + 1024; 17538 REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val); 17539 17540 ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON); 17541 17542 REG_WR(sc, CFC_REG_INIT_REG, 0x7FF); 17543 /* enable context validation interrupt from CFC */ 17544 REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); 17545 17546 /* set the thresholds to prevent CFC/CDU race */ 17547 REG_WR(sc, CFC_REG_DEBUG0, 0x20020000); 17548 ecore_init_block(sc, BLOCK_HC, PHASE_COMMON); 17549 17550 if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) { 17551 REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36); 17552 } 17553 17554 ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON); 17555 ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON); 17556 17557 /* Reset PCIE errors for debug */ 17558 REG_WR(sc, 0x2814, 0xffffffff); 17559 REG_WR(sc, 0x3820, 0xffffffff); 17560 17561 if (!CHIP_IS_E1x(sc)) { 
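        /*
         * Continuation of the "Reset PCIE errors for debug" step above:
         * the writes below touch the unsupported-request bits for the
         * additional PCI functions that exist on E2 and newer parts
         * (the register names suggest FUNC345 covers functions 3-5 and
         * FUNC678 functions 6-8).
         */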
17562 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, 17563 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | 17564 PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); 17565 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, 17566 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | 17567 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | 17568 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); 17569 REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, 17570 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | 17571 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | 17572 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); 17573 } 17574 17575 ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON); 17576 17577 if (!CHIP_IS_E1(sc)) { 17578 /* in E3 this is done in the per-port section */ 17579 if (!CHIP_IS_E3(sc)) 17580 REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc)); 17581 } 17582 17583 if (CHIP_IS_E1H(sc)) { 17584 /* not applicable for E2 (and above ...) */ 17585 REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc)); 17586 } 17587 17588 if (CHIP_REV_IS_SLOW(sc)) { 17589 DELAY(200000); 17590 } 17591 17592 /* finish CFC init */ 17593 val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10); 17594 if (val != 1) { 17595 BLOGE(sc, "CFC LL_INIT failed\n"); 17596 return (-1); 17597 } 17598 val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10); 17599 if (val != 1) { 17600 BLOGE(sc, "CFC AC_INIT failed\n"); 17601 return (-1); 17602 } 17603 val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10); 17604 if (val != 1) { 17605 BLOGE(sc, "CFC CAM_INIT failed\n"); 17606 return (-1); 17607 } 17608 REG_WR(sc, CFC_REG_DEBUG0, 0); 17609 17610 if (CHIP_IS_E1(sc)) { 17611 /* read NIG statistic to see if this is our first up since powerup */ 17612 bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); 17613 val = *BXE_SP(sc, wb_data[0]); 17614 17615 /* do internal memory self test */ 17616 if ((val == 0) && bxe_int_mem_test(sc)) { 17617 BLOGE(sc, "internal mem self test failed\n"); 17618 return (-1); 17619 } 17620 } 17621 17622 bxe_setup_fan_failure_detection(sc); 17623 17624 /* clear PXP2 attentions */ 17625 REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); 17626 17627 bxe_enable_blocks_attention(sc); 17628 17629 if (!CHIP_REV_IS_SLOW(sc)) { 17630 ecore_enable_blocks_parity(sc); 17631 } 17632 17633 if (!BXE_NOMCP(sc)) { 17634 if (CHIP_IS_E1x(sc)) { 17635 bxe_common_init_phy(sc); 17636 } 17637 } 17638 17639 return (0); 17640} 17641 17642/** 17643 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase. 17644 * 17645 * @sc: driver handle 17646 */ 17647static int 17648bxe_init_hw_common_chip(struct bxe_softc *sc) 17649{ 17650 int rc = bxe_init_hw_common(sc); 17651 17652 if (rc) { 17653 return (rc); 17654 } 17655 17656 /* In E2 2-PORT mode, the same ext phy is used for both paths */ 17657 if (!BXE_NOMCP(sc)) { 17658 bxe_common_init_phy(sc); 17659 } 17660 17661 return (0); 17662} 17663 17664static int 17665bxe_init_hw_port(struct bxe_softc *sc) 17666{ 17667 int port = SC_PORT(sc); 17668 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; 17669 uint32_t low, high; 17670 uint32_t val; 17671 17672 BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port); 17673 17674 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 17675 17676 ecore_init_block(sc, BLOCK_MISC, init_phase); 17677 ecore_init_block(sc, BLOCK_PXP, init_phase); 17678 ecore_init_block(sc, BLOCK_PXP2, init_phase); 17679 17680 /* 17681 * Timers bug workaround: the pf_master bit in pglue is disabled at the 17682 * common phase, so we need to enable it here before any DMAE accesses are 17683 * attempted. 
Therefore we manually added the enable-master to the 17684 * port phase (it also happens in the function phase) 17685 */ 17686 if (!CHIP_IS_E1x(sc)) { 17687 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 17688 } 17689 17690 ecore_init_block(sc, BLOCK_ATC, init_phase); 17691 ecore_init_block(sc, BLOCK_DMAE, init_phase); 17692 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); 17693 ecore_init_block(sc, BLOCK_QM, init_phase); 17694 17695 ecore_init_block(sc, BLOCK_TCM, init_phase); 17696 ecore_init_block(sc, BLOCK_UCM, init_phase); 17697 ecore_init_block(sc, BLOCK_CCM, init_phase); 17698 ecore_init_block(sc, BLOCK_XCM, init_phase); 17699 17700 /* QM cid (connection) count */ 17701 ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET); 17702 17703 if (CNIC_SUPPORT(sc)) { 17704 ecore_init_block(sc, BLOCK_TM, init_phase); 17705 REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20); 17706 REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 17707 } 17708 17709 ecore_init_block(sc, BLOCK_DORQ, init_phase); 17710 17711 ecore_init_block(sc, BLOCK_BRB1, init_phase); 17712 17713 if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) { 17714 if (IS_MF(sc)) { 17715 low = (BXE_ONE_PORT(sc) ? 160 : 246); 17716 } else if (sc->mtu > 4096) { 17717 if (BXE_ONE_PORT(sc)) { 17718 low = 160; 17719 } else { 17720 val = sc->mtu; 17721 /* (24*1024 + val*4)/256 */ 17722 low = (96 + (val / 64) + ((val % 64) ? 1 : 0)); 17723 } 17724 } else { 17725 low = (BXE_ONE_PORT(sc) ? 80 : 160); 17726 } 17727 high = (low + 56); /* 14*1024/256 */ 17728 REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); 17729 REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); 17730 } 17731 17732 if (CHIP_IS_MODE_4_PORT(sc)) { 17733 REG_WR(sc, SC_PORT(sc) ? 17734 BRB1_REG_MAC_GUARANTIED_1 : 17735 BRB1_REG_MAC_GUARANTIED_0, 40); 17736 } 17737 17738 ecore_init_block(sc, BLOCK_PRS, init_phase); 17739 if (CHIP_IS_E3B0(sc)) { 17740 if (IS_MF_AFEX(sc)) { 17741 /* configure headers for AFEX mode */ 17742 REG_WR(sc, SC_PORT(sc) ? 17743 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 17744 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); 17745 REG_WR(sc, SC_PORT(sc) ? 17746 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : 17747 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); 17748 REG_WR(sc, SC_PORT(sc) ? 17749 PRS_REG_MUST_HAVE_HDRS_PORT_1 : 17750 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); 17751 } else { 17752 /* Ovlan exists only if we are in multi-function + 17753 * switch-dependent mode; in switch-independent mode there 17754 * are no ovlan headers 17755 */ 17756 REG_WR(sc, SC_PORT(sc) ? 17757 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 17758 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 17759 (sc->devinfo.mf_info.path_has_ovlan ? 
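/* (Illustrative note, not from the original source: per the NIG comment later in this function, these HDRS_AFTER_BASIC values are bitmaps of which L2 headers may follow the basic Ethernet header; 7 differs from 6 only in bit 0, which presumably admits the outer-VLAN header when the path has an ovlan.) */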
7 : 6)); 17760 } 17761 } 17762 17763 ecore_init_block(sc, BLOCK_TSDM, init_phase); 17764 ecore_init_block(sc, BLOCK_CSDM, init_phase); 17765 ecore_init_block(sc, BLOCK_USDM, init_phase); 17766 ecore_init_block(sc, BLOCK_XSDM, init_phase); 17767 17768 ecore_init_block(sc, BLOCK_TSEM, init_phase); 17769 ecore_init_block(sc, BLOCK_USEM, init_phase); 17770 ecore_init_block(sc, BLOCK_CSEM, init_phase); 17771 ecore_init_block(sc, BLOCK_XSEM, init_phase); 17772 17773 ecore_init_block(sc, BLOCK_UPB, init_phase); 17774 ecore_init_block(sc, BLOCK_XPB, init_phase); 17775 17776 ecore_init_block(sc, BLOCK_PBF, init_phase); 17777 17778 if (CHIP_IS_E1x(sc)) { 17779 /* configure PBF to work without PAUSE mtu 9000 */ 17780 REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 17781 17782 /* update threshold */ 17783 REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); 17784 /* update init credit */ 17785 REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); 17786 17787 /* probe changes */ 17788 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1); 17789 DELAY(50); 17790 REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0); 17791 } 17792 17793 if (CNIC_SUPPORT(sc)) { 17794 ecore_init_block(sc, BLOCK_SRC, init_phase); 17795 } 17796 17797 ecore_init_block(sc, BLOCK_CDU, init_phase); 17798 ecore_init_block(sc, BLOCK_CFC, init_phase); 17799 17800 if (CHIP_IS_E1(sc)) { 17801 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 17802 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 17803 } 17804 ecore_init_block(sc, BLOCK_HC, init_phase); 17805 17806 ecore_init_block(sc, BLOCK_IGU, init_phase); 17807 17808 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 17809 /* init aeu_mask_attn_func_0/1: 17810 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use 17811 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF 17812 * bits 4-7 are used for "per vn group attention" */ 17813 val = IS_MF(sc) ? 0xF7 : 0x7; 17814 /* Enable DCBX attention for all but E1 */ 17815 val |= CHIP_IS_E1(sc) ? 0 : 0x10; 17816 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); 17817 17818 ecore_init_block(sc, BLOCK_NIG, init_phase); 17819 17820 if (!CHIP_IS_E1x(sc)) { 17821 /* Bit-map indicating which L2 hdrs may appear after the 17822 * basic Ethernet header 17823 */ 17824 if (IS_MF_AFEX(sc)) { 17825 REG_WR(sc, SC_PORT(sc) ? 17826 NIG_REG_P1_HDRS_AFTER_BASIC : 17827 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); 17828 } else { 17829 REG_WR(sc, SC_PORT(sc) ? 17830 NIG_REG_P1_HDRS_AFTER_BASIC : 17831 NIG_REG_P0_HDRS_AFTER_BASIC, 17832 IS_MF_SD(sc) ? 7 : 6); 17833 } 17834 17835 if (CHIP_IS_E3(sc)) { 17836 REG_WR(sc, SC_PORT(sc) ? 17837 NIG_REG_LLH1_MF_MODE : 17838 NIG_REG_LLH_MF_MODE, IS_MF(sc)); 17839 } 17840 } 17841 if (!CHIP_IS_E3(sc)) { 17842 REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); 17843 } 17844 17845 if (!CHIP_IS_E1(sc)) { 17846 /* 0x2 disable mf_ov, 0x1 enable */ 17847 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 17848 (IS_MF_SD(sc) ? 0x1 : 0x2)); 17849 17850 if (!CHIP_IS_E1x(sc)) { 17851 val = 0; 17852 switch (sc->devinfo.mf_info.mf_mode) { 17853 case MULTI_FUNCTION_SD: 17854 val = 1; 17855 break; 17856 case MULTI_FUNCTION_SI: 17857 case MULTI_FUNCTION_AFEX: 17858 val = 2; 17859 break; 17860 } 17861 17862 REG_WR(sc, (SC_PORT(sc) ? 
NIG_REG_LLH1_CLS_TYPE : 17863 NIG_REG_LLH0_CLS_TYPE), val); 17864 } 17865 REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0); 17866 REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); 17867 REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); 17868 } 17869 17870 /* If SPIO5 is set to generate interrupts, enable it for this port */ 17871 val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); 17872 if (val & MISC_SPIO_SPIO5) { 17873 uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 17874 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 17875 val = REG_RD(sc, reg_addr); 17876 val |= AEU_INPUTS_ATTN_BITS_SPIO5; 17877 REG_WR(sc, reg_addr, val); 17878 } 17879 17880 return (0); 17881} 17882 17883static uint32_t 17884bxe_flr_clnup_reg_poll(struct bxe_softc *sc, 17885 uint32_t reg, 17886 uint32_t expected, 17887 uint32_t poll_count) 17888{ 17889 uint32_t cur_cnt = poll_count; 17890 uint32_t val; 17891 17892 while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) { 17893 DELAY(FLR_WAIT_INTERVAL); 17894 } 17895 17896 return (val); 17897} 17898 17899static int 17900bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc, 17901 uint32_t reg, 17902 char *msg, 17903 uint32_t poll_cnt) 17904{ 17905 uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt); 17906 17907 if (val != 0) { 17908 BLOGE(sc, "%s usage count=%d\n", msg, val); 17909 return (1); 17910 } 17911 17912 return (0); 17913} 17914 17915/* Common routines with VF FLR cleanup */ 17916static uint32_t 17917bxe_flr_clnup_poll_count(struct bxe_softc *sc) 17918{ 17919 /* adjust polling timeout */ 17920 if (CHIP_REV_IS_EMUL(sc)) { 17921 return (FLR_POLL_CNT * 2000); 17922 } 17923 17924 if (CHIP_REV_IS_FPGA(sc)) { 17925 return (FLR_POLL_CNT * 120); 17926 } 17927 17928 return (FLR_POLL_CNT); 17929} 17930 17931static int 17932bxe_poll_hw_usage_counters(struct bxe_softc *sc, 17933 uint32_t poll_cnt) 17934{ 17935 /* wait for CFC PF usage-counter to zero (includes all the VFs) */ 17936 if (bxe_flr_clnup_poll_hw_counter(sc, 17937 CFC_REG_NUM_LCIDS_INSIDE_PF, 17938 "CFC PF usage counter timed out", 17939 poll_cnt)) { 17940 return (1); 17941 } 17942 17943 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ 17944 if (bxe_flr_clnup_poll_hw_counter(sc, 17945 DORQ_REG_PF_USAGE_CNT, 17946 "DQ PF usage counter timed out", 17947 poll_cnt)) { 17948 return (1); 17949 } 17950 17951 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ 17952 if (bxe_flr_clnup_poll_hw_counter(sc, 17953 QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc), 17954 "QM PF usage counter timed out", 17955 poll_cnt)) { 17956 return (1); 17957 } 17958 17959 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ 17960 if (bxe_flr_clnup_poll_hw_counter(sc, 17961 TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc), 17962 "Timers VNIC usage counter timed out", 17963 poll_cnt)) { 17964 return (1); 17965 } 17966 17967 if (bxe_flr_clnup_poll_hw_counter(sc, 17968 TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc), 17969 "Timers NUM_SCANS usage counter timed out", 17970 poll_cnt)) { 17971 return (1); 17972 } 17973 17974 /* Wait for the DMAE PF usage counter to zero */ 17975 if (bxe_flr_clnup_poll_hw_counter(sc, 17976 dmae_reg_go_c[INIT_DMAE_C(sc)], 17977 "DMAE command register timed out", 17978 poll_cnt)) { 17979 return (1); 17980 } 17981 17982 return (0); 17983} 17984 17985#define OP_GEN_PARAM(param) \ 17986 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) 17987#define OP_GEN_TYPE(type) \ 17988 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) 17989#define OP_GEN_AGG_VECT(index) \ 17990 (((index) << 
SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) 17991 17992static int 17993bxe_send_final_clnup(struct bxe_softc *sc, 17994 uint8_t clnup_func, 17995 uint32_t poll_cnt) 17996{ 17997 uint32_t op_gen_command = 0; 17998 uint32_t comp_addr = (BAR_CSTRORM_INTMEM + 17999 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func)); 18000 int ret = 0; 18001 18002 if (REG_RD(sc, comp_addr)) { 18003 BLOGE(sc, "Cleanup complete was not 0 before sending\n"); 18004 return (1); 18005 } 18006 18007 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); 18008 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); 18009 op_gen_command |= OP_GEN_AGG_VECT(clnup_func); 18010 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; 18011 18012 BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n"); 18013 REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command); 18014 18015 if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) { 18016 BLOGE(sc, "FW final cleanup did not succeed\n"); 18017 BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n", 18018 (REG_RD(sc, comp_addr))); 18019 bxe_panic(sc, ("FLR cleanup failed\n")); 18020 return (1); 18021 } 18022 18023 /* Zero the completion for the next FLR */ 18024 REG_WR(sc, comp_addr, 0); 18025 18026 return (ret); 18027} 18028 18029static void 18030bxe_pbf_pN_buf_flushed(struct bxe_softc *sc, 18031 struct pbf_pN_buf_regs *regs, 18032 uint32_t poll_count) 18033{ 18034 uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start; 18035 uint32_t cur_cnt = poll_count; 18036 18037 crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed); 18038 crd = crd_start = REG_RD(sc, regs->crd); 18039 init_crd = REG_RD(sc, regs->init_crd); 18040 18041 BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); 18042 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd); 18043 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); 18044 18045 while ((crd != init_crd) && 18046 ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) < 18047 (init_crd - crd_start))) { 18048 if (cur_cnt--) { 18049 DELAY(FLR_WAIT_INTERVAL); 18050 crd = REG_RD(sc, regs->crd); 18051 crd_freed = REG_RD(sc, regs->crd_freed); 18052 } else { 18053 BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN); 18054 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd); 18055 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed); 18056 break; 18057 } 18058 } 18059 18060 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n", 18061 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 18062} 18063 18064static void 18065bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc, 18066 struct pbf_pN_cmd_regs *regs, 18067 uint32_t poll_count) 18068{ 18069 uint32_t occup, to_free, freed, freed_start; 18070 uint32_t cur_cnt = poll_count; 18071 18072 occup = to_free = REG_RD(sc, regs->lines_occup); 18073 freed = freed_start = REG_RD(sc, regs->lines_freed); 18074 18075 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); 18076 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); 18077 18078 while (occup && 18079 ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) { 18080 if (cur_cnt--) { 18081 DELAY(FLR_WAIT_INTERVAL); 18082 occup = REG_RD(sc, regs->lines_occup); 18083 freed = REG_RD(sc, regs->lines_freed); 18084 } else { 18085 BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN); 18086 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); 18087 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", 
regs->pN, freed); 18088 break; 18089 } 18090 } 18091 18092 BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n", 18093 poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 18094} 18095 18096static void 18097bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count) 18098{ 18099 struct pbf_pN_cmd_regs cmd_regs[] = { 18100 {0, (CHIP_IS_E3B0(sc)) ? 18101 PBF_REG_TQ_OCCUPANCY_Q0 : 18102 PBF_REG_P0_TQ_OCCUPANCY, 18103 (CHIP_IS_E3B0(sc)) ? 18104 PBF_REG_TQ_LINES_FREED_CNT_Q0 : 18105 PBF_REG_P0_TQ_LINES_FREED_CNT}, 18106 {1, (CHIP_IS_E3B0(sc)) ? 18107 PBF_REG_TQ_OCCUPANCY_Q1 : 18108 PBF_REG_P1_TQ_OCCUPANCY, 18109 (CHIP_IS_E3B0(sc)) ? 18110 PBF_REG_TQ_LINES_FREED_CNT_Q1 : 18111 PBF_REG_P1_TQ_LINES_FREED_CNT}, 18112 {4, (CHIP_IS_E3B0(sc)) ? 18113 PBF_REG_TQ_OCCUPANCY_LB_Q : 18114 PBF_REG_P4_TQ_OCCUPANCY, 18115 (CHIP_IS_E3B0(sc)) ? 18116 PBF_REG_TQ_LINES_FREED_CNT_LB_Q : 18117 PBF_REG_P4_TQ_LINES_FREED_CNT} 18118 }; 18119 18120 struct pbf_pN_buf_regs buf_regs[] = { 18121 {0, (CHIP_IS_E3B0(sc)) ? 18122 PBF_REG_INIT_CRD_Q0 : 18123 PBF_REG_P0_INIT_CRD , 18124 (CHIP_IS_E3B0(sc)) ? 18125 PBF_REG_CREDIT_Q0 : 18126 PBF_REG_P0_CREDIT, 18127 (CHIP_IS_E3B0(sc)) ? 18128 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : 18129 PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, 18130 {1, (CHIP_IS_E3B0(sc)) ? 18131 PBF_REG_INIT_CRD_Q1 : 18132 PBF_REG_P1_INIT_CRD, 18133 (CHIP_IS_E3B0(sc)) ? 18134 PBF_REG_CREDIT_Q1 : 18135 PBF_REG_P1_CREDIT, 18136 (CHIP_IS_E3B0(sc)) ? 18137 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : 18138 PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, 18139 {4, (CHIP_IS_E3B0(sc)) ? 18140 PBF_REG_INIT_CRD_LB_Q : 18141 PBF_REG_P4_INIT_CRD, 18142 (CHIP_IS_E3B0(sc)) ? 18143 PBF_REG_CREDIT_LB_Q : 18144 PBF_REG_P4_CREDIT, 18145 (CHIP_IS_E3B0(sc)) ? 18146 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : 18147 PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, 18148 }; 18149 18150 int i; 18151 18152 /* Verify the command queues are flushed P0, P1, P4 */ 18153 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) { 18154 bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count); 18155 } 18156 18157 /* Verify the transmission buffers are flushed P0, P1, P4 */ 18158 for (i = 0; i < ARRAY_SIZE(buf_regs); i++) { 18159 bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count); 18160 } 18161} 18162 18163static void 18164bxe_hw_enable_status(struct bxe_softc *sc) 18165{ 18166 uint32_t val; 18167 18168 val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF); 18169 BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); 18170 18171 val = REG_RD(sc, PBF_REG_DISABLE_PF); 18172 BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val); 18173 18174 val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN); 18175 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); 18176 18177 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN); 18178 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); 18179 18180 val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK); 18181 BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); 18182 18183 val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); 18184 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); 18185 18186 val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); 18187 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); 18188 18189 val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); 18190 BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val); 18191} 18192 18193static int 18194bxe_pf_flr_clnup(struct bxe_softc *sc) 18195{ 18196 uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc); 18197 18198 BLOGD(sc, DBG_LOAD, 
"Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc)); 18199 18200 /* Re-enable PF target read access */ 18201 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 18202 18203 /* Poll HW usage counters */ 18204 BLOGD(sc, DBG_LOAD, "Polling usage counters\n"); 18205 if (bxe_poll_hw_usage_counters(sc, poll_cnt)) { 18206 return (-1); 18207 } 18208 18209 /* Zero the igu 'trailing edge' and 'leading edge' */ 18210 18211 /* Send the FW cleanup command */ 18212 if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) { 18213 return (-1); 18214 } 18215 18216 /* ATC cleanup */ 18217 18218 /* Verify TX hw is flushed */ 18219 bxe_tx_hw_flushed(sc, poll_cnt); 18220 18221 /* Wait 100ms (not adjusted according to platform) */ 18222 DELAY(100000); 18223 18224 /* Verify no pending pci transactions */ 18225 if (bxe_is_pcie_pending(sc)) { 18226 BLOGE(sc, "PCIE Transactions still pending\n"); 18227 } 18228 18229 /* Debug */ 18230 bxe_hw_enable_status(sc); 18231 18232 /* 18233 * Master enable - Due to WB DMAE writes performed before this 18234 * register is re-initialized as part of the regular function init 18235 */ 18236 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 18237 18238 return (0); 18239} 18240 18241#if 0 18242static void 18243bxe_init_searcher(struct bxe_softc *sc) 18244{ 18245 int port = SC_PORT(sc); 18246 ecore_src_init_t2(sc, sc->t2, sc->t2_mapping, SRC_CONN_NUM); 18247 /* T1 hash bits value determines the T1 number of entries */ 18248 REG_WR(sc, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); 18249} 18250#endif 18251 18252static int 18253bxe_init_hw_func(struct bxe_softc *sc) 18254{ 18255 int port = SC_PORT(sc); 18256 int func = SC_FUNC(sc); 18257 int init_phase = PHASE_PF0 + func; 18258 struct ecore_ilt *ilt = sc->ilt; 18259 uint16_t cdu_ilt_start; 18260 uint32_t addr, val; 18261 uint32_t main_mem_base, main_mem_size, main_mem_prty_clr; 18262 int i, main_mem_width, rc; 18263 18264 BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func); 18265 18266 /* FLR cleanup */ 18267 if (!CHIP_IS_E1x(sc)) { 18268 rc = bxe_pf_flr_clnup(sc); 18269 if (rc) { 18270 BLOGE(sc, "FLR cleanup failed!\n"); 18271 // XXX bxe_fw_dump(sc); 18272 // XXX bxe_idle_chk(sc); 18273 return (rc); 18274 } 18275 } 18276 18277 /* set MSI reconfigure capability */ 18278 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18279 addr = (port ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 18280 val = REG_RD(sc, addr); 18281 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 18282 REG_WR(sc, addr, val); 18283 } 18284 18285 ecore_init_block(sc, BLOCK_PXP, init_phase); 18286 ecore_init_block(sc, BLOCK_PXP2, init_phase); 18287 18288 ilt = sc->ilt; 18289 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 18290 18291#if 0 18292 if (IS_SRIOV(sc)) { 18293 cdu_ilt_start += BXE_FIRST_VF_CID/ILT_PAGE_CIDS; 18294 } 18295 cdu_ilt_start = bxe_iov_init_ilt(sc, cdu_ilt_start); 18296 18297#if (BXE_FIRST_VF_CID > 0) 18298 /* 18299 * If BXE_FIRST_VF_CID > 0 then the PF L2 cids precede 18300 * those of the VFs, so the start line should be reset 18301 */ 18302 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; 18303#endif 18304#endif 18305 18306 for (i = 0; i < L2_ILT_LINES(sc); i++) { 18307 ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt; 18308 ilt->lines[cdu_ilt_start + i].page_mapping = 18309 sc->context[i].vcxt_dma.paddr; 18310 ilt->lines[cdu_ilt_start + i].size = sc->context[i].size; 18311 } 18312 ecore_ilt_init_op(sc, INITOP_SET); 18313 18314#if 0 18315 if (!CONFIGURE_NIC_MODE(sc)) { 18316 bxe_init_searcher(sc); 18317 REG_WR(sc, PRS_REG_NIC_MODE, 0); 18318 BLOGD(sc, DBG_LOAD, "NIC MODE disabled\n"); 18319 } else 18320#endif 18321 { 18322 /* Set NIC mode */ 18323 REG_WR(sc, PRS_REG_NIC_MODE, 1); 18324 BLOGD(sc, DBG_LOAD, "NIC MODE configured\n"); 18325 } 18326 18327 if (!CHIP_IS_E1x(sc)) { 18328 uint32_t pf_conf = IGU_PF_CONF_FUNC_EN; 18329 18330 /* Turn on a single ISR mode in IGU if the driver is going to use 18331 * INT#x or MSI 18332 */ 18333 if (sc->interrupt_mode != INTR_MODE_MSIX) { 18334 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 18335 } 18336 18337 /* 18338 * Timers bug workaround: function init part. 18339 * We need to wait 20 msec after initializing the ILT 18340 * to make sure there are no requests in 18341 * any of the PXP internal queues with "old" ILT addresses 18342 */ 18343 DELAY(20000); 18344 18345 /* 18346 * Master enable - Due to WB DMAE writes performed before this 18347 * register is re-initialized as part of the regular function 18348 * init 18349 */ 18350 REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 18351 /* Enable the function in IGU */ 18352 REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf); 18353 } 18354 18355 sc->dmae_ready = 1; 18356 18357 ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); 18358 18359 if (!CHIP_IS_E1x(sc)) 18360 REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); 18361 18362 ecore_init_block(sc, BLOCK_ATC, init_phase); 18363 ecore_init_block(sc, BLOCK_DMAE, init_phase); 18364 ecore_init_block(sc, BLOCK_NIG, init_phase); 18365 ecore_init_block(sc, BLOCK_SRC, init_phase); 18366 ecore_init_block(sc, BLOCK_MISC, init_phase); 18367 ecore_init_block(sc, BLOCK_TCM, init_phase); 18368 ecore_init_block(sc, BLOCK_UCM, init_phase); 18369 ecore_init_block(sc, BLOCK_CCM, init_phase); 18370 ecore_init_block(sc, BLOCK_XCM, init_phase); 18371 ecore_init_block(sc, BLOCK_TSEM, init_phase); 18372 ecore_init_block(sc, BLOCK_USEM, init_phase); 18373 ecore_init_block(sc, BLOCK_CSEM, init_phase); 18374 ecore_init_block(sc, BLOCK_XSEM, init_phase); 18375 18376 if (!CHIP_IS_E1x(sc)) 18377 REG_WR(sc, QM_REG_PF_EN, 1); 18378 18379 if (!CHIP_IS_E1x(sc)) { 18380 REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18381 REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18382 REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18383 REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); 18384 } 18385 
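/* (Added note, not in the original source: the VFPF_ERR_NUM writes above appear to assign this PF an error-reporting id just past the VF id range, i.e. VFs occupy ids 0..BXE_MAX_NUM_OF_VFS-1 and the PF reports as BXE_MAX_NUM_OF_VFS + func; this is an inference from the register names and operands, not documented behavior.) */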
ecore_init_block(sc, BLOCK_QM, init_phase); 18386 18387 ecore_init_block(sc, BLOCK_TM, init_phase); 18388 ecore_init_block(sc, BLOCK_DORQ, init_phase); 18389 18390 bxe_iov_init_dq(sc); 18391 18392 ecore_init_block(sc, BLOCK_BRB1, init_phase); 18393 ecore_init_block(sc, BLOCK_PRS, init_phase); 18394 ecore_init_block(sc, BLOCK_TSDM, init_phase); 18395 ecore_init_block(sc, BLOCK_CSDM, init_phase); 18396 ecore_init_block(sc, BLOCK_USDM, init_phase); 18397 ecore_init_block(sc, BLOCK_XSDM, init_phase); 18398 ecore_init_block(sc, BLOCK_UPB, init_phase); 18399 ecore_init_block(sc, BLOCK_XPB, init_phase); 18400 ecore_init_block(sc, BLOCK_PBF, init_phase); 18401 if (!CHIP_IS_E1x(sc)) 18402 REG_WR(sc, PBF_REG_DISABLE_PF, 0); 18403 18404 ecore_init_block(sc, BLOCK_CDU, init_phase); 18405 18406 ecore_init_block(sc, BLOCK_CFC, init_phase); 18407 18408 if (!CHIP_IS_E1x(sc)) 18409 REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1); 18410 18411 if (IS_MF(sc)) { 18412 REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); 18413 REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc)); 18414 } 18415 18416 ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); 18417 18418 /* HC init per function */ 18419 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18420 if (CHIP_IS_E1H(sc)) { 18421 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 18422 18423 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 18424 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 18425 } 18426 ecore_init_block(sc, BLOCK_HC, init_phase); 18427 18428 } else { 18429 int num_segs, sb_idx, prod_offset; 18430 18431 REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 18432 18433 if (!CHIP_IS_E1x(sc)) { 18434 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 18435 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 18436 } 18437 18438 ecore_init_block(sc, BLOCK_IGU, init_phase); 18439 18440 if (!CHIP_IS_E1x(sc)) { 18441 int dsb_idx = 0; 18442 /** 18443 * Producer memory: 18444 * E2 mode: addresses 0-135 map to the mapping memory; 18445 * 136 - PF0 default prod; 137 - PF1 default prod; 18446 * 138 - PF2 default prod; 139 - PF3 default prod; 18447 * 140 - PF0 attn prod; 141 - PF1 attn prod; 18448 * 142 - PF2 attn prod; 143 - PF3 attn prod; 18449 * 144-147 reserved. 18450 * 18451 * E1.5 mode - in backward-compatible mode; 18452 * for non-default SBs, each even line in the memory 18453 * holds the U producer and each odd line holds 18454 * the C producer. The first 128 producers are for 18455 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 18456 * producers are for the DSB for each PF. 18457 * Each PF has five segments: (the order inside each 18458 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; 18459 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 18460 * 144-147 attn prods; 18461 */ 18462 /* non-default-status-blocks */ 18463 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 18464 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; 18465 for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) { 18466 prod_offset = (sc->igu_base_sb + sb_idx) * 18467 num_segs; 18468 18469 for (i = 0; i < num_segs; i++) { 18470 addr = IGU_REG_PROD_CONS_MEMORY + 18471 (prod_offset + i) * 4; 18472 REG_WR(sc, addr, 0); 18473 } 18474 /* send consumer update with value 0 */ 18475 bxe_ack_sb(sc, sc->igu_base_sb + sb_idx, 18476 USTORM_ID, 0, IGU_INT_NOP, 1); 18477 bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx); 18478 } 18479 18480 /* default-status-blocks */ 18481 num_segs = CHIP_INT_MODE_IS_BC(sc) ? 
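/* (Illustrative example, not from the original source: in normal (non-BC) mode on a 4-port chip, dsb_idx is the function number, so PF2 starts at prod_offset = IGU_NORM_BASE_DSB_PROD + 2; taking the E2 layout comment above at face value (PF0 default prod at line 136, so IGU_NORM_BASE_DSB_PROD is presumably 136) and assuming IGU_NORM_DSB_NUM_SEGS is 2, the clearing loop below touches lines 138 and 142 - PF2's default and attention producers.) */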
18482 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; 18483 18484 if (CHIP_IS_MODE_4_PORT(sc)) 18485 dsb_idx = SC_FUNC(sc); 18486 else 18487 dsb_idx = SC_VN(sc); 18488 18489 prod_offset = (CHIP_INT_MODE_IS_BC(sc) ? 18490 IGU_BC_BASE_DSB_PROD + dsb_idx : 18491 IGU_NORM_BASE_DSB_PROD + dsb_idx); 18492 18493 /* 18494 * IGU prods come in chunks of E1HVN_MAX (4); it 18495 * does not matter what the current chip mode is 18496 */ 18497 for (i = 0; i < (num_segs * E1HVN_MAX); 18498 i += E1HVN_MAX) { 18499 addr = IGU_REG_PROD_CONS_MEMORY + 18500 (prod_offset + i)*4; 18501 REG_WR(sc, addr, 0); 18502 } 18503 /* send consumer update with 0 */ 18504 if (CHIP_INT_MODE_IS_BC(sc)) { 18505 bxe_ack_sb(sc, sc->igu_dsb_id, 18506 USTORM_ID, 0, IGU_INT_NOP, 1); 18507 bxe_ack_sb(sc, sc->igu_dsb_id, 18508 CSTORM_ID, 0, IGU_INT_NOP, 1); 18509 bxe_ack_sb(sc, sc->igu_dsb_id, 18510 XSTORM_ID, 0, IGU_INT_NOP, 1); 18511 bxe_ack_sb(sc, sc->igu_dsb_id, 18512 TSTORM_ID, 0, IGU_INT_NOP, 1); 18513 bxe_ack_sb(sc, sc->igu_dsb_id, 18514 ATTENTION_ID, 0, IGU_INT_NOP, 1); 18515 } else { 18516 bxe_ack_sb(sc, sc->igu_dsb_id, 18517 USTORM_ID, 0, IGU_INT_NOP, 1); 18518 bxe_ack_sb(sc, sc->igu_dsb_id, 18519 ATTENTION_ID, 0, IGU_INT_NOP, 1); 18520 } 18521 bxe_igu_clear_sb(sc, sc->igu_dsb_id); 18522 18523 /* !!! these should become driver const once 18524 rf-tool supports split-68 const */ 18525 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); 18526 REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); 18527 REG_WR(sc, IGU_REG_SB_MASK_LSB, 0); 18528 REG_WR(sc, IGU_REG_SB_MASK_MSB, 0); 18529 REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0); 18530 REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0); 18531 } 18532 } 18533 18534 /* Reset PCIE errors for debug */ 18535 REG_WR(sc, 0x2114, 0xffffffff); 18536 REG_WR(sc, 0x2120, 0xffffffff); 18537 18538 if (CHIP_IS_E1x(sc)) { 18539 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ 18540 main_mem_base = HC_REG_MAIN_MEMORY + 18541 SC_PORT(sc) * (main_mem_size * 4); 18542 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; 18543 main_mem_width = 8; 18544 18545 val = REG_RD(sc, main_mem_prty_clr); 18546 if (val) { 18547 BLOGD(sc, DBG_LOAD, 18548 "Parity errors in HC block during function init (0x%x)!\n", 18549 val); 18550 } 18551 18552 /* Clear "false" parity errors in MSI-X table */ 18553 for (i = main_mem_base; 18554 i < main_mem_base + main_mem_size * 4; 18555 i += main_mem_width) { 18556 bxe_read_dmae(sc, i, main_mem_width / 4); 18557 bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data), 18558 i, main_mem_width / 4); 18559 } 18560 /* Clear HC parity attention */ 18561 REG_RD(sc, main_mem_prty_clr); 18562 } 18563 18564#if 1 18565 /* Enable STORMs SP logging */ 18566 REG_WR8(sc, BAR_USTRORM_INTMEM + 18567 USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18568 REG_WR8(sc, BAR_TSTRORM_INTMEM + 18569 TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18570 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18571 CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18572 REG_WR8(sc, BAR_XSTRORM_INTMEM + 18573 XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); 18574#endif 18575 18576 elink_phy_probe(&sc->link_params); 18577 18578 return (0); 18579} 18580 18581static void 18582bxe_link_reset(struct bxe_softc *sc) 18583{ 18584 if (!BXE_NOMCP(sc)) { 18585 BXE_PHY_LOCK(sc); 18586 elink_lfa_reset(&sc->link_params, &sc->link_vars); 18587 BXE_PHY_UNLOCK(sc); 18588 } else { 18589 if (!CHIP_REV_IS_SLOW(sc)) { 18590 BLOGW(sc, "Bootcode is missing - cannot reset link\n"); 18591 } 18592 } 18593} 18594 18595static void 18596bxe_reset_port(struct bxe_softc *sc) 18597{ 18598 int port = 
SC_PORT(sc); 18599 uint32_t val; 18600 18601 /* reset physical Link */ 18602 bxe_link_reset(sc); 18603 18604 REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 18605 18606 /* Do not rcv packets to BRB */ 18607 REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); 18608 /* Do not direct rcv packets that are not for MCP to the BRB */ 18609 REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : 18610 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); 18611 18612 /* Configure AEU */ 18613 REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); 18614 18615 DELAY(100000); 18616 18617 /* Check for BRB port occupancy */ 18618 val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); 18619 if (val) { 18620 BLOGD(sc, DBG_LOAD, 18621 "BRB1 is not empty, %d blocks are occupied\n", val); 18622 } 18623 18624 /* TODO: Close Doorbell port? */ 18625} 18626 18627static void 18628bxe_ilt_wr(struct bxe_softc *sc, 18629 uint32_t index, 18630 bus_addr_t addr) 18631{ 18632 int reg; 18633 uint32_t wb_write[2]; 18634 18635 if (CHIP_IS_E1(sc)) { 18636 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 18637 } else { 18638 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; 18639 } 18640 18641 wb_write[0] = ONCHIP_ADDR1(addr); 18642 wb_write[1] = ONCHIP_ADDR2(addr); 18643 REG_WR_DMAE(sc, reg, wb_write, 2); 18644} 18645 18646static void 18647bxe_clear_func_ilt(struct bxe_softc *sc, 18648 uint32_t func) 18649{ 18650 uint32_t i, base = FUNC_ILT_BASE(func); 18651 for (i = base; i < base + ILT_PER_FUNC; i++) { 18652 bxe_ilt_wr(sc, i, 0); 18653 } 18654} 18655 18656static void 18657bxe_reset_func(struct bxe_softc *sc) 18658{ 18659 struct bxe_fastpath *fp; 18660 int port = SC_PORT(sc); 18661 int func = SC_FUNC(sc); 18662 int i; 18663 18664 /* Disable the function in the FW */ 18665 REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); 18666 REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); 18667 REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); 18668 REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); 18669 18670 /* FP SBs */ 18671 FOR_EACH_ETH_QUEUE(sc, i) { 18672 fp = &sc->fp[i]; 18673 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18674 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), 18675 SB_DISABLED); 18676 } 18677 18678#if 0 18679 if (CNIC_LOADED(sc)) { 18680 /* CNIC SB */ 18681 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18682 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET 18683 (bxe_cnic_fw_sb_id(sc)), SB_DISABLED); 18684 } 18685#endif 18686 18687 /* SP SB */ 18688 REG_WR8(sc, BAR_CSTRORM_INTMEM + 18689 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), 18690 SB_DISABLED); 18691 18692 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) { 18693 REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0); 18694 } 18695 18696 /* Configure IGU */ 18697 if (sc->devinfo.int_block == INT_BLOCK_HC) { 18698 REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); 18699 REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); 18700 } else { 18701 REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); 18702 REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); 18703 } 18704 18705 if (CNIC_LOADED(sc)) { 18706 /* Disable Timer scan */ 18707 REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 18708 /* 18709 * Wait for at least 10ms and up to 2 seconds for the timers 18710 * scan to complete 18711 */ 18712 for (i = 0; i < 200; i++) { 18713 DELAY(10000); 18714 if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4)) 18715 break; 18716 } 18717 } 18718 18719 /* Clear ILT */ 18720 bxe_clear_func_ilt(sc, func); 18721 18722 /* 18723 * Timers bug workaround for E2: if this is vnic-3, 18724 * we need to set 
the entire ILT range for the timers. 18725 */ 18726 if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) { 18727 struct ilt_client_info ilt_cli; 18728 /* use dummy TM client */ 18729 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 18730 ilt_cli.start = 0; 18731 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 18732 ilt_cli.client_num = ILT_CLIENT_TM; 18733 18734 ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR); 18735 } 18736 18737 /* this assumes that reset_port() is called before reset_func() */ 18738 if (!CHIP_IS_E1x(sc)) { 18739 bxe_pf_disable(sc); 18740 } 18741 18742 sc->dmae_ready = 0; 18743} 18744 18745static int 18746bxe_gunzip_init(struct bxe_softc *sc) 18747{ 18748 return (0); 18749} 18750 18751static void 18752bxe_gunzip_end(struct bxe_softc *sc) 18753{ 18754 return; 18755} 18756 18757static int 18758bxe_init_firmware(struct bxe_softc *sc) 18759{ 18760 if (CHIP_IS_E1(sc)) { 18761 ecore_init_e1_firmware(sc); 18762 sc->iro_array = e1_iro_arr; 18763 } else if (CHIP_IS_E1H(sc)) { 18764 ecore_init_e1h_firmware(sc); 18765 sc->iro_array = e1h_iro_arr; 18766 } else if (!CHIP_IS_E1x(sc)) { 18767 ecore_init_e2_firmware(sc); 18768 sc->iro_array = e2_iro_arr; 18769 } else { 18770 BLOGE(sc, "Unsupported chip revision\n"); 18771 return (-1); 18772 } 18773 18774 return (0); 18775} 18776 18777static void 18778bxe_release_firmware(struct bxe_softc *sc) 18779{ 18780 /* Do nothing */ 18781 return; 18782} 18783 18784static int 18785ecore_gunzip(struct bxe_softc *sc, 18786 const uint8_t *zbuf, 18787 int len) 18788{ 18789 /* XXX : Implement... */ 18790 BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n"); 18791 return (FALSE); 18792} 18793 18794static void 18795ecore_reg_wr_ind(struct bxe_softc *sc, 18796 uint32_t addr, 18797 uint32_t val) 18798{ 18799 bxe_reg_wr_ind(sc, addr, val); 18800} 18801 18802static void 18803ecore_write_dmae_phys_len(struct bxe_softc *sc, 18804 bus_addr_t phys_addr, 18805 uint32_t addr, 18806 uint32_t len) 18807{ 18808 bxe_write_dmae_phys_len(sc, phys_addr, addr, len); 18809} 18810 18811void 18812ecore_storm_memset_struct(struct bxe_softc *sc, 18813 uint32_t addr, 18814 size_t size, 18815 uint32_t *data) 18816{ 18817 uint8_t i; 18818 for (i = 0; i < size/4; i++) { 18819 REG_WR(sc, addr + (i * 4), data[i]); 18820 } 18821} 18822 18823
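/*
 * Illustrative usage sketch for ecore_storm_memset_struct() above (added
 * note, not part of the original driver; the struct and offset macro names
 * below are hypothetical): copy a host-side structure into storm internal
 * memory one 32-bit word at a time.
 *
 *   struct hypothetical_sb_data sb_data = { ... };
 *   uint32_t *data = (uint32_t *)&sb_data;
 *   ecore_storm_memset_struct(sc,
 *       BAR_CSTRORM_INTMEM + HYPOTHETICAL_SB_DATA_OFFSET(fw_sb_id),
 *       sizeof(sb_data), data);
 *
 * Note that the loop writes size/4 dwords, so callers are expected to pass
 * a dword-aligned size.
 */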