qlnx_os.c revision 318661
1/* 2 * Copyright (c) 2017-2018 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 29/* 30 * File: qlnx_os.c 31 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131. 32 */ 33 34#include <sys/cdefs.h> 35__FBSDID("$FreeBSD: stable/10/sys/dev/qlnx/qlnxe/qlnx_os.c 318661 2017-05-22 19:36:26Z davidcs $"); 36 37#include "qlnx_os.h" 38#include "bcm_osal.h" 39#include "reg_addr.h" 40#include "ecore_gtt_reg_addr.h" 41#include "ecore.h" 42#include "ecore_chain.h" 43#include "ecore_status.h" 44#include "ecore_hw.h" 45#include "ecore_rt_defs.h" 46#include "ecore_init_ops.h" 47#include "ecore_int.h" 48#include "ecore_cxt.h" 49#include "ecore_spq.h" 50#include "ecore_init_fw_funcs.h" 51#include "ecore_sp_commands.h" 52#include "ecore_dev_api.h" 53#include "ecore_l2_api.h" 54#include "ecore_mcp.h" 55#include "ecore_hw_defs.h" 56#include "mcp_public.h" 57#include "ecore_iro.h" 58#include "nvm_cfg.h" 59#include "ecore_dev_api.h" 60#include "ecore_dbg_fw_funcs.h" 61 62#include "qlnx_ioctl.h" 63#include "qlnx_def.h" 64#include "qlnx_ver.h" 65#include <sys/smp.h> 66 67 68/* 69 * static functions 70 */ 71/* 72 * ioctl related functions 73 */ 74static void qlnx_add_sysctls(qlnx_host_t *ha); 75 76/* 77 * main driver 78 */ 79static void qlnx_release(qlnx_host_t *ha); 80static void qlnx_fp_isr(void *arg); 81static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha); 82static void qlnx_init(void *arg); 83static void qlnx_init_locked(qlnx_host_t *ha); 84static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi); 85static int qlnx_set_promisc(qlnx_host_t *ha); 86static int qlnx_set_allmulti(qlnx_host_t *ha); 87static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); 88static int qlnx_media_change(struct ifnet *ifp); 89static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr); 90static void qlnx_stop(qlnx_host_t *ha); 91static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, 92 struct mbuf **m_headp); 93static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha); 94static uint32_t qlnx_get_optics(qlnx_host_t *ha, 95 struct qlnx_link_output *if_link); 96static int qlnx_transmit(struct ifnet *ifp, struct mbuf *mp); 97static void qlnx_qflush(struct ifnet *ifp); 98 
99static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha); 100static void qlnx_free_parent_dma_tag(qlnx_host_t *ha); 101static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha); 102static void qlnx_free_tx_dma_tag(qlnx_host_t *ha); 103static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha); 104static void qlnx_free_rx_dma_tag(qlnx_host_t *ha); 105 106static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver); 107static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size); 108 109static int qlnx_nic_setup(struct ecore_dev *cdev, 110 struct ecore_pf_params *func_params); 111static int qlnx_nic_start(struct ecore_dev *cdev); 112static int qlnx_slowpath_start(qlnx_host_t *ha); 113static int qlnx_slowpath_stop(qlnx_host_t *ha); 114static int qlnx_init_hw(qlnx_host_t *ha); 115static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 116 char ver_str[VER_SIZE]); 117static void qlnx_unload(qlnx_host_t *ha); 118static int qlnx_load(qlnx_host_t *ha); 119static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 120 uint32_t add_mac); 121static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, 122 uint32_t len); 123static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq); 124static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq); 125static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, 126 struct qlnx_rx_queue *rxq); 127static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter); 128static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, 129 int hwfn_index); 130static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, 131 int hwfn_index); 132static void qlnx_timer(void *arg); 133static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); 134static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); 135static void qlnx_trigger_dump(qlnx_host_t *ha); 136static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 137 struct qlnx_tx_queue *txq); 138static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 139 int lro_enable); 140static void qlnx_fp_taskqueue(void *context, int pending); 141static void qlnx_sample_storm_stats(qlnx_host_t *ha); 142static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 143 struct qlnx_agg_info *tpa); 144static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa); 145 146#if __FreeBSD_version >= 1100000 147static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt); 148#endif 149 150 151/* 152 * Hooks to the Operating Systems 153 */ 154static int qlnx_pci_probe (device_t); 155static int qlnx_pci_attach (device_t); 156static int qlnx_pci_detach (device_t); 157 158static device_method_t qlnx_pci_methods[] = { 159 /* Device interface */ 160 DEVMETHOD(device_probe, qlnx_pci_probe), 161 DEVMETHOD(device_attach, qlnx_pci_attach), 162 DEVMETHOD(device_detach, qlnx_pci_detach), 163 { 0, 0 } 164}; 165 166static driver_t qlnx_pci_driver = { 167 "ql", qlnx_pci_methods, sizeof (qlnx_host_t), 168}; 169 170static devclass_t qlnx_devclass; 171 172MODULE_VERSION(if_qlnxe,1); 173DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0); 174 175MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1); 176MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1); 177 178MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver"); 179 180 181char qlnx_dev_str[64]; 182char qlnx_ver_str[VER_SIZE]; 183char qlnx_name_str[NAME_SIZE]; 184 185/* 186 * Some PCI Configuration Space Related Defines 187 */ 188 189#ifndef PCI_VENDOR_QLOGIC 
190#define PCI_VENDOR_QLOGIC 0x1077 191#endif 192 193/* 40G Adapter QLE45xxx*/ 194#ifndef QLOGIC_PCI_DEVICE_ID_1634 195#define QLOGIC_PCI_DEVICE_ID_1634 0x1634 196#endif 197 198/* 100G Adapter QLE45xxx*/ 199#ifndef QLOGIC_PCI_DEVICE_ID_1644 200#define QLOGIC_PCI_DEVICE_ID_1644 0x1644 201#endif 202 203/* 25G Adapter QLE45xxx*/ 204#ifndef QLOGIC_PCI_DEVICE_ID_1656 205#define QLOGIC_PCI_DEVICE_ID_1656 0x1656 206#endif 207 208/* 50G Adapter QLE45xxx*/ 209#ifndef QLOGIC_PCI_DEVICE_ID_1654 210#define QLOGIC_PCI_DEVICE_ID_1654 0x1654 211#endif 212 213static int 214qlnx_valid_device(device_t dev) 215{ 216 uint16_t device_id; 217 218 device_id = pci_get_device(dev); 219 220 if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) || 221 (device_id == QLOGIC_PCI_DEVICE_ID_1644) || 222 (device_id == QLOGIC_PCI_DEVICE_ID_1656) || 223 (device_id == QLOGIC_PCI_DEVICE_ID_1654)) 224 return 0; 225 226 return -1; 227} 228 229/* 230 * Name: qlnx_pci_probe 231 * Function: Validate the PCI device to be a QLA80XX device 232 */ 233static int 234qlnx_pci_probe(device_t dev) 235{ 236 snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d", 237 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD); 238 snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx"); 239 240 if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) { 241 return (ENXIO); 242 } 243 244 switch (pci_get_device(dev)) { 245 246 case QLOGIC_PCI_DEVICE_ID_1644: 247 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 248 "Qlogic 100GbE PCI CNA Adapter-Ethernet Function", 249 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 250 QLNX_VERSION_BUILD); 251 device_set_desc_copy(dev, qlnx_dev_str); 252 253 break; 254 255 case QLOGIC_PCI_DEVICE_ID_1634: 256 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 257 "Qlogic 40GbE PCI CNA Adapter-Ethernet Function", 258 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 259 QLNX_VERSION_BUILD); 260 device_set_desc_copy(dev, qlnx_dev_str); 261 262 break; 263 264 case QLOGIC_PCI_DEVICE_ID_1656: 265 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 266 "Qlogic 25GbE PCI CNA Adapter-Ethernet Function", 267 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 268 QLNX_VERSION_BUILD); 269 device_set_desc_copy(dev, qlnx_dev_str); 270 271 break; 272 273 case QLOGIC_PCI_DEVICE_ID_1654: 274 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 275 "Qlogic 50GbE PCI CNA Adapter-Ethernet Function", 276 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 277 QLNX_VERSION_BUILD); 278 device_set_desc_copy(dev, qlnx_dev_str); 279 280 break; 281 282 default: 283 return (ENXIO); 284 } 285 286 return (BUS_PROBE_DEFAULT); 287} 288 289 290static void 291qlnx_sp_intr(void *arg) 292{ 293 struct ecore_hwfn *p_hwfn; 294 qlnx_host_t *ha; 295 int i; 296 297 p_hwfn = arg; 298 299 if (p_hwfn == NULL) { 300 printf("%s: spurious slowpath intr\n", __func__); 301 return; 302 } 303 304 ha = (qlnx_host_t *)p_hwfn->p_dev; 305 306 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 307 308 for (i = 0; i < ha->cdev.num_hwfns; i++) { 309 if (&ha->cdev.hwfns[i] == p_hwfn) { 310 taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]); 311 break; 312 } 313 } 314 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 315 316 return; 317} 318 319static void 320qlnx_sp_taskqueue(void *context, int pending) 321{ 322 struct ecore_hwfn *p_hwfn; 323 324 p_hwfn = context; 325 326 if (p_hwfn != NULL) { 327 qlnx_sp_isr(p_hwfn); 328 } 329 return; 330} 331 332static int 333qlnx_create_sp_taskqueues(qlnx_host_t *ha) 334{ 335 int i; 336 uint8_t tq_name[32]; 337 338 for (i = 0; i < 
ha->cdev.num_hwfns; i++) { 339 340 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 341 342 bzero(tq_name, sizeof (tq_name)); 343 snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i); 344 345 TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn); 346 347 ha->sp_taskqueue[i] = taskqueue_create_fast(tq_name, M_NOWAIT, 348 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]); 349 350 if (ha->sp_taskqueue[i] == NULL) 351 return (-1); 352 353 taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s", 354 tq_name); 355 356 QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__, 357 ha->sp_taskqueue[i])); 358 } 359 360 return (0); 361} 362 363static void 364qlnx_destroy_sp_taskqueues(qlnx_host_t *ha) 365{ 366 int i; 367 368 for (i = 0; i < ha->cdev.num_hwfns; i++) { 369 if (ha->sp_taskqueue[i] != NULL) { 370 taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]); 371 taskqueue_free(ha->sp_taskqueue[i]); 372 } 373 } 374 return; 375} 376 377static void 378qlnx_fp_taskqueue(void *context, int pending) 379{ 380 struct qlnx_fastpath *fp; 381 qlnx_host_t *ha; 382 struct ifnet *ifp; 383 struct mbuf *mp; 384 int ret; 385 int lro_enable, tc; 386 int rx_int = 0, total_rx_count = 0; 387 struct thread *cthread; 388 389 fp = context; 390 391 if (fp == NULL) 392 return; 393 394 cthread = curthread; 395 396 thread_lock(cthread); 397 398 if (!sched_is_bound(cthread)) 399 sched_bind(cthread, fp->rss_id); 400 401 thread_unlock(cthread); 402 403 ha = (qlnx_host_t *)fp->edev; 404 405 ifp = ha->ifp; 406 407 lro_enable = ha->ifp->if_capenable & IFCAP_LRO; 408 409 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, lro_enable); 410 411 if (rx_int) { 412 fp->rx_pkts += rx_int; 413 total_rx_count += rx_int; 414 } 415 416#ifdef QLNX_SOFT_LRO 417 { 418 struct lro_ctrl *lro; 419 420 lro = &fp->rxq->lro; 421 422 if (lro_enable && total_rx_count) { 423 424#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 425 426 if (ha->dbg_trace_lro_cnt) { 427 if (lro->lro_mbuf_count & ~1023) 428 fp->lro_cnt_1024++; 429 else if (lro->lro_mbuf_count & ~511) 430 fp->lro_cnt_512++; 431 else if (lro->lro_mbuf_count & ~255) 432 fp->lro_cnt_256++; 433 else if (lro->lro_mbuf_count & ~127) 434 fp->lro_cnt_128++; 435 else if (lro->lro_mbuf_count & ~63) 436 fp->lro_cnt_64++; 437 } 438 tcp_lro_flush_all(lro); 439 440#else 441 struct lro_entry *queued; 442 443 while ((!SLIST_EMPTY(&lro->lro_active))) { 444 queued = SLIST_FIRST(&lro->lro_active); 445 SLIST_REMOVE_HEAD(&lro->lro_active, next); 446 tcp_lro_flush(lro, queued); 447 } 448#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 449 } 450 } 451#endif /* #ifdef QLNX_SOFT_LRO */ 452 453 ecore_sb_update_sb_idx(fp->sb_info); 454 rmb(); 455 456 mtx_lock(&fp->tx_mtx); 457 458 if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 459 IFF_DRV_RUNNING) || (!ha->link_up)) { 460 461 mtx_unlock(&fp->tx_mtx); 462 goto qlnx_fp_taskqueue_exit; 463 } 464 465 for (tc = 0; tc < ha->num_tc; tc++) { 466 (void)qlnx_tx_int(ha, fp, fp->txq[tc]); 467 } 468 469 mp = drbr_peek(ifp, fp->tx_br); 470 471 while (mp != NULL) { 472 473 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 474 ret = qlnx_send(ha, fp, &mp); 475 } else { 476 ret = -1; 477 } 478 479 if (ret) { 480 481 if (mp != NULL) { 482 drbr_putback(ifp, fp->tx_br, mp); 483 } else { 484 fp->tx_pkts_processed++; 485 drbr_advance(ifp, fp->tx_br); 486 } 487 488 mtx_unlock(&fp->tx_mtx); 489 490 goto qlnx_fp_taskqueue_exit; 491 492 } else { 493 drbr_advance(ifp, fp->tx_br); 494 fp->tx_pkts_transmitted++; 495 fp->tx_pkts_processed++; 496 
} 497 498 if (fp->tx_ring_full) 499 break; 500 501 mp = drbr_peek(ifp, fp->tx_br); 502 } 503 504 for (tc = 0; tc < ha->num_tc; tc++) { 505 (void)qlnx_tx_int(ha, fp, fp->txq[tc]); 506 } 507 508 mtx_unlock(&fp->tx_mtx); 509 510qlnx_fp_taskqueue_exit: 511 if (rx_int) { 512 if (fp->fp_taskqueue != NULL) 513 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 514 } else { 515 if (fp->tx_ring_full) { 516 qlnx_mdelay(__func__, 100); 517 } 518 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); 519 } 520 521 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret)); 522 return; 523} 524 525static int 526qlnx_create_fp_taskqueues(qlnx_host_t *ha) 527{ 528 int i; 529 uint8_t tq_name[32]; 530 struct qlnx_fastpath *fp; 531 532 for (i = 0; i < ha->num_rss; i++) { 533 534 fp = &ha->fp_array[i]; 535 536 bzero(tq_name, sizeof (tq_name)); 537 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i); 538 539 TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp); 540 541 fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT, 542 taskqueue_thread_enqueue, 543 &fp->fp_taskqueue); 544 545 if (fp->fp_taskqueue == NULL) 546 return (-1); 547 548 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s", 549 tq_name); 550 551 QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__, 552 fp->fp_taskqueue)); 553 } 554 555 return (0); 556} 557 558static void 559qlnx_destroy_fp_taskqueues(qlnx_host_t *ha) 560{ 561 int i; 562 struct qlnx_fastpath *fp; 563 564 for (i = 0; i < ha->num_rss; i++) { 565 566 fp = &ha->fp_array[i]; 567 568 if (fp->fp_taskqueue != NULL) { 569 570 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 571 taskqueue_free(fp->fp_taskqueue); 572 fp->fp_taskqueue = NULL; 573 } 574 } 575 return; 576} 577 578static void 579qlnx_drain_fp_taskqueues(qlnx_host_t *ha) 580{ 581 int i; 582 struct qlnx_fastpath *fp; 583 584 for (i = 0; i < ha->num_rss; i++) { 585 fp = &ha->fp_array[i]; 586 587 if (fp->fp_taskqueue != NULL) { 588 QLNX_UNLOCK(ha); 589 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 590 QLNX_LOCK(ha); 591 } 592 } 593 return; 594} 595 596/* 597 * Name: qlnx_pci_attach 598 * Function: attaches the device to the operating system 599 */ 600static int 601qlnx_pci_attach(device_t dev) 602{ 603 qlnx_host_t *ha = NULL; 604 uint32_t rsrc_len_reg = 0; 605 uint32_t rsrc_len_dbells = 0; 606 uint32_t rsrc_len_msix = 0; 607 int i; 608 uint32_t mfw_ver; 609 610 if ((ha = device_get_softc(dev)) == NULL) { 611 device_printf(dev, "cannot get softc\n"); 612 return (ENOMEM); 613 } 614 615 memset(ha, 0, sizeof (qlnx_host_t)); 616 617 if (qlnx_valid_device(dev) != 0) { 618 device_printf(dev, "device is not valid device\n"); 619 return (ENXIO); 620 } 621 ha->pci_func = pci_get_function(dev); 622 623 ha->pci_dev = dev; 624 625 mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); 626 627 ha->flags.lock_init = 1; 628 629 pci_enable_busmaster(dev); 630 631 /* 632 * map the PCI BARs 633 */ 634 635 ha->reg_rid = PCIR_BAR(0); 636 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, 637 RF_ACTIVE); 638 639 if (ha->pci_reg == NULL) { 640 device_printf(dev, "unable to map BAR0\n"); 641 goto qlnx_pci_attach_err; 642 } 643 644 rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 645 ha->reg_rid); 646 647 ha->dbells_rid = PCIR_BAR(2); 648 ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 649 &ha->dbells_rid, RF_ACTIVE); 650 651 if (ha->pci_dbells == NULL) { 652 device_printf(dev, "unable to map BAR1\n"); 653 goto qlnx_pci_attach_err; 654 } 655 656 rsrc_len_dbells = (uint32_t) 
bus_get_resource_count(dev, SYS_RES_MEMORY, 657 ha->dbells_rid); 658 659 ha->dbells_phys_addr = (uint64_t) 660 bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);; 661 ha->dbells_size = rsrc_len_dbells; 662 663 ha->msix_rid = PCIR_BAR(4); 664 ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 665 &ha->msix_rid, RF_ACTIVE); 666 667 if (ha->msix_bar == NULL) { 668 device_printf(dev, "unable to map BAR2\n"); 669 goto qlnx_pci_attach_err; 670 } 671 672 rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 673 ha->msix_rid); 674 /* 675 * allocate dma tags 676 */ 677 678 if (qlnx_alloc_parent_dma_tag(ha)) 679 goto qlnx_pci_attach_err; 680 681 if (qlnx_alloc_tx_dma_tag(ha)) 682 goto qlnx_pci_attach_err; 683 684 if (qlnx_alloc_rx_dma_tag(ha)) 685 goto qlnx_pci_attach_err; 686 687 688 if (qlnx_init_hw(ha) != 0) 689 goto qlnx_pci_attach_err; 690 691 /* 692 * Allocate MSI-x vectors 693 */ 694 ha->num_rss = QLNX_MAX_RSS; 695 ha->num_tc = QLNX_MAX_TC; 696 697 ha->msix_count = pci_msix_count(dev); 698 699 if (ha->msix_count > (mp_ncpus + ha->cdev.num_hwfns)) 700 ha->msix_count = mp_ncpus + ha->cdev.num_hwfns; 701 702 if (!ha->msix_count || 703 (ha->msix_count < (ha->cdev.num_hwfns + 1 ))) { 704 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, 705 ha->msix_count); 706 goto qlnx_pci_attach_err; 707 } 708 709 if (ha->msix_count > (ha->num_rss + ha->cdev.num_hwfns )) 710 ha->msix_count = ha->num_rss + ha->cdev.num_hwfns; 711 else 712 ha->num_rss = ha->msix_count - ha->cdev.num_hwfns; 713 714 QL_DPRINT1(ha, (dev, "%s:\n\t\t\tpci_reg [%p, 0x%08x 0x%08x]" 715 "\n\t\t\tdbells [%p, 0x%08x 0x%08x]" 716 "\n\t\t\tmsix [%p, 0x%08x 0x%08x 0x%x 0x%x]" 717 "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n", 718 __func__, ha->pci_reg, rsrc_len_reg, 719 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid, 720 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev), 721 ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc)); 722 723 if (pci_alloc_msix(dev, &ha->msix_count)) { 724 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__, 725 ha->msix_count); 726 ha->msix_count = 0; 727 goto qlnx_pci_attach_err; 728 } 729 730 /* 731 * Initialize slow path interrupt and task queue 732 */ 733 if (qlnx_create_sp_taskqueues(ha) != 0) 734 goto qlnx_pci_attach_err; 735 736 for (i = 0; i < ha->cdev.num_hwfns; i++) { 737 738 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 739 740 ha->sp_irq_rid[i] = i + 1; 741 ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, 742 &ha->sp_irq_rid[i], 743 (RF_ACTIVE | RF_SHAREABLE)); 744 if (ha->sp_irq[i] == NULL) { 745 device_printf(dev, 746 "could not allocate mbx interrupt\n"); 747 goto qlnx_pci_attach_err; 748 } 749 750 if (bus_setup_intr(dev, ha->sp_irq[i], 751 (INTR_TYPE_NET | INTR_MPSAFE), NULL, 752 qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) { 753 device_printf(dev, 754 "could not setup slow path interrupt\n"); 755 goto qlnx_pci_attach_err; 756 } 757 758 QL_DPRINT1(ha, (dev, "%s: p_hwfn [%p] sp_irq_rid %d" 759 " sp_irq %p sp_handle %p\n", __func__, p_hwfn, 760 ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i])); 761 762 } 763 764 /* 765 * initialize fast path interrupt 766 */ 767 if (qlnx_create_fp_taskqueues(ha) != 0) 768 goto qlnx_pci_attach_err; 769 770 for (i = 0; i < ha->num_rss; i++) { 771 ha->irq_vec[i].rss_idx = i; 772 ha->irq_vec[i].ha = ha; 773 ha->irq_vec[i].irq_rid = (1 + ha->cdev.num_hwfns) + i; 774 775 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 776 &ha->irq_vec[i].irq_rid, 777 
(RF_ACTIVE | RF_SHAREABLE)); 778 779 if (ha->irq_vec[i].irq == NULL) { 780 device_printf(dev, 781 "could not allocate interrupt[%d]\n", i); 782 goto qlnx_pci_attach_err; 783 } 784 785 if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) { 786 device_printf(dev, "could not allocate tx_br[%d]\n", i); 787 goto qlnx_pci_attach_err; 788 789 } 790 } 791 792 callout_init(&ha->qlnx_callout, 1); 793 ha->flags.callout_init = 1; 794 795 for (i = 0; i < ha->cdev.num_hwfns; i++) { 796 797 if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0) 798 goto qlnx_pci_attach_err; 799 if (ha->grcdump_size[i] == 0) 800 goto qlnx_pci_attach_err; 801 802 ha->grcdump_size[i] = ha->grcdump_size[i] << 2; 803 QL_DPRINT1(ha, (dev, "grcdump_size[%d] = 0x%08x\n", 804 i, ha->grcdump_size[i])); 805 806 ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]); 807 if (ha->grcdump[i] == NULL) { 808 device_printf(dev, "grcdump alloc[%d] failed\n", i); 809 goto qlnx_pci_attach_err; 810 } 811 812 if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0) 813 goto qlnx_pci_attach_err; 814 if (ha->idle_chk_size[i] == 0) 815 goto qlnx_pci_attach_err; 816 817 ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2; 818 QL_DPRINT1(ha, (dev, "idle_chk_size[%d] = 0x%08x\n", 819 i, ha->idle_chk_size[i])); 820 821 ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]); 822 823 if (ha->idle_chk[i] == NULL) { 824 device_printf(dev, "idle_chk alloc failed\n"); 825 goto qlnx_pci_attach_err; 826 } 827 } 828 829 if (qlnx_slowpath_start(ha) != 0) { 830 831 qlnx_mdelay(__func__, 1000); 832 qlnx_trigger_dump(ha); 833 834 goto qlnx_pci_attach_err0; 835 } else 836 ha->flags.slowpath_start = 1; 837 838 if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) { 839 qlnx_mdelay(__func__, 1000); 840 qlnx_trigger_dump(ha); 841 842 goto qlnx_pci_attach_err0; 843 } 844 845 if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) { 846 qlnx_mdelay(__func__, 1000); 847 qlnx_trigger_dump(ha); 848 849 goto qlnx_pci_attach_err0; 850 } 851 snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d", 852 ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF), 853 ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF)); 854 snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d", 855 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, 856 FW_ENGINEERING_VERSION); 857 858 QL_DPRINT1(ha, (dev, "%s: STORM_FW version %s MFW version %s\n", 859 __func__, ha->stormfw_ver, ha->mfw_ver)); 860 861 qlnx_init_ifnet(dev, ha); 862 863 /* 864 * add sysctls 865 */ 866 qlnx_add_sysctls(ha); 867 868qlnx_pci_attach_err0: 869 /* 870 * create ioctl device interface 871 */ 872 if (qlnx_make_cdev(ha)) { 873 device_printf(dev, "%s: ql_make_cdev failed\n", __func__); 874 goto qlnx_pci_attach_err; 875 } 876 877 QL_DPRINT2(ha, (dev, "%s: success\n", __func__)); 878 879 return (0); 880 881qlnx_pci_attach_err: 882 883 qlnx_release(ha); 884 885 return (ENXIO); 886} 887 888/* 889 * Name: qlnx_pci_detach 890 * Function: Unhooks the device from the operating system 891 */ 892static int 893qlnx_pci_detach(device_t dev) 894{ 895 qlnx_host_t *ha = NULL; 896 897 if ((ha = device_get_softc(dev)) == NULL) { 898 device_printf(dev, "cannot get softc\n"); 899 return (ENOMEM); 900 } 901 902 QLNX_LOCK(ha); 903 qlnx_stop(ha); 904 QLNX_UNLOCK(ha); 905 906 qlnx_release(ha); 907 908 return (0); 909} 910 911static int 912qlnx_init_hw(qlnx_host_t *ha) 913{ 914 int rval = 0; 915 struct ecore_hw_prepare_params params; 916 917 ecore_init_struct(&ha->cdev); 918 919 /* ha->dp_module = ECORE_MSG_PROBE | 920 ECORE_MSG_INTR | 921 ECORE_MSG_SP | 922 ECORE_MSG_LINK | 
923 ECORE_MSG_SPQ | 924 ECORE_MSG_RDMA; 925 ha->dp_level = ECORE_LEVEL_VERBOSE;*/ 926 ha->dp_level = ECORE_LEVEL_NOTICE; 927 928 ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev); 929 930 ha->cdev.regview = ha->pci_reg; 931 ha->cdev.doorbells = ha->pci_dbells; 932 ha->cdev.db_phys_addr = ha->dbells_phys_addr; 933 ha->cdev.db_size = ha->dbells_size; 934 935 bzero(¶ms, sizeof (struct ecore_hw_prepare_params)); 936 937 ha->personality = ECORE_PCI_DEFAULT; 938 939 params.personality = ha->personality; 940 941 params.drv_resc_alloc = false; 942 params.chk_reg_fifo = false; 943 params.initiate_pf_flr = true; 944 params.epoch = 0; 945 946 ecore_hw_prepare(&ha->cdev, ¶ms); 947 948 qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str); 949 950 return (rval); 951} 952 953static void 954qlnx_release(qlnx_host_t *ha) 955{ 956 device_t dev; 957 int i; 958 959 dev = ha->pci_dev; 960 961 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); 962 963 for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) { 964 if (ha->idle_chk[i] != NULL) { 965 free(ha->idle_chk[i], M_QLNXBUF); 966 ha->idle_chk[i] = NULL; 967 } 968 969 if (ha->grcdump[i] != NULL) { 970 free(ha->grcdump[i], M_QLNXBUF); 971 ha->grcdump[i] = NULL; 972 } 973 } 974 975 if (ha->flags.callout_init) 976 callout_drain(&ha->qlnx_callout); 977 978 if (ha->flags.slowpath_start) { 979 qlnx_slowpath_stop(ha); 980 } 981 982 ecore_hw_remove(&ha->cdev); 983 984 qlnx_del_cdev(ha); 985 986 if (ha->ifp != NULL) 987 ether_ifdetach(ha->ifp); 988 989 qlnx_free_tx_dma_tag(ha); 990 991 qlnx_free_rx_dma_tag(ha); 992 993 qlnx_free_parent_dma_tag(ha); 994 995 for (i = 0; i < ha->num_rss; i++) { 996 struct qlnx_fastpath *fp = &ha->fp_array[i]; 997 998 if (ha->irq_vec[i].handle) { 999 (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, 1000 ha->irq_vec[i].handle); 1001 } 1002 1003 if (ha->irq_vec[i].irq) { 1004 (void)bus_release_resource(dev, SYS_RES_IRQ, 1005 ha->irq_vec[i].irq_rid, 1006 ha->irq_vec[i].irq); 1007 } 1008 1009 qlnx_free_tx_br(ha, fp); 1010 } 1011 qlnx_destroy_fp_taskqueues(ha); 1012 1013 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1014 if (ha->sp_handle[i]) 1015 (void)bus_teardown_intr(dev, ha->sp_irq[i], 1016 ha->sp_handle[i]); 1017 1018 if (ha->sp_irq[i]) 1019 (void) bus_release_resource(dev, SYS_RES_IRQ, 1020 ha->sp_irq_rid[i], ha->sp_irq[i]); 1021 } 1022 1023 qlnx_destroy_sp_taskqueues(ha); 1024 1025 if (ha->msix_count) 1026 pci_release_msi(dev); 1027 1028 if (ha->flags.lock_init) { 1029 mtx_destroy(&ha->hw_lock); 1030 } 1031 1032 if (ha->pci_reg) 1033 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, 1034 ha->pci_reg); 1035 1036 if (ha->pci_dbells) 1037 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid, 1038 ha->pci_dbells); 1039 1040 if (ha->msix_bar) 1041 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid, 1042 ha->msix_bar); 1043 1044 QL_DPRINT2(ha, (dev, "%s: exit\n", __func__)); 1045 return; 1046} 1047 1048static void 1049qlnx_trigger_dump(qlnx_host_t *ha) 1050{ 1051 int i; 1052 1053 if (ha->ifp != NULL) 1054 ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); 1055 1056 QL_DPRINT2(ha, (ha->pci_dev, "%s: start\n", __func__)); 1057 1058 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1059 qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i); 1060 qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i); 1061 } 1062 1063 QL_DPRINT2(ha, (ha->pci_dev, "%s: end\n", __func__)); 1064 1065 return; 1066} 1067 1068static int 1069qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS) 1070{ 1071 int err, ret = 0; 1072 qlnx_host_t *ha; 1073 1074 err = 
sysctl_handle_int(oidp, &ret, 0, req); 1075 1076 if (err || !req->newptr) 1077 return (err); 1078 1079 if (ret == 1) { 1080 ha = (qlnx_host_t *)arg1; 1081 qlnx_trigger_dump(ha); 1082 } 1083 return (err); 1084} 1085 1086static int 1087qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS) 1088{ 1089 int err, i, ret = 0, usecs = 0; 1090 qlnx_host_t *ha; 1091 struct ecore_hwfn *p_hwfn; 1092 struct qlnx_fastpath *fp; 1093 1094 err = sysctl_handle_int(oidp, &usecs, 0, req); 1095 1096 if (err || !req->newptr || !usecs || (usecs > 255)) 1097 return (err); 1098 1099 ha = (qlnx_host_t *)arg1; 1100 1101 for (i = 0; i < ha->num_rss; i++) { 1102 1103 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1104 1105 fp = &ha->fp_array[i]; 1106 1107 if (fp->txq[0]->handle != NULL) { 1108 ret = ecore_set_queue_coalesce(p_hwfn, 0, 1109 (uint16_t)usecs, fp->txq[0]->handle); 1110 } 1111 } 1112 1113 if (!ret) 1114 ha->tx_coalesce_usecs = (uint8_t)usecs; 1115 1116 return (err); 1117} 1118 1119static int 1120qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS) 1121{ 1122 int err, i, ret = 0, usecs = 0; 1123 qlnx_host_t *ha; 1124 struct ecore_hwfn *p_hwfn; 1125 struct qlnx_fastpath *fp; 1126 1127 err = sysctl_handle_int(oidp, &usecs, 0, req); 1128 1129 if (err || !req->newptr || !usecs || (usecs > 255)) 1130 return (err); 1131 1132 ha = (qlnx_host_t *)arg1; 1133 1134 for (i = 0; i < ha->num_rss; i++) { 1135 1136 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1137 1138 fp = &ha->fp_array[i]; 1139 1140 if (fp->rxq->handle != NULL) { 1141 ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs, 1142 0, fp->rxq->handle); 1143 } 1144 } 1145 1146 if (!ret) 1147 ha->rx_coalesce_usecs = (uint8_t)usecs; 1148 1149 return (err); 1150} 1151 1152static void 1153qlnx_add_sp_stats_sysctls(qlnx_host_t *ha) 1154{ 1155 struct sysctl_ctx_list *ctx; 1156 struct sysctl_oid_list *children; 1157 struct sysctl_oid *ctx_oid; 1158 1159 ctx = device_get_sysctl_ctx(ha->pci_dev); 1160 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1161 1162 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat", 1163 CTLFLAG_RD, NULL, "spstat"); 1164 children = SYSCTL_CHILDREN(ctx_oid); 1165 1166 SYSCTL_ADD_QUAD(ctx, children, 1167 OID_AUTO, "sp_interrupts", 1168 CTLFLAG_RD, &ha->sp_interrupts, 1169 "No. of slowpath interrupts"); 1170 1171 return; 1172} 1173 1174static void 1175qlnx_add_fp_stats_sysctls(qlnx_host_t *ha) 1176{ 1177 struct sysctl_ctx_list *ctx; 1178 struct sysctl_oid_list *children; 1179 struct sysctl_oid_list *node_children; 1180 struct sysctl_oid *ctx_oid; 1181 int i, j; 1182 uint8_t name_str[16]; 1183 1184 ctx = device_get_sysctl_ctx(ha->pci_dev); 1185 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1186 1187 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat", 1188 CTLFLAG_RD, NULL, "fpstat"); 1189 children = SYSCTL_CHILDREN(ctx_oid); 1190 1191 for (i = 0; i < ha->num_rss; i++) { 1192 1193 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1194 snprintf(name_str, sizeof(name_str), "%d", i); 1195 1196 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, 1197 CTLFLAG_RD, NULL, name_str); 1198 node_children = SYSCTL_CHILDREN(ctx_oid); 1199 1200 /* Tx Related */ 1201 1202 SYSCTL_ADD_QUAD(ctx, node_children, 1203 OID_AUTO, "tx_pkts_processed", 1204 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed, 1205 "No. of packets processed for transmission"); 1206 1207 SYSCTL_ADD_QUAD(ctx, node_children, 1208 OID_AUTO, "tx_pkts_freed", 1209 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed, 1210 "No. 
of freed packets"); 1211 1212 SYSCTL_ADD_QUAD(ctx, node_children, 1213 OID_AUTO, "tx_pkts_transmitted", 1214 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted, 1215 "No. of transmitted packets"); 1216 1217 SYSCTL_ADD_QUAD(ctx, node_children, 1218 OID_AUTO, "tx_pkts_completed", 1219 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed, 1220 "No. of transmit completions"); 1221 1222 SYSCTL_ADD_QUAD(ctx, node_children, 1223 OID_AUTO, "tx_lso_wnd_min_len", 1224 CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len, 1225 "tx_lso_wnd_min_len"); 1226 1227 SYSCTL_ADD_QUAD(ctx, node_children, 1228 OID_AUTO, "tx_defrag", 1229 CTLFLAG_RD, &ha->fp_array[i].tx_defrag, 1230 "tx_defrag"); 1231 1232 SYSCTL_ADD_QUAD(ctx, node_children, 1233 OID_AUTO, "tx_nsegs_gt_elem_left", 1234 CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left, 1235 "tx_nsegs_gt_elem_left"); 1236 1237 SYSCTL_ADD_UINT(ctx, node_children, 1238 OID_AUTO, "tx_tso_max_nsegs", 1239 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs, 1240 ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs"); 1241 1242 SYSCTL_ADD_UINT(ctx, node_children, 1243 OID_AUTO, "tx_tso_min_nsegs", 1244 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs, 1245 ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs"); 1246 1247 SYSCTL_ADD_UINT(ctx, node_children, 1248 OID_AUTO, "tx_tso_max_pkt_len", 1249 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len, 1250 ha->fp_array[i].tx_tso_max_pkt_len, 1251 "tx_tso_max_pkt_len"); 1252 1253 SYSCTL_ADD_UINT(ctx, node_children, 1254 OID_AUTO, "tx_tso_min_pkt_len", 1255 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len, 1256 ha->fp_array[i].tx_tso_min_pkt_len, 1257 "tx_tso_min_pkt_len"); 1258 1259 for (j = 0; j < QLNX_FP_MAX_SEGS; j++) { 1260 1261 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1262 snprintf(name_str, sizeof(name_str), 1263 "tx_pkts_nseg_%02d", (j+1)); 1264 1265 SYSCTL_ADD_QUAD(ctx, node_children, 1266 OID_AUTO, name_str, CTLFLAG_RD, 1267 &ha->fp_array[i].tx_pkts[j], name_str); 1268 } 1269 1270 SYSCTL_ADD_QUAD(ctx, node_children, 1271 OID_AUTO, "err_tx_nsegs_gt_elem_left", 1272 CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left, 1273 "err_tx_nsegs_gt_elem_left"); 1274 1275 SYSCTL_ADD_QUAD(ctx, node_children, 1276 OID_AUTO, "err_tx_dmamap_create", 1277 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create, 1278 "err_tx_dmamap_create"); 1279 1280 SYSCTL_ADD_QUAD(ctx, node_children, 1281 OID_AUTO, "err_tx_defrag_dmamap_load", 1282 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load, 1283 "err_tx_defrag_dmamap_load"); 1284 1285 SYSCTL_ADD_QUAD(ctx, node_children, 1286 OID_AUTO, "err_tx_non_tso_max_seg", 1287 CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg, 1288 "err_tx_non_tso_max_seg"); 1289 1290 SYSCTL_ADD_QUAD(ctx, node_children, 1291 OID_AUTO, "err_tx_dmamap_load", 1292 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load, 1293 "err_tx_dmamap_load"); 1294 1295 SYSCTL_ADD_QUAD(ctx, node_children, 1296 OID_AUTO, "err_tx_defrag", 1297 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag, 1298 "err_tx_defrag"); 1299 1300 SYSCTL_ADD_QUAD(ctx, node_children, 1301 OID_AUTO, "err_tx_free_pkt_null", 1302 CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null, 1303 "err_tx_free_pkt_null"); 1304 1305 SYSCTL_ADD_QUAD(ctx, node_children, 1306 OID_AUTO, "err_tx_cons_idx_conflict", 1307 CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict, 1308 "err_tx_cons_idx_conflict"); 1309 1310 SYSCTL_ADD_QUAD(ctx, node_children, 1311 OID_AUTO, "lro_cnt_64", 1312 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64, 1313 "lro_cnt_64"); 1314 1315 SYSCTL_ADD_QUAD(ctx, node_children, 1316 
OID_AUTO, "lro_cnt_128", 1317 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128, 1318 "lro_cnt_128"); 1319 1320 SYSCTL_ADD_QUAD(ctx, node_children, 1321 OID_AUTO, "lro_cnt_256", 1322 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256, 1323 "lro_cnt_256"); 1324 1325 SYSCTL_ADD_QUAD(ctx, node_children, 1326 OID_AUTO, "lro_cnt_512", 1327 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512, 1328 "lro_cnt_512"); 1329 1330 SYSCTL_ADD_QUAD(ctx, node_children, 1331 OID_AUTO, "lro_cnt_1024", 1332 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024, 1333 "lro_cnt_1024"); 1334 1335 /* Rx Related */ 1336 1337 SYSCTL_ADD_QUAD(ctx, node_children, 1338 OID_AUTO, "rx_pkts", 1339 CTLFLAG_RD, &ha->fp_array[i].rx_pkts, 1340 "No. of received packets"); 1341 1342 SYSCTL_ADD_QUAD(ctx, node_children, 1343 OID_AUTO, "tpa_start", 1344 CTLFLAG_RD, &ha->fp_array[i].tpa_start, 1345 "No. of tpa_start packets"); 1346 1347 SYSCTL_ADD_QUAD(ctx, node_children, 1348 OID_AUTO, "tpa_cont", 1349 CTLFLAG_RD, &ha->fp_array[i].tpa_cont, 1350 "No. of tpa_cont packets"); 1351 1352 SYSCTL_ADD_QUAD(ctx, node_children, 1353 OID_AUTO, "tpa_end", 1354 CTLFLAG_RD, &ha->fp_array[i].tpa_end, 1355 "No. of tpa_end packets"); 1356 1357 SYSCTL_ADD_QUAD(ctx, node_children, 1358 OID_AUTO, "err_m_getcl", 1359 CTLFLAG_RD, &ha->fp_array[i].err_m_getcl, 1360 "err_m_getcl"); 1361 1362 SYSCTL_ADD_QUAD(ctx, node_children, 1363 OID_AUTO, "err_m_getjcl", 1364 CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl, 1365 "err_m_getjcl"); 1366 1367 SYSCTL_ADD_QUAD(ctx, node_children, 1368 OID_AUTO, "err_rx_hw_errors", 1369 CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors, 1370 "err_rx_hw_errors"); 1371 1372 SYSCTL_ADD_QUAD(ctx, node_children, 1373 OID_AUTO, "err_rx_alloc_errors", 1374 CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors, 1375 "err_rx_alloc_errors"); 1376 } 1377 1378 return; 1379} 1380 1381static void 1382qlnx_add_hw_stats_sysctls(qlnx_host_t *ha) 1383{ 1384 struct sysctl_ctx_list *ctx; 1385 struct sysctl_oid_list *children; 1386 struct sysctl_oid *ctx_oid; 1387 1388 ctx = device_get_sysctl_ctx(ha->pci_dev); 1389 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1390 1391 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat", 1392 CTLFLAG_RD, NULL, "hwstat"); 1393 children = SYSCTL_CHILDREN(ctx_oid); 1394 1395 SYSCTL_ADD_QUAD(ctx, children, 1396 OID_AUTO, "no_buff_discards", 1397 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards, 1398 "No. of packets discarded due to lack of buffer"); 1399 1400 SYSCTL_ADD_QUAD(ctx, children, 1401 OID_AUTO, "packet_too_big_discard", 1402 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard, 1403 "No. 
of packets discarded because packet was too big"); 1404 1405 SYSCTL_ADD_QUAD(ctx, children, 1406 OID_AUTO, "ttl0_discard", 1407 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard, 1408 "ttl0_discard"); 1409 1410 SYSCTL_ADD_QUAD(ctx, children, 1411 OID_AUTO, "rx_ucast_bytes", 1412 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes, 1413 "rx_ucast_bytes"); 1414 1415 SYSCTL_ADD_QUAD(ctx, children, 1416 OID_AUTO, "rx_mcast_bytes", 1417 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes, 1418 "rx_mcast_bytes"); 1419 1420 SYSCTL_ADD_QUAD(ctx, children, 1421 OID_AUTO, "rx_bcast_bytes", 1422 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes, 1423 "rx_bcast_bytes"); 1424 1425 SYSCTL_ADD_QUAD(ctx, children, 1426 OID_AUTO, "rx_ucast_pkts", 1427 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts, 1428 "rx_ucast_pkts"); 1429 1430 SYSCTL_ADD_QUAD(ctx, children, 1431 OID_AUTO, "rx_mcast_pkts", 1432 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts, 1433 "rx_mcast_pkts"); 1434 1435 SYSCTL_ADD_QUAD(ctx, children, 1436 OID_AUTO, "rx_bcast_pkts", 1437 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts, 1438 "rx_bcast_pkts"); 1439 1440 SYSCTL_ADD_QUAD(ctx, children, 1441 OID_AUTO, "mftag_filter_discards", 1442 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards, 1443 "mftag_filter_discards"); 1444 1445 SYSCTL_ADD_QUAD(ctx, children, 1446 OID_AUTO, "mac_filter_discards", 1447 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards, 1448 "mac_filter_discards"); 1449 1450 SYSCTL_ADD_QUAD(ctx, children, 1451 OID_AUTO, "tx_ucast_bytes", 1452 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes, 1453 "tx_ucast_bytes"); 1454 1455 SYSCTL_ADD_QUAD(ctx, children, 1456 OID_AUTO, "tx_mcast_bytes", 1457 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes, 1458 "tx_mcast_bytes"); 1459 1460 SYSCTL_ADD_QUAD(ctx, children, 1461 OID_AUTO, "tx_bcast_bytes", 1462 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes, 1463 "tx_bcast_bytes"); 1464 1465 SYSCTL_ADD_QUAD(ctx, children, 1466 OID_AUTO, "tx_ucast_pkts", 1467 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts, 1468 "tx_ucast_pkts"); 1469 1470 SYSCTL_ADD_QUAD(ctx, children, 1471 OID_AUTO, "tx_mcast_pkts", 1472 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts, 1473 "tx_mcast_pkts"); 1474 1475 SYSCTL_ADD_QUAD(ctx, children, 1476 OID_AUTO, "tx_bcast_pkts", 1477 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts, 1478 "tx_bcast_pkts"); 1479 1480 SYSCTL_ADD_QUAD(ctx, children, 1481 OID_AUTO, "tx_err_drop_pkts", 1482 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts, 1483 "tx_err_drop_pkts"); 1484 1485 SYSCTL_ADD_QUAD(ctx, children, 1486 OID_AUTO, "tpa_coalesced_pkts", 1487 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts, 1488 "tpa_coalesced_pkts"); 1489 1490 SYSCTL_ADD_QUAD(ctx, children, 1491 OID_AUTO, "tpa_coalesced_events", 1492 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events, 1493 "tpa_coalesced_events"); 1494 1495 SYSCTL_ADD_QUAD(ctx, children, 1496 OID_AUTO, "tpa_aborts_num", 1497 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num, 1498 "tpa_aborts_num"); 1499 1500 SYSCTL_ADD_QUAD(ctx, children, 1501 OID_AUTO, "tpa_not_coalesced_pkts", 1502 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts, 1503 "tpa_not_coalesced_pkts"); 1504 1505 SYSCTL_ADD_QUAD(ctx, children, 1506 OID_AUTO, "tpa_coalesced_bytes", 1507 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes, 1508 "tpa_coalesced_bytes"); 1509 1510 SYSCTL_ADD_QUAD(ctx, children, 1511 OID_AUTO, "rx_64_byte_packets", 1512 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets, 1513 "rx_64_byte_packets"); 1514 1515 SYSCTL_ADD_QUAD(ctx, children, 1516 OID_AUTO, 
"rx_65_to_127_byte_packets", 1517 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets, 1518 "rx_65_to_127_byte_packets"); 1519 1520 SYSCTL_ADD_QUAD(ctx, children, 1521 OID_AUTO, "rx_128_to_255_byte_packets", 1522 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets, 1523 "rx_128_to_255_byte_packets"); 1524 1525 SYSCTL_ADD_QUAD(ctx, children, 1526 OID_AUTO, "rx_256_to_511_byte_packets", 1527 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets, 1528 "rx_256_to_511_byte_packets"); 1529 1530 SYSCTL_ADD_QUAD(ctx, children, 1531 OID_AUTO, "rx_512_to_1023_byte_packets", 1532 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets, 1533 "rx_512_to_1023_byte_packets"); 1534 1535 SYSCTL_ADD_QUAD(ctx, children, 1536 OID_AUTO, "rx_1024_to_1518_byte_packets", 1537 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets, 1538 "rx_1024_to_1518_byte_packets"); 1539 1540 SYSCTL_ADD_QUAD(ctx, children, 1541 OID_AUTO, "rx_1519_to_1522_byte_packets", 1542 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets, 1543 "rx_1519_to_1522_byte_packets"); 1544 1545 SYSCTL_ADD_QUAD(ctx, children, 1546 OID_AUTO, "rx_1523_to_2047_byte_packets", 1547 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets, 1548 "rx_1523_to_2047_byte_packets"); 1549 1550 SYSCTL_ADD_QUAD(ctx, children, 1551 OID_AUTO, "rx_2048_to_4095_byte_packets", 1552 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets, 1553 "rx_2048_to_4095_byte_packets"); 1554 1555 SYSCTL_ADD_QUAD(ctx, children, 1556 OID_AUTO, "rx_4096_to_9216_byte_packets", 1557 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets, 1558 "rx_4096_to_9216_byte_packets"); 1559 1560 SYSCTL_ADD_QUAD(ctx, children, 1561 OID_AUTO, "rx_9217_to_16383_byte_packets", 1562 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets, 1563 "rx_9217_to_16383_byte_packets"); 1564 1565 SYSCTL_ADD_QUAD(ctx, children, 1566 OID_AUTO, "rx_crc_errors", 1567 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors, 1568 "rx_crc_errors"); 1569 1570 SYSCTL_ADD_QUAD(ctx, children, 1571 OID_AUTO, "rx_mac_crtl_frames", 1572 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames, 1573 "rx_mac_crtl_frames"); 1574 1575 SYSCTL_ADD_QUAD(ctx, children, 1576 OID_AUTO, "rx_pause_frames", 1577 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames, 1578 "rx_pause_frames"); 1579 1580 SYSCTL_ADD_QUAD(ctx, children, 1581 OID_AUTO, "rx_pfc_frames", 1582 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames, 1583 "rx_pfc_frames"); 1584 1585 SYSCTL_ADD_QUAD(ctx, children, 1586 OID_AUTO, "rx_align_errors", 1587 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors, 1588 "rx_align_errors"); 1589 1590 SYSCTL_ADD_QUAD(ctx, children, 1591 OID_AUTO, "rx_carrier_errors", 1592 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors, 1593 "rx_carrier_errors"); 1594 1595 SYSCTL_ADD_QUAD(ctx, children, 1596 OID_AUTO, "rx_oversize_packets", 1597 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets, 1598 "rx_oversize_packets"); 1599 1600 SYSCTL_ADD_QUAD(ctx, children, 1601 OID_AUTO, "rx_jabbers", 1602 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers, 1603 "rx_jabbers"); 1604 1605 SYSCTL_ADD_QUAD(ctx, children, 1606 OID_AUTO, "rx_undersize_packets", 1607 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets, 1608 "rx_undersize_packets"); 1609 1610 SYSCTL_ADD_QUAD(ctx, children, 1611 OID_AUTO, "rx_fragments", 1612 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments, 1613 "rx_fragments"); 1614 1615 SYSCTL_ADD_QUAD(ctx, children, 1616 OID_AUTO, "tx_64_byte_packets", 1617 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets, 1618 
"tx_64_byte_packets"); 1619 1620 SYSCTL_ADD_QUAD(ctx, children, 1621 OID_AUTO, "tx_65_to_127_byte_packets", 1622 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets, 1623 "tx_65_to_127_byte_packets"); 1624 1625 SYSCTL_ADD_QUAD(ctx, children, 1626 OID_AUTO, "tx_128_to_255_byte_packets", 1627 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets, 1628 "tx_128_to_255_byte_packets"); 1629 1630 SYSCTL_ADD_QUAD(ctx, children, 1631 OID_AUTO, "tx_256_to_511_byte_packets", 1632 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets, 1633 "tx_256_to_511_byte_packets"); 1634 1635 SYSCTL_ADD_QUAD(ctx, children, 1636 OID_AUTO, "tx_512_to_1023_byte_packets", 1637 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets, 1638 "tx_512_to_1023_byte_packets"); 1639 1640 SYSCTL_ADD_QUAD(ctx, children, 1641 OID_AUTO, "tx_1024_to_1518_byte_packets", 1642 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets, 1643 "tx_1024_to_1518_byte_packets"); 1644 1645 SYSCTL_ADD_QUAD(ctx, children, 1646 OID_AUTO, "tx_1519_to_2047_byte_packets", 1647 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets, 1648 "tx_1519_to_2047_byte_packets"); 1649 1650 SYSCTL_ADD_QUAD(ctx, children, 1651 OID_AUTO, "tx_2048_to_4095_byte_packets", 1652 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets, 1653 "tx_2048_to_4095_byte_packets"); 1654 1655 SYSCTL_ADD_QUAD(ctx, children, 1656 OID_AUTO, "tx_4096_to_9216_byte_packets", 1657 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets, 1658 "tx_4096_to_9216_byte_packets"); 1659 1660 SYSCTL_ADD_QUAD(ctx, children, 1661 OID_AUTO, "tx_9217_to_16383_byte_packets", 1662 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets, 1663 "tx_9217_to_16383_byte_packets"); 1664 1665 SYSCTL_ADD_QUAD(ctx, children, 1666 OID_AUTO, "tx_pause_frames", 1667 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames, 1668 "tx_pause_frames"); 1669 1670 SYSCTL_ADD_QUAD(ctx, children, 1671 OID_AUTO, "tx_pfc_frames", 1672 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames, 1673 "tx_pfc_frames"); 1674 1675 SYSCTL_ADD_QUAD(ctx, children, 1676 OID_AUTO, "tx_lpi_entry_count", 1677 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count, 1678 "tx_lpi_entry_count"); 1679 1680 SYSCTL_ADD_QUAD(ctx, children, 1681 OID_AUTO, "tx_total_collisions", 1682 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions, 1683 "tx_total_collisions"); 1684 1685 SYSCTL_ADD_QUAD(ctx, children, 1686 OID_AUTO, "brb_truncates", 1687 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates, 1688 "brb_truncates"); 1689 1690 SYSCTL_ADD_QUAD(ctx, children, 1691 OID_AUTO, "brb_discards", 1692 CTLFLAG_RD, &ha->hw_stats.common.brb_discards, 1693 "brb_discards"); 1694 1695 SYSCTL_ADD_QUAD(ctx, children, 1696 OID_AUTO, "rx_mac_bytes", 1697 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes, 1698 "rx_mac_bytes"); 1699 1700 SYSCTL_ADD_QUAD(ctx, children, 1701 OID_AUTO, "rx_mac_uc_packets", 1702 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets, 1703 "rx_mac_uc_packets"); 1704 1705 SYSCTL_ADD_QUAD(ctx, children, 1706 OID_AUTO, "rx_mac_mc_packets", 1707 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets, 1708 "rx_mac_mc_packets"); 1709 1710 SYSCTL_ADD_QUAD(ctx, children, 1711 OID_AUTO, "rx_mac_bc_packets", 1712 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets, 1713 "rx_mac_bc_packets"); 1714 1715 SYSCTL_ADD_QUAD(ctx, children, 1716 OID_AUTO, "rx_mac_frames_ok", 1717 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok, 1718 "rx_mac_frames_ok"); 1719 1720 SYSCTL_ADD_QUAD(ctx, children, 1721 OID_AUTO, "tx_mac_bytes", 1722 CTLFLAG_RD, 
&ha->hw_stats.common.tx_mac_bytes, 1723 "tx_mac_bytes"); 1724 1725 SYSCTL_ADD_QUAD(ctx, children, 1726 OID_AUTO, "tx_mac_uc_packets", 1727 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets, 1728 "tx_mac_uc_packets"); 1729 1730 SYSCTL_ADD_QUAD(ctx, children, 1731 OID_AUTO, "tx_mac_mc_packets", 1732 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets, 1733 "tx_mac_mc_packets"); 1734 1735 SYSCTL_ADD_QUAD(ctx, children, 1736 OID_AUTO, "tx_mac_bc_packets", 1737 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets, 1738 "tx_mac_bc_packets"); 1739 1740 SYSCTL_ADD_QUAD(ctx, children, 1741 OID_AUTO, "tx_mac_ctrl_frames", 1742 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames, 1743 "tx_mac_ctrl_frames"); 1744 return; 1745} 1746 1747static void 1748qlnx_add_sysctls(qlnx_host_t *ha) 1749{ 1750 device_t dev = ha->pci_dev; 1751 struct sysctl_ctx_list *ctx; 1752 struct sysctl_oid_list *children; 1753 1754 ctx = device_get_sysctl_ctx(dev); 1755 children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 1756 1757 qlnx_add_fp_stats_sysctls(ha); 1758 qlnx_add_sp_stats_sysctls(ha); 1759 qlnx_add_hw_stats_sysctls(ha); 1760 1761 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version", 1762 CTLFLAG_RD, qlnx_ver_str, 0, 1763 "Driver Version"); 1764 1765 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version", 1766 CTLFLAG_RD, ha->stormfw_ver, 0, 1767 "STORM Firmware Version"); 1768 1769 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version", 1770 CTLFLAG_RD, ha->mfw_ver, 0, 1771 "Management Firmware Version"); 1772 1773 SYSCTL_ADD_UINT(ctx, children, 1774 OID_AUTO, "personality", CTLFLAG_RD, 1775 &ha->personality, ha->personality, 1776 "\tpersonality = 0 => Ethernet Only\n" 1777 "\tpersonality = 3 => Ethernet and RoCE\n" 1778 "\tpersonality = 4 => Ethernet and iWARP\n" 1779 "\tpersonality = 6 => Default in Shared Memory\n"); 1780 1781 ha->dbg_level = 0; 1782 1783 SYSCTL_ADD_UINT(ctx, children, 1784 OID_AUTO, "debug", CTLFLAG_RW, 1785 &ha->dbg_level, ha->dbg_level, "Debug Level"); 1786 1787 ha->dp_level = 0; 1788 SYSCTL_ADD_UINT(ctx, children, 1789 OID_AUTO, "dp_level", CTLFLAG_RW, 1790 &ha->dp_level, ha->dp_level, "DP Level"); 1791 1792 ha->dbg_trace_lro_cnt = 0; 1793 SYSCTL_ADD_UINT(ctx, children, 1794 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW, 1795 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt, 1796 "Trace LRO Counts"); 1797 1798 ha->dbg_trace_tso_pkt_len = 0; 1799 SYSCTL_ADD_UINT(ctx, children, 1800 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW, 1801 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len, 1802 "Trace TSO packet lengths"); 1803 1804 ha->dp_module = 0; 1805 SYSCTL_ADD_UINT(ctx, children, 1806 OID_AUTO, "dp_module", CTLFLAG_RW, 1807 &ha->dp_module, ha->dp_module, "DP Module"); 1808 1809 ha->err_inject = 0; 1810 1811 SYSCTL_ADD_UINT(ctx, children, 1812 OID_AUTO, "err_inject", CTLFLAG_RW, 1813 &ha->err_inject, ha->err_inject, "Error Inject"); 1814 1815 ha->storm_stats_enable = 0; 1816 1817 SYSCTL_ADD_UINT(ctx, children, 1818 OID_AUTO, "storm_stats_enable", CTLFLAG_RW, 1819 &ha->storm_stats_enable, ha->storm_stats_enable, 1820 "Enable Storm Statistics Gathering"); 1821 1822 ha->storm_stats_index = 0; 1823 1824 SYSCTL_ADD_UINT(ctx, children, 1825 OID_AUTO, "storm_stats_index", CTLFLAG_RD, 1826 &ha->storm_stats_index, ha->storm_stats_index, 1827 "Enable Storm Statistics Gathering Current Index"); 1828 1829 ha->grcdump_taken = 0; 1830 SYSCTL_ADD_UINT(ctx, children, 1831 OID_AUTO, "grcdump_taken", CTLFLAG_RD, 1832 &ha->grcdump_taken, ha->grcdump_taken, "grcdump_taken"); 1833 1834 
ha->idle_chk_taken = 0; 1835 SYSCTL_ADD_UINT(ctx, children, 1836 OID_AUTO, "idle_chk_taken", CTLFLAG_RD, 1837 &ha->idle_chk_taken, ha->idle_chk_taken, "idle_chk_taken"); 1838 1839 SYSCTL_ADD_UINT(ctx, children, 1840 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD, 1841 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs, 1842 "rx_coalesce_usecs"); 1843 1844 SYSCTL_ADD_UINT(ctx, children, 1845 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD, 1846 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs, 1847 "tx_coalesce_usecs"); 1848 1849 ha->rx_pkt_threshold = 128; 1850 SYSCTL_ADD_UINT(ctx, children, 1851 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW, 1852 &ha->rx_pkt_threshold, ha->rx_pkt_threshold, 1853 "No. of Rx Pkts to process at a time"); 1854 1855 ha->rx_jumbo_buf_eq_mtu = 0; 1856 SYSCTL_ADD_UINT(ctx, children, 1857 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW, 1858 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu, 1859 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n" 1860 "otherwise Rx Jumbo buffers are set to >= MTU size\n"); 1861 1862 SYSCTL_ADD_PROC(ctx, children, 1863 OID_AUTO, "trigger_dump", CTLTYPE_INT | CTLFLAG_RW, 1864 (void *)ha, 0, 1865 qlnx_trigger_dump_sysctl, "I", "trigger_dump"); 1866 1867 SYSCTL_ADD_PROC(ctx, children, 1868 OID_AUTO, "set_rx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW, 1869 (void *)ha, 0, 1870 qlnx_set_rx_coalesce, "I", 1871 "rx interrupt coalesce period microseconds"); 1872 1873 SYSCTL_ADD_PROC(ctx, children, 1874 OID_AUTO, "set_tx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW, 1875 (void *)ha, 0, 1876 qlnx_set_tx_coalesce, "I", 1877 "tx interrupt coalesce period microseconds"); 1878 1879 SYSCTL_ADD_QUAD(ctx, children, 1880 OID_AUTO, "err_illegal_intr", CTLFLAG_RD, 1881 &ha->err_illegal_intr, "err_illegal_intr"); 1882 1883 SYSCTL_ADD_QUAD(ctx, children, 1884 OID_AUTO, "err_fp_null", CTLFLAG_RD, 1885 &ha->err_fp_null, "err_fp_null"); 1886 1887 SYSCTL_ADD_QUAD(ctx, children, 1888 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD, 1889 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type"); 1890 return; 1891} 1892 1893 1894 1895/***************************************************************************** 1896 * Operating System Network Interface Functions 1897 *****************************************************************************/ 1898 1899static void 1900qlnx_init_ifnet(device_t dev, qlnx_host_t *ha) 1901{ 1902 uint16_t device_id; 1903 struct ifnet *ifp; 1904 1905 ifp = ha->ifp = if_alloc(IFT_ETHER); 1906 1907 if (ifp == NULL) 1908 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); 1909 1910 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1911 1912 device_id = pci_get_device(ha->pci_dev); 1913 1914#if __FreeBSD_version >= 1000000 1915 1916 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) 1917 ifp->if_baudrate = IF_Gbps(40); 1918 else if (device_id == QLOGIC_PCI_DEVICE_ID_1656) 1919 ifp->if_baudrate = IF_Gbps(25); 1920 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) 1921 ifp->if_baudrate = IF_Gbps(50); 1922 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) 1923 ifp->if_baudrate = IF_Gbps(100); 1924 1925 ifp->if_capabilities = IFCAP_LINKSTATE; 1926#else 1927 ifp->if_mtu = ETHERMTU; 1928 ifp->if_baudrate = (1 * 1000 * 1000 *1000); 1929 1930#endif /* #if __FreeBSD_version >= 1000000 */ 1931 1932 ifp->if_init = qlnx_init; 1933 ifp->if_softc = ha; 1934 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1935 ifp->if_ioctl = qlnx_ioctl; 1936 ifp->if_transmit = qlnx_transmit; 1937 ifp->if_qflush = qlnx_qflush; 1938 1939 IFQ_SET_MAXLEN(&ifp->if_snd, 
qlnx_get_ifq_snd_maxlen(ha)); 1940 ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha); 1941 IFQ_SET_READY(&ifp->if_snd); 1942 1943#if __FreeBSD_version >= 1100036 1944 if_setgetcounterfn(ifp, qlnx_get_counter); 1945#endif 1946 1947 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 1948 1949 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN); 1950 ether_ifattach(ifp, ha->primary_mac); 1951 bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN); 1952 1953 ifp->if_capabilities = IFCAP_HWCSUM; 1954 ifp->if_capabilities |= IFCAP_JUMBO_MTU; 1955 1956 ifp->if_capabilities |= IFCAP_VLAN_MTU; 1957 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 1958 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; 1959 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 1960 ifp->if_capabilities |= IFCAP_VLAN_HWTSO; 1961 ifp->if_capabilities |= IFCAP_TSO4; 1962 ifp->if_capabilities |= IFCAP_TSO6; 1963 ifp->if_capabilities |= IFCAP_LRO; 1964 1965 ifp->if_capenable = ifp->if_capabilities; 1966 1967 ifp->if_hwassist = CSUM_IP; 1968 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP; 1969 ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6; 1970 ifp->if_hwassist |= CSUM_TSO; 1971 1972 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 1973 1974 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\ 1975 qlnx_media_status); 1976 1977 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) { 1978 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL); 1979 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL); 1980 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL); 1981 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1656) { 1982 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL); 1983 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL); 1984 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) { 1985 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL); 1986 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL); 1987 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) { 1988 ifmedia_add(&ha->media, 1989 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL); 1990 ifmedia_add(&ha->media, 1991 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL); 1992 ifmedia_add(&ha->media, 1993 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL); 1994 } 1995 1996 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL); 1997 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); 1998 1999 2000 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); 2001 2002 QL_DPRINT2(ha, (dev, "%s: exit\n", __func__)); 2003 2004 return; 2005} 2006 2007static void 2008qlnx_init_locked(qlnx_host_t *ha) 2009{ 2010 struct ifnet *ifp = ha->ifp; 2011 2012 qlnx_stop(ha); 2013 2014 if (qlnx_load(ha) == 0) { 2015 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2016 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2017 } 2018 2019 return; 2020} 2021 2022static void 2023qlnx_init(void *arg) 2024{ 2025 qlnx_host_t *ha; 2026 2027 ha = (qlnx_host_t *)arg; 2028 2029 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 2030 2031 QLNX_LOCK(ha); 2032 qlnx_init_locked(ha); 2033 QLNX_UNLOCK(ha); 2034 2035 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 2036 2037 return; 2038} 2039 2040static int 2041qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac) 2042{ 2043 struct ecore_filter_mcast *mcast; 2044 struct ecore_dev *cdev; 2045 int rc; 2046 2047 cdev = &ha->cdev; 2048 2049 mcast = &ha->ecore_mcast; 2050 bzero(mcast, sizeof(struct ecore_filter_mcast)); 2051 2052 if (add_mac) 2053 mcast->opcode = ECORE_FILTER_ADD; 2054 else 2055 mcast->opcode = 
ECORE_FILTER_REMOVE;
2056
2057	mcast->num_mc_addrs = 1;
2058	memcpy(mcast->mac, mac_addr, ETH_ALEN);
2059
2060	rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
2061
2062	return (rc);
2063	}
2064
2065	static int
2066	qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
2067	{
2068	int i;
2069
2070	for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2071
2072	if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2073	return 0; /* it has already been added */
2074	}
2075
2076	for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2077
2078	if ((ha->mcast[i].addr[0] == 0) &&
2079	(ha->mcast[i].addr[1] == 0) &&
2080	(ha->mcast[i].addr[2] == 0) &&
2081	(ha->mcast[i].addr[3] == 0) &&
2082	(ha->mcast[i].addr[4] == 0) &&
2083	(ha->mcast[i].addr[5] == 0)) {
2084
2085	if (qlnx_config_mcast_mac_addr(ha, mta, 1))
2086	return (-1);
2087
2088	bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
2089	ha->nmcast++;
2090
2091	return 0;
2092	}
2093	}
2094	return 0;
2095	}
2096
2097	static int
2098	qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
2099	{
2100	int i;
2101
2102	for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
2103	if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
2104
2105	if (qlnx_config_mcast_mac_addr(ha, mta, 0))
2106	return (-1);
2107
2108	ha->mcast[i].addr[0] = 0;
2109	ha->mcast[i].addr[1] = 0;
2110	ha->mcast[i].addr[2] = 0;
2111	ha->mcast[i].addr[3] = 0;
2112	ha->mcast[i].addr[4] = 0;
2113	ha->mcast[i].addr[5] = 0;
2114
2115	ha->nmcast--;
2116
2117	return 0;
2118	}
2119	}
2120	return 0;
2121	}
2122
2123	/*
2124	 * Name: qlnx_hw_set_multi
2125	 * Function: Sets the multicast addresses provided by the host O.S.
2126	 *	into the hardware (for the given interface)
2127	 */
2128	static void
2129	qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
2130	uint32_t add_mac)
2131	{
2132	int i;
2133
2134	for (i = 0; i < mcnt; i++) {
2135	if (add_mac) {
2136	if (qlnx_hw_add_mcast(ha, mta))
2137	break;
2138	} else {
2139	if (qlnx_hw_del_mcast(ha, mta))
2140	break;
2141	}
2142
2143	mta += ETHER_HDR_LEN;
2144	}
2145	return;
2146	}
2147
2148
2149	#define QLNX_MCAST_ADDRS_SIZE (QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN)
2150	static int
2151	qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
2152	{
2153	uint8_t mta[QLNX_MCAST_ADDRS_SIZE];
2154	struct ifmultiaddr *ifma;
2155	int mcnt = 0;
2156	struct ifnet *ifp = ha->ifp;
2157	int ret = 0;
2158
2159	if_maddr_rlock(ifp);
2160
2161	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2162
2163	if (ifma->ifma_addr->sa_family != AF_LINK)
2164	continue;
2165
2166	if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS)
2167	break;
2168
2169	bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2170	&mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN);
2171
2172	mcnt++;
2173	}
2174
2175	if_maddr_runlock(ifp);
2176
2177	QLNX_LOCK(ha);
2178	qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
2179	QLNX_UNLOCK(ha);
2180
2181	return (ret);
2182	}
2183
2184	static int
2185	qlnx_set_promisc(qlnx_host_t *ha)
2186	{
2187	int rc = 0;
2188	uint8_t filter;
2189
2190	filter = ha->filter;
2191	filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2192	filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
2193
2194	rc = qlnx_set_rx_accept_filter(ha, filter);
2195	return (rc);
2196	}
2197
2198	static int
2199	qlnx_set_allmulti(qlnx_host_t *ha)
2200	{
2201	int rc = 0;
2202	uint8_t filter;
2203
2204	filter = ha->filter;
2205	filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
2206	rc = qlnx_set_rx_accept_filter(ha, filter);
2207
2208	return (rc);
2209	}
2210
2211
2212	static int
2213	qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2214	{
2215	int ret = 0, mask;
2216	struct
ifreq *ifr = (struct ifreq *)data; 2217 struct ifaddr *ifa = (struct ifaddr *)data; 2218 qlnx_host_t *ha; 2219 2220 ha = (qlnx_host_t *)ifp->if_softc; 2221 2222 switch (cmd) { 2223 case SIOCSIFADDR: 2224 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n", 2225 __func__, cmd)); 2226 2227 if (ifa->ifa_addr->sa_family == AF_INET) { 2228 ifp->if_flags |= IFF_UP; 2229 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2230 QLNX_LOCK(ha); 2231 qlnx_init_locked(ha); 2232 QLNX_UNLOCK(ha); 2233 } 2234 QL_DPRINT4(ha, (ha->pci_dev, 2235 "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", 2236 __func__, cmd, 2237 ntohl(IA_SIN(ifa)->sin_addr.s_addr))); 2238 2239 arp_ifinit(ifp, ifa); 2240 } else { 2241 ether_ioctl(ifp, cmd, data); 2242 } 2243 break; 2244 2245 case SIOCSIFMTU: 2246 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n", 2247 __func__, cmd)); 2248 2249 if (ifr->ifr_mtu > QLNX_MAX_MTU) { 2250 ret = EINVAL; 2251 } else { 2252 QLNX_LOCK(ha); 2253 ifp->if_mtu = ifr->ifr_mtu; 2254 ha->max_frame_size = 2255 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2256 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2257 qlnx_init_locked(ha); 2258 } 2259 2260 QLNX_UNLOCK(ha); 2261 } 2262 2263 break; 2264 2265 case SIOCSIFFLAGS: 2266 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n", 2267 __func__, cmd)); 2268 2269 QLNX_LOCK(ha); 2270 2271 if (ifp->if_flags & IFF_UP) { 2272 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2273 if ((ifp->if_flags ^ ha->if_flags) & 2274 IFF_PROMISC) { 2275 ret = qlnx_set_promisc(ha); 2276 } else if ((ifp->if_flags ^ ha->if_flags) & 2277 IFF_ALLMULTI) { 2278 ret = qlnx_set_allmulti(ha); 2279 } 2280 } else { 2281 ha->max_frame_size = ifp->if_mtu + 2282 ETHER_HDR_LEN + ETHER_CRC_LEN; 2283 qlnx_init_locked(ha); 2284 } 2285 } else { 2286 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2287 qlnx_stop(ha); 2288 ha->if_flags = ifp->if_flags; 2289 } 2290 2291 QLNX_UNLOCK(ha); 2292 break; 2293 2294 case SIOCADDMULTI: 2295 QL_DPRINT4(ha, (ha->pci_dev, 2296 "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd)); 2297 2298 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2299 if (qlnx_set_multi(ha, 1)) 2300 ret = EINVAL; 2301 } 2302 break; 2303 2304 case SIOCDELMULTI: 2305 QL_DPRINT4(ha, (ha->pci_dev, 2306 "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd)); 2307 2308 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2309 if (qlnx_set_multi(ha, 0)) 2310 ret = EINVAL; 2311 } 2312 break; 2313 2314 case SIOCSIFMEDIA: 2315 case SIOCGIFMEDIA: 2316 QL_DPRINT4(ha, (ha->pci_dev, 2317 "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", 2318 __func__, cmd)); 2319 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); 2320 break; 2321 2322 case SIOCSIFCAP: 2323 2324 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2325 2326 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n", 2327 __func__, cmd)); 2328 2329 if (mask & IFCAP_HWCSUM) 2330 ifp->if_capenable ^= IFCAP_HWCSUM; 2331 if (mask & IFCAP_TSO4) 2332 ifp->if_capenable ^= IFCAP_TSO4; 2333 if (mask & IFCAP_TSO6) 2334 ifp->if_capenable ^= IFCAP_TSO6; 2335 if (mask & IFCAP_VLAN_HWTAGGING) 2336 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2337 if (mask & IFCAP_VLAN_HWTSO) 2338 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 2339 if (mask & IFCAP_LRO) 2340 ifp->if_capenable ^= IFCAP_LRO; 2341 2342 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 2343 qlnx_init(ha); 2344 2345 VLAN_CAPABILITIES(ifp); 2346 break; 2347 2348#if (__FreeBSD_version >= 1100101) 2349 2350 case SIOCGI2C: 2351 { 2352 struct ifi2creq i2c; 2353 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 2354 struct ecore_ptt *p_ptt; 2355 2356 ret = 
copyin(ifr->ifr_data, &i2c, sizeof(i2c)); 2357 2358 if (ret) 2359 break; 2360 2361 if ((i2c.len > sizeof (i2c.data)) || 2362 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) { 2363 ret = EINVAL; 2364 break; 2365 } 2366 2367 p_ptt = ecore_ptt_acquire(p_hwfn); 2368 2369 if (!p_ptt) { 2370 QL_DPRINT1(ha, (ha->pci_dev, "%s :" 2371 " ecore_ptt_acquire failed\n", __func__)); 2372 ret = -1; 2373 break; 2374 } 2375 2376 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 2377 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset, 2378 i2c.len, &i2c.data[0]); 2379 2380 ecore_ptt_release(p_hwfn, p_ptt); 2381 2382 if (ret) { 2383 ret = -1; 2384 break; 2385 } 2386 2387 ret = copyout(&i2c, ifr->ifr_data, sizeof(i2c)); 2388 2389 QL_DPRINT8(ha, (ha->pci_dev, "SIOCGI2C copyout ret = %d" 2390 " len = %d addr = 0x%02x offset = 0x%04x" 2391 " data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x" 2392 " 0x%02x 0x%02x 0x%02x\n", 2393 ret, i2c.len, i2c.dev_addr, i2c.offset, 2394 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3], 2395 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7])); 2396 break; 2397 } 2398#endif /* #if (__FreeBSD_version >= 1100101) */ 2399 2400 default: 2401 QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n", 2402 __func__, cmd)); 2403 ret = ether_ioctl(ifp, cmd, data); 2404 break; 2405 } 2406 2407 return (ret); 2408} 2409 2410static int 2411qlnx_media_change(struct ifnet *ifp) 2412{ 2413 qlnx_host_t *ha; 2414 struct ifmedia *ifm; 2415 int ret = 0; 2416 2417 ha = (qlnx_host_t *)ifp->if_softc; 2418 2419 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 2420 2421 ifm = &ha->media; 2422 2423 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2424 ret = EINVAL; 2425 2426 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 2427 2428 return (ret); 2429} 2430 2431static void 2432qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2433{ 2434 qlnx_host_t *ha; 2435 2436 ha = (qlnx_host_t *)ifp->if_softc; 2437 2438 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 2439 2440 ifmr->ifm_status = IFM_AVALID; 2441 ifmr->ifm_active = IFM_ETHER; 2442 2443 if (ha->link_up) { 2444 ifmr->ifm_status |= IFM_ACTIVE; 2445 ifmr->ifm_active |= 2446 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link)); 2447 2448 if (ha->if_link.link_partner_caps & 2449 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause)) 2450 ifmr->ifm_active |= 2451 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); 2452 } 2453 2454 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__, 2455 (ha->link_up ? 
"link_up" : "link_down"))); 2456 2457 return; 2458} 2459 2460 2461static void 2462qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2463 struct qlnx_tx_queue *txq) 2464{ 2465 u16 idx; 2466 struct mbuf *mp; 2467 bus_dmamap_t map; 2468 int i; 2469 struct eth_tx_bd *tx_data_bd; 2470 struct eth_tx_1st_bd *first_bd; 2471 int nbds = 0; 2472 2473 idx = txq->sw_tx_cons; 2474 mp = txq->sw_tx_ring[idx].mp; 2475 map = txq->sw_tx_ring[idx].map; 2476 2477 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){ 2478 2479 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL); 2480 2481 QL_DPRINT1(ha, (ha->pci_dev, "%s: (mp == NULL) " 2482 " tx_idx = 0x%x" 2483 " ecore_prod_idx = 0x%x" 2484 " ecore_cons_idx = 0x%x" 2485 " hw_bd_cons = 0x%x" 2486 " txq_db_last = 0x%x" 2487 " elem_left = 0x%x\n", 2488 __func__, 2489 fp->rss_id, 2490 ecore_chain_get_prod_idx(&txq->tx_pbl), 2491 ecore_chain_get_cons_idx(&txq->tx_pbl), 2492 le16toh(*txq->hw_cons_ptr), 2493 txq->tx_db.raw, 2494 ecore_chain_get_elem_left(&txq->tx_pbl))); 2495 2496 fp->err_tx_free_pkt_null++; 2497 2498 //DEBUG 2499 qlnx_trigger_dump(ha); 2500 2501 return; 2502 } else { 2503 2504 QLNX_INC_OPACKETS((ha->ifp)); 2505 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len)); 2506 2507 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE); 2508 bus_dmamap_unload(ha->tx_tag, map); 2509 2510 fp->tx_pkts_freed++; 2511 fp->tx_pkts_completed++; 2512 2513 m_freem(mp); 2514 } 2515 2516 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl); 2517 nbds = first_bd->data.nbds; 2518 2519// BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0); 2520 2521 for (i = 1; i < nbds; i++) { 2522 tx_data_bd = ecore_chain_consume(&txq->tx_pbl); 2523// BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0); 2524 } 2525 txq->sw_tx_ring[idx].flags = 0; 2526 txq->sw_tx_ring[idx].mp = NULL; 2527 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0; 2528 2529 return; 2530} 2531 2532static void 2533qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2534 struct qlnx_tx_queue *txq) 2535{ 2536 u16 hw_bd_cons; 2537 u16 ecore_cons_idx; 2538 uint16_t diff; 2539 2540 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 2541 2542 while (hw_bd_cons != 2543 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 2544 2545 if (hw_bd_cons < ecore_cons_idx) { 2546 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons); 2547 } else { 2548 diff = hw_bd_cons - ecore_cons_idx; 2549 } 2550 if ((diff > TX_RING_SIZE) || 2551 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){ 2552 2553 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF); 2554 2555 QL_DPRINT1(ha, (ha->pci_dev, "%s: (diff = 0x%x) " 2556 " tx_idx = 0x%x" 2557 " ecore_prod_idx = 0x%x" 2558 " ecore_cons_idx = 0x%x" 2559 " hw_bd_cons = 0x%x" 2560 " txq_db_last = 0x%x" 2561 " elem_left = 0x%x\n", 2562 __func__, diff, 2563 fp->rss_id, 2564 ecore_chain_get_prod_idx(&txq->tx_pbl), 2565 ecore_chain_get_cons_idx(&txq->tx_pbl), 2566 le16toh(*txq->hw_cons_ptr), 2567 txq->tx_db.raw, 2568 ecore_chain_get_elem_left(&txq->tx_pbl))); 2569 2570 fp->err_tx_cons_idx_conflict++; 2571 2572 //DEBUG 2573 qlnx_trigger_dump(ha); 2574 } 2575 2576 qlnx_free_tx_pkt(ha, fp, txq); 2577 2578 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 2579 } 2580 return; 2581} 2582 2583static int 2584qlnx_transmit(struct ifnet *ifp, struct mbuf *mp) 2585{ 2586 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc; 2587 struct qlnx_fastpath *fp; 2588 int rss_id = 0, ret = 0; 2589 2590 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 2591 2592#if __FreeBSD_version >= 1100000 2593 if 
(M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) 2594#else 2595 if (mp->m_flags & M_FLOWID) 2596#endif 2597 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) % 2598 ha->num_rss; 2599 2600 fp = &ha->fp_array[rss_id]; 2601 2602 if (fp->tx_br == NULL) { 2603 ret = EINVAL; 2604 goto qlnx_transmit_exit; 2605 } 2606 2607 if (mp != NULL) { 2608 ret = drbr_enqueue(ifp, fp->tx_br, mp); 2609 } 2610 2611 if (fp->fp_taskqueue != NULL) 2612 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 2613 2614 ret = 0; 2615 2616qlnx_transmit_exit: 2617 2618 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret)); 2619 return ret; 2620} 2621 2622static void 2623qlnx_qflush(struct ifnet *ifp) 2624{ 2625 int rss_id; 2626 struct qlnx_fastpath *fp; 2627 struct mbuf *mp; 2628 qlnx_host_t *ha; 2629 2630 ha = (qlnx_host_t *)ifp->if_softc; 2631 2632 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 2633 2634 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 2635 2636 fp = &ha->fp_array[rss_id]; 2637 2638 if (fp == NULL) 2639 continue; 2640 2641 if (fp->tx_br) { 2642 mtx_lock(&fp->tx_mtx); 2643 2644 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 2645 fp->tx_pkts_freed++; 2646 m_freem(mp); 2647 } 2648 mtx_unlock(&fp->tx_mtx); 2649 } 2650 } 2651 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 2652 2653 return; 2654} 2655 2656static void 2657qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value) 2658{ 2659 struct ecore_dev *cdev; 2660 uint32_t offset; 2661 2662 cdev = &ha->cdev; 2663 2664 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)cdev->doorbells); 2665 2666 bus_write_4(ha->pci_dbells, offset, value); 2667 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ); 2668 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ); 2669 2670 return; 2671} 2672 2673static uint32_t 2674qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp) 2675{ 2676 struct ether_vlan_header *eh = NULL; 2677 struct ip *ip = NULL; 2678 struct ip6_hdr *ip6 = NULL; 2679 struct tcphdr *th = NULL; 2680 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0; 2681 uint16_t etype = 0; 2682 device_t dev; 2683 uint8_t buf[sizeof(struct ip6_hdr)]; 2684 2685 dev = ha->pci_dev; 2686 2687 eh = mtod(mp, struct ether_vlan_header *); 2688 2689 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2690 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2691 etype = ntohs(eh->evl_proto); 2692 } else { 2693 ehdrlen = ETHER_HDR_LEN; 2694 etype = ntohs(eh->evl_encap_proto); 2695 } 2696 2697 switch (etype) { 2698 2699 case ETHERTYPE_IP: 2700 ip = (struct ip *)(mp->m_data + ehdrlen); 2701 2702 ip_hlen = sizeof (struct ip); 2703 2704 if (mp->m_len < (ehdrlen + ip_hlen)) { 2705 m_copydata(mp, ehdrlen, sizeof(struct ip), buf); 2706 ip = (struct ip *)buf; 2707 } 2708 2709 th = (struct tcphdr *)(ip + 1); 2710 offset = ip_hlen + ehdrlen + (th->th_off << 2); 2711 break; 2712 2713 case ETHERTYPE_IPV6: 2714 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 2715 2716 ip_hlen = sizeof(struct ip6_hdr); 2717 2718 if (mp->m_len < (ehdrlen + ip_hlen)) { 2719 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), 2720 buf); 2721 ip6 = (struct ip6_hdr *)buf; 2722 } 2723 th = (struct tcphdr *)(ip6 + 1); 2724 offset = ip_hlen + ehdrlen + (th->th_off << 2); 2725 break; 2726 2727 default: 2728 break; 2729 } 2730 2731 return (offset); 2732} 2733 2734static __inline int 2735qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs, 2736 uint32_t offset) 2737{ 2738 int i; 2739 uint32_t sum, nbds_in_hdr = 1; 2740 bus_dma_segment_t *t_segs = segs; 
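	/*
	 * Editorial note: the check below mirrors a firmware LSO
	 * constraint -- within any window of ETH_TX_LSO_WINDOW_BDS_NUM
	 * consecutive BDs (minus those spanned by the TCP/IP header),
	 * the mapped payload must amount to at least
	 * ETH_TX_LSO_WINDOW_MIN_LEN bytes; otherwise the caller falls
	 * back to m_defrag(). A minimal sketch of the arithmetic, using
	 * illustrative (assumed) values of 18 BDs per window and a
	 * 9700 byte minimum:
	 *
	 *	nbds_in_hdr = 1;		   header fits in segs[0]
	 *	window = 18 - nbds_in_hdr;	   17 data BDs per window
	 *	sum = ds_len[0] + ... + ds_len[16];
	 *	if (sum < 9700)
	 *		return (-1);		   caller must defragment
	 */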
2741
2742	/* count the number of segments spanned by TCP header */
2743
2744	i = 0;
2745	while ((i < nsegs) && (offset > t_segs->ds_len)) {
2746	nbds_in_hdr++;
2747	offset = offset - t_segs->ds_len;
2748	t_segs++;
2749	i++;
2750	}
2751
2752	while (nsegs >= QLNX_MAX_SEGMENTS_NON_TSO) {
2753
2754	sum = 0;
2755
2756	for (i = 0; i < (ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr); i++){
2757	sum += segs->ds_len;
2758	segs++;
2759	}
2760
2761	if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) {
2762	fp->tx_lso_wnd_min_len++;
2763	return (-1);
2764	}
2765
2766	nsegs -= QLNX_MAX_SEGMENTS_NON_TSO;
2767	}
2768
2769	return (0);
2770	}
2771
2772	static int
2773	qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
2774	{
2775	bus_dma_segment_t *segs;
2776	bus_dmamap_t map = 0;
2777	uint32_t nsegs = 0;
2778	int ret = -1;
2779	struct mbuf *m_head = *m_headp;
2780	uint16_t idx = 0;
2781	uint16_t elem_left;
2782
2783	uint8_t nbd = 0;
2784	struct qlnx_tx_queue *txq;
2785
2786	struct eth_tx_1st_bd *first_bd;
2787	struct eth_tx_2nd_bd *second_bd;
2788	struct eth_tx_3rd_bd *third_bd;
2789	struct eth_tx_bd *tx_data_bd;
2790
2791	int seg_idx = 0;
2792	uint32_t nbds_in_hdr = 0;
2793	uint32_t offset = 0;
2794
2795	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));
2796
2797	if (!ha->link_up)
2798	return (-1);
2799
2800	first_bd = NULL;
2801	second_bd = NULL;
2802	third_bd = NULL;
2803	tx_data_bd = NULL;
2804
2805	txq = fp->txq[0];
2806
2807	if (fp->tx_ring_full) {
2808	elem_left = ecore_chain_get_elem_left(&txq->tx_pbl);
2809
2810	if (elem_left < (TX_RING_SIZE >> 4))
2811	return (-1);
2812	else
2813	fp->tx_ring_full = 0;
2814	}
2815
2816	idx = txq->sw_tx_prod;
2817
2818	map = txq->sw_tx_ring[idx].map;
2819	segs = txq->segs;
2820
2821	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
2822	BUS_DMA_NOWAIT);
2823
2824	if (ha->dbg_trace_tso_pkt_len) {
2825	if (!fp->tx_tso_min_pkt_len) {
2826	fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
2827	fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
2828	} else {
2829	if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
2830	fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
2831	if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
2832	fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
2833	}
2834	}
2835
2836	if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
2837	offset = qlnx_tcp_offset(ha, m_head);
2838
2839	if ((ret == EFBIG) ||
2840	((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && (
2841	(!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
2842	((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
2843	qlnx_tso_check(fp, segs, nsegs, offset))))) {
2844
2845	struct mbuf *m;
2846
2847	QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
2848	m_head->m_pkthdr.len));
2849
2850	fp->tx_defrag++;
2851
2852	m = m_defrag(m_head, M_NOWAIT);
2853	if (m == NULL) {
2854	fp->err_tx_defrag++;
2855	fp->tx_pkts_freed++;
2856	m_freem(m_head);
2857	*m_headp = NULL;
2858	QL_DPRINT1(ha, (ha->pci_dev,
2859	"%s: m_defrag() = NULL [%d]\n",
2860	__func__, ret));
2861	return (ENOBUFS);
2862	}
2863
2864	m_head = m;
2865	*m_headp = m_head;
2866
2867	if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
2868	segs, &nsegs, BUS_DMA_NOWAIT))) {
2869
2870	fp->err_tx_defrag_dmamap_load++;
2871
2872	QL_DPRINT1(ha, (ha->pci_dev,
2873	"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
2874	__func__, ret, m_head->m_pkthdr.len));
2875
2876	fp->tx_pkts_freed++;
2877	m_freem(m_head);
2878	*m_headp = NULL;
2879
2880	return (ret);
2881	}
2882
2883	if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) &&
2884	!(m_head->m_pkthdr.csum_flags
& CSUM_TSO)) { 2885 2886 fp->err_tx_non_tso_max_seg++; 2887 2888 QL_DPRINT1(ha, (ha->pci_dev, 2889 "%s: (%d) nsegs too many for non-TSO[%d, %d]\n", 2890 __func__, ret, nsegs, m_head->m_pkthdr.len)); 2891 2892 fp->tx_pkts_freed++; 2893 m_freem(m_head); 2894 *m_headp = NULL; 2895 2896 return (ret); 2897 } 2898 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 2899 offset = qlnx_tcp_offset(ha, m_head); 2900 2901 } else if (ret) { 2902 2903 fp->err_tx_dmamap_load++; 2904 2905 QL_DPRINT1(ha, (ha->pci_dev, 2906 "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n", 2907 __func__, ret, m_head->m_pkthdr.len)); 2908 2909 fp->tx_pkts_freed++; 2910 m_freem(m_head); 2911 *m_headp = NULL; 2912 return (ret); 2913 } 2914 2915 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet")); 2916 2917 if (ha->dbg_trace_tso_pkt_len) { 2918 if (nsegs < QLNX_FP_MAX_SEGS) 2919 fp->tx_pkts[(nsegs - 1)]++; 2920 else 2921 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++; 2922 } 2923 2924 if ((nsegs + QLNX_TX_ELEM_RESERVE) > 2925 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) { 2926 2927 QL_DPRINT1(ha, (ha->pci_dev, "%s: (%d, 0x%x) insuffient BDs" 2928 "in chain[%d] trying to free packets\n", 2929 __func__, nsegs, elem_left, fp->rss_id)); 2930 2931 fp->tx_nsegs_gt_elem_left++; 2932 2933 (void)qlnx_tx_int(ha, fp, txq); 2934 2935 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left = 2936 ecore_chain_get_elem_left(&txq->tx_pbl))) { 2937 2938 QL_DPRINT1(ha, (ha->pci_dev, 2939 "%s: (%d, 0x%x) insuffient BDs in chain[%d]\n", 2940 __func__, nsegs, elem_left, fp->rss_id)); 2941 2942 fp->err_tx_nsegs_gt_elem_left++; 2943 fp->tx_ring_full = 1; 2944 ha->storm_stats_enable = 1; 2945 return (ENOBUFS); 2946 } 2947 } 2948 2949 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 2950 2951 txq->sw_tx_ring[idx].mp = m_head; 2952 2953 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl); 2954 2955 memset(first_bd, 0, sizeof(*first_bd)); 2956 2957 first_bd->data.bd_flags.bitfields = 2958 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; 2959 2960 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len); 2961 2962 nbd++; 2963 2964 if (m_head->m_pkthdr.csum_flags & CSUM_IP) { 2965 first_bd->data.bd_flags.bitfields |= 2966 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 2967 } 2968 2969 if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP)) { 2970 first_bd->data.bd_flags.bitfields |= 2971 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT); 2972 } 2973 2974 if (m_head->m_flags & M_VLANTAG) { 2975 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag; 2976 first_bd->data.bd_flags.bitfields |= 2977 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT); 2978 } 2979 2980 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 2981 2982 first_bd->data.bd_flags.bitfields |= 2983 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); 2984 first_bd->data.bd_flags.bitfields |= 2985 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 2986 2987 nbds_in_hdr = 1; 2988 2989 if (offset == segs->ds_len) { 2990 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 2991 segs++; 2992 seg_idx++; 2993 2994 second_bd = (struct eth_tx_2nd_bd *) 2995 ecore_chain_produce(&txq->tx_pbl); 2996 memset(second_bd, 0, sizeof(*second_bd)); 2997 nbd++; 2998 2999 if (seg_idx < nsegs) { 3000 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3001 (segs->ds_addr), (segs->ds_len)); 3002 segs++; 3003 seg_idx++; 3004 } 3005 3006 third_bd = (struct eth_tx_3rd_bd *) 3007 ecore_chain_produce(&txq->tx_pbl); 3008 memset(third_bd, 0, sizeof(*third_bd)); 3009 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3010 third_bd->data.bitfields |= 3011 
(nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3012 nbd++; 3013 3014 if (seg_idx < nsegs) { 3015 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3016 (segs->ds_addr), (segs->ds_len)); 3017 segs++; 3018 seg_idx++; 3019 } 3020 3021 for (; seg_idx < nsegs; seg_idx++) { 3022 tx_data_bd = (struct eth_tx_bd *) 3023 ecore_chain_produce(&txq->tx_pbl); 3024 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3025 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3026 segs->ds_addr,\ 3027 segs->ds_len); 3028 segs++; 3029 nbd++; 3030 } 3031 3032 } else if (offset < segs->ds_len) { 3033 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3034 3035 second_bd = (struct eth_tx_2nd_bd *) 3036 ecore_chain_produce(&txq->tx_pbl); 3037 memset(second_bd, 0, sizeof(*second_bd)); 3038 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3039 (segs->ds_addr + offset),\ 3040 (segs->ds_len - offset)); 3041 nbd++; 3042 segs++; 3043 3044 third_bd = (struct eth_tx_3rd_bd *) 3045 ecore_chain_produce(&txq->tx_pbl); 3046 memset(third_bd, 0, sizeof(*third_bd)); 3047 3048 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3049 segs->ds_addr,\ 3050 segs->ds_len); 3051 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3052 third_bd->data.bitfields |= 3053 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3054 segs++; 3055 nbd++; 3056 3057 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) { 3058 tx_data_bd = (struct eth_tx_bd *) 3059 ecore_chain_produce(&txq->tx_pbl); 3060 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3061 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3062 segs->ds_addr,\ 3063 segs->ds_len); 3064 segs++; 3065 nbd++; 3066 } 3067 3068 } else { 3069 offset = offset - segs->ds_len; 3070 segs++; 3071 3072 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3073 3074 if (offset) 3075 nbds_in_hdr++; 3076 3077 tx_data_bd = (struct eth_tx_bd *) 3078 ecore_chain_produce(&txq->tx_pbl); 3079 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3080 3081 if (second_bd == NULL) { 3082 second_bd = (struct eth_tx_2nd_bd *) 3083 tx_data_bd; 3084 } else if (third_bd == NULL) { 3085 third_bd = (struct eth_tx_3rd_bd *) 3086 tx_data_bd; 3087 } 3088 3089 if (offset && (offset < segs->ds_len)) { 3090 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3091 segs->ds_addr, offset); 3092 3093 tx_data_bd = (struct eth_tx_bd *) 3094 ecore_chain_produce(&txq->tx_pbl); 3095 3096 memset(tx_data_bd, 0, 3097 sizeof(*tx_data_bd)); 3098 3099 if (second_bd == NULL) { 3100 second_bd = 3101 (struct eth_tx_2nd_bd *)tx_data_bd; 3102 } else if (third_bd == NULL) { 3103 third_bd = 3104 (struct eth_tx_3rd_bd *)tx_data_bd; 3105 } 3106 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3107 (segs->ds_addr + offset), \ 3108 (segs->ds_len - offset)); 3109 nbd++; 3110 offset = 0; 3111 } else { 3112 if (offset) 3113 offset = offset - segs->ds_len; 3114 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3115 segs->ds_addr, segs->ds_len); 3116 } 3117 segs++; 3118 nbd++; 3119 } 3120 3121 if (third_bd == NULL) { 3122 third_bd = (struct eth_tx_3rd_bd *) 3123 ecore_chain_produce(&txq->tx_pbl); 3124 memset(third_bd, 0, sizeof(*third_bd)); 3125 } 3126 3127 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3128 third_bd->data.bitfields |= 3129 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3130 } 3131 } else { 3132 segs++; 3133 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3134 tx_data_bd = (struct eth_tx_bd *) 3135 ecore_chain_produce(&txq->tx_pbl); 3136 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3137 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\ 3138 segs->ds_len); 3139 segs++; 3140 nbd++; 3141 } 3142 first_bd->data.bitfields = 3143 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) 
3144 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; 3145 first_bd->data.bitfields = 3146 htole16(first_bd->data.bitfields); 3147 } 3148 3149 3150 first_bd->data.nbds = nbd; 3151 3152 if (ha->dbg_trace_tso_pkt_len) { 3153 if (fp->tx_tso_max_nsegs < nsegs) 3154 fp->tx_tso_max_nsegs = nsegs; 3155 3156 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs)) 3157 fp->tx_tso_min_nsegs = nsegs; 3158 } 3159 3160 txq->sw_tx_ring[idx].nsegs = nsegs; 3161 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1); 3162 3163 txq->tx_db.data.bd_prod = 3164 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl)); 3165 3166 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw); 3167 3168 QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__)); 3169 return (0); 3170} 3171 3172static void 3173qlnx_stop(qlnx_host_t *ha) 3174{ 3175 struct ifnet *ifp = ha->ifp; 3176 device_t dev; 3177 int i; 3178 3179 dev = ha->pci_dev; 3180 3181 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); 3182 3183 /* 3184 * We simply lock and unlock each fp->tx_mtx to 3185 * propagate the if_drv_flags 3186 * state to each tx thread 3187 */ 3188 if (ha->state == QLNX_STATE_OPEN) { 3189 for (i = 0; i < ha->num_rss; i++) { 3190 struct qlnx_fastpath *fp = &ha->fp_array[i]; 3191 3192 mtx_lock(&fp->tx_mtx); 3193 mtx_unlock(&fp->tx_mtx); 3194 3195 if (fp->fp_taskqueue != NULL) 3196 taskqueue_enqueue(fp->fp_taskqueue, 3197 &fp->fp_task); 3198 } 3199 } 3200 3201 qlnx_unload(ha); 3202 3203 return; 3204} 3205 3206static int 3207qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha) 3208{ 3209 return(TX_RING_SIZE - 1); 3210} 3211 3212uint8_t * 3213qlnx_get_mac_addr(qlnx_host_t *ha) 3214{ 3215 struct ecore_hwfn *p_hwfn; 3216 3217 p_hwfn = &ha->cdev.hwfns[0]; 3218 return (p_hwfn->hw_info.hw_mac_addr); 3219} 3220 3221static uint32_t 3222qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link) 3223{ 3224 uint32_t ifm_type = 0; 3225 3226 switch (if_link->media_type) { 3227 3228 case MEDIA_MODULE_FIBER: 3229 case MEDIA_UNSPECIFIED: 3230 if (if_link->speed == (100 * 1000)) 3231 ifm_type = QLNX_IFM_100G_SR4; 3232 else if (if_link->speed == (40 * 1000)) 3233 ifm_type = IFM_40G_SR4; 3234 else if (if_link->speed == (25 * 1000)) 3235 ifm_type = QLNX_IFM_25G_SR; 3236 break; 3237 3238 case MEDIA_DA_TWINAX: 3239 if (if_link->speed == (100 * 1000)) 3240 ifm_type = QLNX_IFM_100G_CR4; 3241 else if (if_link->speed == (40 * 1000)) 3242 ifm_type = IFM_40G_CR4; 3243 else if (if_link->speed == (25 * 1000)) 3244 ifm_type = QLNX_IFM_25G_CR; 3245 break; 3246 3247 default : 3248 ifm_type = IFM_UNKNOWN; 3249 break; 3250 } 3251 return (ifm_type); 3252} 3253 3254 3255 3256/***************************************************************************** 3257 * Interrupt Service Functions 3258 *****************************************************************************/ 3259 3260static int 3261qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3262 struct mbuf *mp_head, uint16_t len) 3263{ 3264 struct mbuf *mp, *mpf, *mpl; 3265 struct sw_rx_data *sw_rx_data; 3266 struct qlnx_rx_queue *rxq; 3267 uint16_t len_in_buffer; 3268 3269 rxq = fp->rxq; 3270 mpf = mpl = mp = NULL; 3271 3272 while (len) { 3273 3274 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3275 3276 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3277 mp = sw_rx_data->data; 3278 3279 if (mp == NULL) { 3280 QL_DPRINT1(ha, (ha->pci_dev, "%s: mp = NULL\n", 3281 __func__)); 3282 fp->err_rx_mp_null++; 3283 rxq->sw_rx_cons = 3284 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3285 3286 if (mpf != 
NULL) 3287 m_freem(mpf); 3288 3289 return (-1); 3290 } 3291 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3292 BUS_DMASYNC_POSTREAD); 3293 3294 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3295 3296 QL_DPRINT1(ha, (ha->pci_dev, 3297 "%s: New buffer allocation failed, dropping" 3298 " incoming packet and reusing its buffer\n", 3299 __func__)); 3300 3301 qlnx_reuse_rx_data(rxq); 3302 fp->err_rx_alloc_errors++; 3303 3304 if (mpf != NULL) 3305 m_freem(mpf); 3306 3307 return (-1); 3308 } 3309 ecore_chain_consume(&rxq->rx_bd_ring); 3310 3311 if (len > rxq->rx_buf_size) 3312 len_in_buffer = rxq->rx_buf_size; 3313 else 3314 len_in_buffer = len; 3315 3316 len = len - len_in_buffer; 3317 3318 mp->m_flags &= ~M_PKTHDR; 3319 mp->m_next = NULL; 3320 mp->m_len = len_in_buffer; 3321 3322 if (mpf == NULL) 3323 mpf = mpl = mp; 3324 else { 3325 mpl->m_next = mp; 3326 mpl = mp; 3327 } 3328 } 3329 3330 if (mpf != NULL) 3331 mp_head->m_next = mpf; 3332 3333 return (0); 3334} 3335 3336static void 3337qlnx_tpa_start(qlnx_host_t *ha, 3338 struct qlnx_fastpath *fp, 3339 struct qlnx_rx_queue *rxq, 3340 struct eth_fast_path_rx_tpa_start_cqe *cqe) 3341{ 3342 uint32_t agg_index; 3343 struct ifnet *ifp = ha->ifp; 3344 struct mbuf *mp; 3345 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 3346 struct sw_rx_data *sw_rx_data; 3347 dma_addr_t addr; 3348 bus_dmamap_t map; 3349 struct eth_rx_bd *rx_bd; 3350 int i; 3351 device_t dev; 3352#if __FreeBSD_version >= 1100000 3353 uint8_t hash_type; 3354#endif /* #if __FreeBSD_version >= 1100000 */ 3355 3356 dev = ha->pci_dev; 3357 agg_index = cqe->tpa_agg_index; 3358 3359 QL_DPRINT7(ha, (dev, "%s[%d]: enter\n " 3360 "\t type = 0x%x\n" 3361 "\t bitfields = 0x%x\n" 3362 "\t seg_len = 0x%x\n" 3363 "\t pars_flags = 0x%x\n" 3364 "\t vlan_tag = 0x%x\n" 3365 "\t rss_hash = 0x%x\n" 3366 "\t len_on_first_bd = 0x%x\n" 3367 "\t placement_offset = 0x%x\n" 3368 "\t tpa_agg_index = 0x%x\n" 3369 "\t header_len = 0x%x\n" 3370 "\t ext_bd_len_list[0] = 0x%x\n" 3371 "\t ext_bd_len_list[1] = 0x%x\n" 3372 "\t ext_bd_len_list[2] = 0x%x\n" 3373 "\t ext_bd_len_list[3] = 0x%x\n" 3374 "\t ext_bd_len_list[4] = 0x%x\n", 3375 __func__, fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len, 3376 cqe->pars_flags.flags, cqe->vlan_tag, 3377 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset, 3378 cqe->tpa_agg_index, cqe->header_len, 3379 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1], 3380 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3], 3381 cqe->ext_bd_len_list[4])); 3382 3383 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 3384 fp->err_rx_tpa_invalid_agg_num++; 3385 return; 3386 } 3387 3388 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3389 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD); 3390 mp = sw_rx_data->data; 3391 3392 QL_DPRINT7(ha, (dev, "%s[%d]: mp = %p \n ", __func__, fp->rss_id, mp)); 3393 3394 if (mp == NULL) { 3395 QL_DPRINT7(ha, (dev, "%s[%d]: mp = NULL\n", __func__, 3396 fp->rss_id)); 3397 fp->err_rx_mp_null++; 3398 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3399 3400 return; 3401 } 3402 3403 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) { 3404 3405 QL_DPRINT7(ha, (dev, "%s[%d]: CQE in CONS = %u has error," 3406 " flags = %x, dropping incoming packet\n", __func__, 3407 fp->rss_id, rxq->sw_rx_cons, 3408 le16toh(cqe->pars_flags.flags))); 3409 3410 fp->err_rx_hw_errors++; 3411 3412 qlnx_reuse_rx_data(rxq); 3413 3414 QLNX_INC_IERRORS(ifp); 3415 3416 return; 3417 } 3418 3419 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3420 3421 QL_DPRINT7(ha, (dev, 
"%s[%d]: New buffer allocation failed," 3422 " dropping incoming packet and reusing its buffer\n", 3423 __func__, fp->rss_id)); 3424 3425 fp->err_rx_alloc_errors++; 3426 QLNX_INC_IQDROPS(ifp); 3427 3428 /* 3429 * Load the tpa mbuf into the rx ring and save the 3430 * posted mbuf 3431 */ 3432 3433 map = sw_rx_data->map; 3434 addr = sw_rx_data->dma_addr; 3435 3436 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 3437 3438 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data; 3439 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr; 3440 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map; 3441 3442 rxq->tpa_info[agg_index].rx_buf.data = mp; 3443 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr; 3444 rxq->tpa_info[agg_index].rx_buf.map = map; 3445 3446 rx_bd = (struct eth_rx_bd *) 3447 ecore_chain_produce(&rxq->rx_bd_ring); 3448 3449 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr)); 3450 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr)); 3451 3452 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3453 BUS_DMASYNC_PREREAD); 3454 3455 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 3456 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3457 3458 ecore_chain_consume(&rxq->rx_bd_ring); 3459 3460 /* Now reuse any buffers posted in ext_bd_len_list */ 3461 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 3462 3463 if (cqe->ext_bd_len_list[i] == 0) 3464 break; 3465 3466 qlnx_reuse_rx_data(rxq); 3467 } 3468 3469 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 3470 return; 3471 } 3472 3473 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 3474 3475 QL_DPRINT7(ha, (dev, "%s[%d]: invalid aggregation state," 3476 " dropping incoming packet and reusing its buffer\n", 3477 __func__, fp->rss_id)); 3478 3479 QLNX_INC_IQDROPS(ifp); 3480 3481 /* if we already have mbuf head in aggregation free it */ 3482 if (rxq->tpa_info[agg_index].mpf) { 3483 m_freem(rxq->tpa_info[agg_index].mpf); 3484 rxq->tpa_info[agg_index].mpl = NULL; 3485 } 3486 rxq->tpa_info[agg_index].mpf = mp; 3487 rxq->tpa_info[agg_index].mpl = NULL; 3488 3489 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3490 ecore_chain_consume(&rxq->rx_bd_ring); 3491 3492 /* Now reuse any buffers posted in ext_bd_len_list */ 3493 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 3494 3495 if (cqe->ext_bd_len_list[i] == 0) 3496 break; 3497 3498 qlnx_reuse_rx_data(rxq); 3499 } 3500 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 3501 3502 return; 3503 } 3504 3505 /* 3506 * first process the ext_bd_len_list 3507 * if this fails then we simply drop the packet 3508 */ 3509 ecore_chain_consume(&rxq->rx_bd_ring); 3510 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3511 3512 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 3513 3514 QL_DPRINT7(ha, (dev, "%s[%d]: 4\n ", __func__, fp->rss_id)); 3515 3516 if (cqe->ext_bd_len_list[i] == 0) 3517 break; 3518 3519 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3520 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3521 BUS_DMASYNC_POSTREAD); 3522 3523 mpc = sw_rx_data->data; 3524 3525 if (mpc == NULL) { 3526 QL_DPRINT7(ha, (ha->pci_dev, "%s[%d]: mpc = NULL\n", 3527 __func__, fp->rss_id)); 3528 fp->err_rx_mp_null++; 3529 if (mpf != NULL) 3530 m_freem(mpf); 3531 mpf = mpl = NULL; 3532 rxq->tpa_info[agg_index].agg_state = 3533 QLNX_AGG_STATE_ERROR; 3534 ecore_chain_consume(&rxq->rx_bd_ring); 3535 rxq->sw_rx_cons = 3536 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3537 continue; 3538 } 3539 3540 if 
(qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3541	QL_DPRINT7(ha, (dev,
3542	"%s[%d]: New buffer allocation failed, dropping"
3543	" incoming packet and reusing its buffer\n",
3544	__func__, fp->rss_id));
3545
3546	qlnx_reuse_rx_data(rxq);
3547
3548	if (mpf != NULL)
3549	m_freem(mpf);
3550	mpf = mpl = NULL;
3551
3552	rxq->tpa_info[agg_index].agg_state =
3553	QLNX_AGG_STATE_ERROR;
3554
3555	ecore_chain_consume(&rxq->rx_bd_ring);
3556	rxq->sw_rx_cons =
3557	(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3558
3559	continue;
3560	}
3561
3562	mpc->m_flags &= ~M_PKTHDR;
3563	mpc->m_next = NULL;
3564	mpc->m_len = cqe->ext_bd_len_list[i];
3565
3566
3567	if (mpf == NULL) {
3568	mpf = mpl = mpc;
3569	} else {
3570	mpl->m_len = ha->rx_buf_size;
3571	mpl->m_next = mpc;
3572	mpl = mpc;
3573	}
3574
3575	ecore_chain_consume(&rxq->rx_bd_ring);
3576	rxq->sw_rx_cons =
3577	(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3578	}
3579
3580	if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
3581
3582	QL_DPRINT7(ha, (dev, "%s[%d]: invalid aggregation state,"
3583	" dropping incoming packet and reusing its buffer\n",
3584	__func__, fp->rss_id));
3585
3586	QLNX_INC_IQDROPS(ifp);
3587
3588	rxq->tpa_info[agg_index].mpf = mp;
3589	rxq->tpa_info[agg_index].mpl = NULL;
3590
3591	return;
3592	}
3593
3594	rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
3595
3596	if (mpf != NULL) {
3597	mp->m_len = ha->rx_buf_size;
3598	mp->m_next = mpf;
3599	rxq->tpa_info[agg_index].mpf = mp;
3600	rxq->tpa_info[agg_index].mpl = mpl;
3601	} else {
3602	mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
3603	rxq->tpa_info[agg_index].mpf = mp;
3604	rxq->tpa_info[agg_index].mpl = mp;
3605	mp->m_next = NULL;
3606	}
3607
3608	mp->m_flags |= M_PKTHDR;
3609
3610	/* assign packet to this interface */
3611	mp->m_pkthdr.rcvif = ifp;
3612
3613	/* assume no hardware checksum has completed */
3614	mp->m_pkthdr.csum_flags = 0;
3615
3616	//mp->m_pkthdr.flowid = fp->rss_id;
3617	mp->m_pkthdr.flowid = cqe->rss_hash;
3618
3619	#if __FreeBSD_version >= 1100000
3620
3621	hash_type = cqe->bitfields &
3622	(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
3623	ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
3624
3625	switch (hash_type) {
3626
3627	case RSS_HASH_TYPE_IPV4:
3628	M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
3629	break;
3630
3631	case RSS_HASH_TYPE_TCP_IPV4:
3632	M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
3633	break;
3634
3635	case RSS_HASH_TYPE_IPV6:
3636	M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
3637	break;
3638
3639	case RSS_HASH_TYPE_TCP_IPV6:
3640	M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
3641	break;
3642
3643	default:
3644	M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
3645	break;
3646	}
3647
3648	#else
3649	mp->m_flags |= M_FLOWID;
3650	#endif
3651
3652	mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
3653	CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3654
3655	mp->m_pkthdr.csum_data = 0xFFFF;
3656
3657	if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
3658	mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
3659	mp->m_flags |= M_VLANTAG;
3660	}
3661
3662	rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
3663
3664	QL_DPRINT7(ha, (dev, "%s[%d]: 5\n" "\tagg_state = %d\n"
3665	"\t mpf = %p mpl = %p\n", __func__, fp->rss_id,
3666	rxq->tpa_info[agg_index].agg_state,
3667	rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl));
3668
3669	return;
3670	}
3671
3672	static void
3673	qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3674	struct qlnx_rx_queue *rxq,
3675	struct eth_fast_path_rx_tpa_cont_cqe *cqe)
3676	{
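	/*
	 * Editorial note: a TPA "continuation" CQE means the adapter has
	 * appended more rx buffers to an aggregation started earlier in
	 * qlnx_tpa_start(). cqe->len_list[] gives the number of valid
	 * bytes in each newly consumed buffer, with a zero entry
	 * terminating the list. The buffers are stitched onto the tail
	 * of the per-aggregation mbuf chain, conceptually:
	 *
	 *	tpa_info[agg_index].mpl->m_next = mpf;	first new mbuf
	 *	tpa_info[agg_index].mpl = mpl;		new chain tail
	 *
	 * Each intermediate mbuf is assumed completely filled
	 * (m_len = rx_buf_size); the final length is corrected in
	 * qlnx_tpa_end().
	 */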
3677 struct sw_rx_data *sw_rx_data; 3678 int i; 3679 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 3680 struct mbuf *mp; 3681 uint32_t agg_index; 3682 device_t dev; 3683 3684 dev = ha->pci_dev; 3685 3686 QL_DPRINT7(ha, (dev, "%s[%d]: enter\n " 3687 "\t type = 0x%x\n" 3688 "\t tpa_agg_index = 0x%x\n" 3689 "\t len_list[0] = 0x%x\n" 3690 "\t len_list[1] = 0x%x\n" 3691 "\t len_list[2] = 0x%x\n" 3692 "\t len_list[3] = 0x%x\n" 3693 "\t len_list[4] = 0x%x\n" 3694 "\t len_list[5] = 0x%x\n", 3695 __func__, fp->rss_id, cqe->type, cqe->tpa_agg_index, 3696 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 3697 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5])); 3698 3699 agg_index = cqe->tpa_agg_index; 3700 3701 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 3702 QL_DPRINT7(ha, (dev, "%s[%d]: 0\n ", __func__, fp->rss_id)); 3703 fp->err_rx_tpa_invalid_agg_num++; 3704 return; 3705 } 3706 3707 3708 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) { 3709 3710 QL_DPRINT7(ha, (dev, "%s[%d]: 1\n ", __func__, fp->rss_id)); 3711 3712 if (cqe->len_list[i] == 0) 3713 break; 3714 3715 if (rxq->tpa_info[agg_index].agg_state != 3716 QLNX_AGG_STATE_START) { 3717 qlnx_reuse_rx_data(rxq); 3718 continue; 3719 } 3720 3721 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3722 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3723 BUS_DMASYNC_POSTREAD); 3724 3725 mpc = sw_rx_data->data; 3726 3727 if (mpc == NULL) { 3728 3729 QL_DPRINT7(ha, (dev, "%s[%d]: mpc = NULL\n", 3730 __func__, fp->rss_id)); 3731 3732 fp->err_rx_mp_null++; 3733 if (mpf != NULL) 3734 m_freem(mpf); 3735 mpf = mpl = NULL; 3736 rxq->tpa_info[agg_index].agg_state = 3737 QLNX_AGG_STATE_ERROR; 3738 ecore_chain_consume(&rxq->rx_bd_ring); 3739 rxq->sw_rx_cons = 3740 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3741 continue; 3742 } 3743 3744 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3745 3746 QL_DPRINT7(ha, (dev, 3747 "%s[%d]: New buffer allocation failed, dropping" 3748 " incoming packet and reusing its buffer\n", 3749 __func__, fp->rss_id)); 3750 3751 qlnx_reuse_rx_data(rxq); 3752 3753 if (mpf != NULL) 3754 m_freem(mpf); 3755 mpf = mpl = NULL; 3756 3757 rxq->tpa_info[agg_index].agg_state = 3758 QLNX_AGG_STATE_ERROR; 3759 3760 ecore_chain_consume(&rxq->rx_bd_ring); 3761 rxq->sw_rx_cons = 3762 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3763 3764 continue; 3765 } 3766 3767 mpc->m_flags &= ~M_PKTHDR; 3768 mpc->m_next = NULL; 3769 mpc->m_len = cqe->len_list[i]; 3770 3771 3772 if (mpf == NULL) { 3773 mpf = mpl = mpc; 3774 } else { 3775 mpl->m_len = ha->rx_buf_size; 3776 mpl->m_next = mpc; 3777 mpl = mpc; 3778 } 3779 3780 ecore_chain_consume(&rxq->rx_bd_ring); 3781 rxq->sw_rx_cons = 3782 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3783 } 3784 3785 QL_DPRINT7(ha, (dev, "%s[%d]: 2\n" "\tmpf = %p mpl = %p\n", 3786 __func__, fp->rss_id, mpf, mpl)); 3787 3788 if (mpf != NULL) { 3789 mp = rxq->tpa_info[agg_index].mpl; 3790 mp->m_len = ha->rx_buf_size; 3791 mp->m_next = mpf; 3792 rxq->tpa_info[agg_index].mpl = mpl; 3793 } 3794 3795 return; 3796} 3797 3798static int 3799qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3800 struct qlnx_rx_queue *rxq, 3801 struct eth_fast_path_rx_tpa_end_cqe *cqe) 3802{ 3803 struct sw_rx_data *sw_rx_data; 3804 int i; 3805 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 3806 struct mbuf *mp; 3807 uint32_t agg_index; 3808 uint32_t len = 0; 3809 struct ifnet *ifp = ha->ifp; 3810 device_t dev; 3811 3812 dev = ha->pci_dev; 3813 3814 QL_DPRINT7(ha, (dev, "%s[%d]: enter\n " 3815 "\t type = 0x%x\n" 3816 "\t tpa_agg_index = 0x%x\n" 
3817 "\t total_packet_len = 0x%x\n" 3818 "\t num_of_bds = 0x%x\n" 3819 "\t end_reason = 0x%x\n" 3820 "\t num_of_coalesced_segs = 0x%x\n" 3821 "\t ts_delta = 0x%x\n" 3822 "\t len_list[0] = 0x%x\n" 3823 "\t len_list[1] = 0x%x\n" 3824 "\t len_list[2] = 0x%x\n" 3825 "\t len_list[3] = 0x%x\n", 3826 __func__, fp->rss_id, cqe->type, cqe->tpa_agg_index, 3827 cqe->total_packet_len, cqe->num_of_bds, 3828 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta, 3829 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 3830 cqe->len_list[3])); 3831 3832 agg_index = cqe->tpa_agg_index; 3833 3834 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 3835 3836 QL_DPRINT7(ha, (dev, "%s[%d]: 0\n ", __func__, fp->rss_id)); 3837 3838 fp->err_rx_tpa_invalid_agg_num++; 3839 return (0); 3840 } 3841 3842 3843 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) { 3844 3845 QL_DPRINT7(ha, (dev, "%s[%d]: 1\n ", __func__, fp->rss_id)); 3846 3847 if (cqe->len_list[i] == 0) 3848 break; 3849 3850 if (rxq->tpa_info[agg_index].agg_state != 3851 QLNX_AGG_STATE_START) { 3852 3853 QL_DPRINT7(ha, (dev, "%s[%d]: 2\n ", __func__, 3854 fp->rss_id)); 3855 3856 qlnx_reuse_rx_data(rxq); 3857 continue; 3858 } 3859 3860 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3861 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3862 BUS_DMASYNC_POSTREAD); 3863 3864 mpc = sw_rx_data->data; 3865 3866 if (mpc == NULL) { 3867 3868 QL_DPRINT7(ha, (dev, "%s[%d]: mpc = NULL\n", 3869 __func__, fp->rss_id)); 3870 3871 fp->err_rx_mp_null++; 3872 if (mpf != NULL) 3873 m_freem(mpf); 3874 mpf = mpl = NULL; 3875 rxq->tpa_info[agg_index].agg_state = 3876 QLNX_AGG_STATE_ERROR; 3877 ecore_chain_consume(&rxq->rx_bd_ring); 3878 rxq->sw_rx_cons = 3879 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3880 continue; 3881 } 3882 3883 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3884 QL_DPRINT7(ha, (dev, 3885 "%s[%d]: New buffer allocation failed, dropping" 3886 " incoming packet and reusing its buffer\n", 3887 __func__, fp->rss_id)); 3888 3889 qlnx_reuse_rx_data(rxq); 3890 3891 if (mpf != NULL) 3892 m_freem(mpf); 3893 mpf = mpl = NULL; 3894 3895 rxq->tpa_info[agg_index].agg_state = 3896 QLNX_AGG_STATE_ERROR; 3897 3898 ecore_chain_consume(&rxq->rx_bd_ring); 3899 rxq->sw_rx_cons = 3900 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3901 3902 continue; 3903 } 3904 3905 mpc->m_flags &= ~M_PKTHDR; 3906 mpc->m_next = NULL; 3907 mpc->m_len = cqe->len_list[i]; 3908 3909 3910 if (mpf == NULL) { 3911 mpf = mpl = mpc; 3912 } else { 3913 mpl->m_len = ha->rx_buf_size; 3914 mpl->m_next = mpc; 3915 mpl = mpc; 3916 } 3917 3918 ecore_chain_consume(&rxq->rx_bd_ring); 3919 rxq->sw_rx_cons = 3920 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3921 } 3922 3923 QL_DPRINT7(ha, (dev, "%s[%d]: 5\n ", __func__, fp->rss_id)); 3924 3925 if (mpf != NULL) { 3926 3927 QL_DPRINT7(ha, (dev, "%s[%d]: 6\n ", __func__, fp->rss_id)); 3928 3929 mp = rxq->tpa_info[agg_index].mpl; 3930 mp->m_len = ha->rx_buf_size; 3931 mp->m_next = mpf; 3932 } 3933 3934 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) { 3935 3936 QL_DPRINT7(ha, (dev, "%s[%d]: 7\n ", __func__, fp->rss_id)); 3937 3938 if (rxq->tpa_info[agg_index].mpf != NULL) 3939 m_freem(rxq->tpa_info[agg_index].mpf); 3940 rxq->tpa_info[agg_index].mpf = NULL; 3941 rxq->tpa_info[agg_index].mpl = NULL; 3942 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 3943 return (0); 3944 } 3945 3946 mp = rxq->tpa_info[agg_index].mpf; 3947 m_adj(mp, rxq->tpa_info[agg_index].placement_offset); 3948 mp->m_pkthdr.len = cqe->total_packet_len; 3949 3950 if (mp->m_next == 
NULL) 3951 mp->m_len = mp->m_pkthdr.len; 3952 else { 3953 /* compute the total packet length */ 3954 mpf = mp; 3955 while (mpf != NULL) { 3956 len += mpf->m_len; 3957 mpf = mpf->m_next; 3958 } 3959 3960 if (cqe->total_packet_len > len) { 3961 mpl = rxq->tpa_info[agg_index].mpl; 3962 mpl->m_len += (cqe->total_packet_len - len); 3963 } 3964 } 3965 3966 QLNX_INC_IPACKETS(ifp); 3967 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len)); 3968 3969 QL_DPRINT7(ha, (dev, "%s[%d]: 8 csum_data = 0x%x csum_flags = 0x%lx\n " 3970 "m_len = 0x%x m_pkthdr_len = 0x%x\n", 3971 __func__, fp->rss_id, mp->m_pkthdr.csum_data, 3972 mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len)); 3973 3974 (*ifp->if_input)(ifp, mp); 3975 3976 rxq->tpa_info[agg_index].mpf = NULL; 3977 rxq->tpa_info[agg_index].mpl = NULL; 3978 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 3979 3980 return (cqe->num_of_coalesced_segs); 3981} 3982 3983static int 3984qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 3985 int lro_enable) 3986{ 3987 uint16_t hw_comp_cons, sw_comp_cons; 3988 int rx_pkt = 0; 3989 struct qlnx_rx_queue *rxq = fp->rxq; 3990 struct ifnet *ifp = ha->ifp; 3991 struct ecore_dev *cdev = &ha->cdev; 3992 struct ecore_hwfn *p_hwfn; 3993 3994#ifdef QLNX_SOFT_LRO 3995 struct lro_ctrl *lro; 3996 3997 lro = &rxq->lro; 3998#endif /* #ifdef QLNX_SOFT_LRO */ 3999 4000 hw_comp_cons = le16toh(*rxq->hw_cons_ptr); 4001 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4002 4003 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)]; 4004 4005 /* Memory barrier to prevent the CPU from doing speculative reads of CQE 4006 * / BD in the while-loop before reading hw_comp_cons. If the CQE is 4007 * read before it is written by FW, then FW writes CQE and SB, and then 4008 * the CPU reads the hw_comp_cons, it will use an old CQE. 
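 *
 * (Editorial note: the ordering primitive itself is not shown at this
 * point in the function; in this driver's OSAL layer a read barrier
 * would typically be spelled OSAL_RMB()/rmb(), issued between reading
 * *rxq->hw_cons_ptr and consuming CQEs. That spelling is an assumption
 * for illustration, not a statement about where the barrier actually
 * lives in this revision.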
4009	 */
4010
4011	/* Loop to complete all indicated BDs */
4012	while (sw_comp_cons != hw_comp_cons) {
4013	union eth_rx_cqe *cqe;
4014	struct eth_fast_path_rx_reg_cqe *fp_cqe;
4015	struct sw_rx_data *sw_rx_data;
4016	register struct mbuf *mp;
4017	enum eth_rx_cqe_type cqe_type;
4018	uint16_t len, pad, len_on_first_bd;
4019	uint8_t *data;
4020	#if __FreeBSD_version >= 1100000
4021	uint8_t hash_type;
4022	#endif /* #if __FreeBSD_version >= 1100000 */
4023
4024	/* Get the CQE from the completion ring */
4025	cqe = (union eth_rx_cqe *)
4026	ecore_chain_consume(&rxq->rx_comp_ring);
4027	cqe_type = cqe->fast_path_regular.type;
4028
4029	if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4030	QL_DPRINT3(ha, (ha->pci_dev, "Got a slowpath CQE\n"));
4031
4032	ecore_eth_cqe_completion(p_hwfn,
4033	(struct eth_slow_path_rx_cqe *)cqe);
4034	goto next_cqe;
4035	}
4036
4037	if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4038
4039	switch (cqe_type) {
4040
4041	case ETH_RX_CQE_TYPE_TPA_START:
4042	qlnx_tpa_start(ha, fp, rxq,
4043	&cqe->fast_path_tpa_start);
4044	fp->tpa_start++;
4045	break;
4046
4047	case ETH_RX_CQE_TYPE_TPA_CONT:
4048	qlnx_tpa_cont(ha, fp, rxq,
4049	&cqe->fast_path_tpa_cont);
4050	fp->tpa_cont++;
4051	break;
4052
4053	case ETH_RX_CQE_TYPE_TPA_END:
4054	rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4055	&cqe->fast_path_tpa_end);
4056	fp->tpa_end++;
4057	break;
4058
4059	default:
4060	break;
4061	}
4062
4063	goto next_cqe;
4064	}
4065
4066	/* Get the data from the SW ring */
4067	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4068	mp = sw_rx_data->data;
4069
4070	if (mp == NULL) {
4071	QL_DPRINT1(ha, (ha->pci_dev, "%s: mp = NULL\n",
4072	__func__));
4073	fp->err_rx_mp_null++;
4074	rxq->sw_rx_cons =
4075	(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4076	goto next_cqe;
4077	}
4078	bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4079	BUS_DMASYNC_POSTREAD);
4080
4081	/* non GRO */
4082	fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4083	len = le16toh(fp_cqe->pkt_len);
4084	pad = fp_cqe->placement_offset;
4085
4086	QL_DPRINT3(ha,
4087	(ha->pci_dev, "CQE type = %x, flags = %x, vlan = %x,"
4088	" len %u, parsing flags = %d pad = %d\n",
4089	cqe_type, fp_cqe->bitfields,
4090	le16toh(fp_cqe->vlan_tag),
4091	len, le16toh(fp_cqe->pars_flags.flags), pad));
4092
4093	data = mtod(mp, uint8_t *);
4094	data = data + pad;
4095
4096	if (0)
4097	qlnx_dump_buf8(ha, __func__, data, len);
4098
4099	/* For every Rx BD consumed, we allocate a new BD so the BD ring
4100	 * always has a fixed size. If allocation fails, we take the
4101	 * consumed BD and return it to the ring in the PROD position.
4102	 * The packet that was received on that BD will be dropped (and
4103	 * not passed to the upper stack.
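	 *
	 * (Editorial note: "returning the consumed BD in the PROD
	 * position" is presumably what qlnx_reuse_rx_data() does below --
	 * the same mbuf and DMA map are re-posted at sw_rx_prod, so the
	 * ring never shrinks; the dropped frame is only accounted for
	 * via err_rx_alloc_errors and the IQDROPS counter. This reading
	 * is inferred from the call sites in this loop.)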
4104	 */
4105	/* If this is an error packet then drop it */
4106	if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4107	CQE_FLAGS_ERR) {
4108
4109	QL_DPRINT1(ha, (ha->pci_dev,
4110	"CQE in CONS = %u has error, flags = %x,"
4111	" dropping incoming packet\n", sw_comp_cons,
4112	le16toh(cqe->fast_path_regular.pars_flags.flags)));
4113
4114	fp->err_rx_hw_errors++;
4115
4116	qlnx_reuse_rx_data(rxq);
4117
4118	QLNX_INC_IERRORS(ifp);
4119
4120	goto next_cqe;
4121	}
4122
4123	if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4124
4125	QL_DPRINT1(ha, (ha->pci_dev,
4126	"New buffer allocation failed, dropping"
4127	" incoming packet and reusing its buffer\n"));
4128
4129	qlnx_reuse_rx_data(rxq);
4130
4131	fp->err_rx_alloc_errors++;
4132
4133	QLNX_INC_IQDROPS(ifp);
4134
4135	goto next_cqe;
4136	}
4137
4138	ecore_chain_consume(&rxq->rx_bd_ring);
4139
4140	len_on_first_bd = fp_cqe->len_on_first_bd;
4141	m_adj(mp, pad);
4142	mp->m_pkthdr.len = len;
4143
4144	QL_DPRINT1(ha,
4145	(ha->pci_dev, "%s: len = %d len_on_first_bd = %d\n",
4146	__func__, len, len_on_first_bd));
4147
4148	if ((len > 60 ) && (len > len_on_first_bd)) {
4149
4150	mp->m_len = len_on_first_bd;
4151
4152	if (qlnx_rx_jumbo_chain(ha, fp, mp,
4153	(len - len_on_first_bd)) != 0) {
4154
4155	m_freem(mp);
4156
4157	QLNX_INC_IQDROPS(ifp);
4158
4159	goto next_cqe;
4160	}
4161
4162	} else if (len_on_first_bd < len) {
4163	fp->err_rx_jumbo_chain_pkts++;
4164	} else {
4165	mp->m_len = len;
4166	}
4167
4168	mp->m_flags |= M_PKTHDR;
4169
4170	/* assign packet to this interface */
4171	mp->m_pkthdr.rcvif = ifp;
4172
4173	/* assume no hardware checksum has completed */
4174	mp->m_pkthdr.csum_flags = 0;
4175
4176	mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4177
4178	#if __FreeBSD_version >= 1100000
4179
4180	hash_type = fp_cqe->bitfields &
4181	(ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4182	ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4183
4184	switch (hash_type) {
4185
4186	case RSS_HASH_TYPE_IPV4:
4187	M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4188	break;
4189
4190	case RSS_HASH_TYPE_TCP_IPV4:
4191	M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4192	break;
4193
4194	case RSS_HASH_TYPE_IPV6:
4195	M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4196	break;
4197
4198	case RSS_HASH_TYPE_TCP_IPV6:
4199	M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4200	break;
4201
4202	default:
4203	M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4204	break;
4205	}
4206
4207	#else
4208	mp->m_flags |= M_FLOWID;
4209	#endif
4210
4211	if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4212	mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4213	}
4214
4215	if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4216	mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4217	}
4218
4219	if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4220	mp->m_pkthdr.csum_data = 0xFFFF;
4221	mp->m_pkthdr.csum_flags |=
4222	(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4223	}
4224
4225	if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4226	mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4227	mp->m_flags |= M_VLANTAG;
4228	}
4229
4230	QLNX_INC_IPACKETS(ifp);
4231	QLNX_INC_IBYTES(ifp, len);
4232
4233	#ifdef QLNX_SOFT_LRO
4234
4235	if (lro_enable) {
4236
4237	#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4238
4239	tcp_lro_queue_mbuf(lro, mp);
4240
4241	#else
4242
4243	if (tcp_lro_rx(lro, mp, 0))
4244	(*ifp->if_input)(ifp, mp);
4245
4246	#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4247
4248	} else {
4249	(*ifp->if_input)(ifp, mp);
4250	}
4251	#else
4252
4253	(*ifp->if_input)(ifp, mp);
4254
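	/*
	 * Editorial note on the LRO paths above: tcp_lro_queue_mbuf()
	 * only queues the mbuf for aggregation; the queued packets are
	 * presumably pushed up to the stack by a later
	 * tcp_lro_flush_all() in the polling/taskqueue service loop (not
	 * visible in this function). The older tcp_lro_rx() path
	 * aggregates inline and returns non-zero when the mbuf could not
	 * be absorbed, in which case it is handed directly to
	 * if_input().
	 */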
4255#endif /* #ifdef QLNX_SOFT_LRO */ 4256 4257 rx_pkt++; 4258 4259 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4260 4261next_cqe: /* don't consume bd rx buffer */ 4262 ecore_chain_recycle_consumed(&rxq->rx_comp_ring); 4263 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4264 4265 /* CR TPA - revisit how to handle budget in TPA perhaps 4266 increase on "end" */ 4267 if (rx_pkt == budget) 4268 break; 4269 } /* repeat while sw_comp_cons != hw_comp_cons... */ 4270 4271 /* Update producers */ 4272 qlnx_update_rx_prod(p_hwfn, rxq); 4273 4274 return rx_pkt; 4275} 4276 4277/* 4278 * fast path interrupt 4279 */ 4280 4281static void 4282qlnx_fp_isr(void *arg) 4283{ 4284 qlnx_ivec_t *ivec = arg; 4285 qlnx_host_t *ha; 4286 struct qlnx_fastpath *fp = NULL; 4287 int idx; 4288 4289 ha = ivec->ha; 4290 4291 if (ha->state != QLNX_STATE_OPEN) { 4292 return; 4293 } 4294 4295 idx = ivec->rss_idx; 4296 4297 if ((idx = ivec->rss_idx) >= ha->num_rss) { 4298 QL_DPRINT1(ha, (ha->pci_dev, "%s: illegal interrupt[%d]\n", 4299 __func__, idx)); 4300 ha->err_illegal_intr++; 4301 return; 4302 } 4303 fp = &ha->fp_array[idx]; 4304 4305 if (fp == NULL) { 4306 QL_DPRINT1(ha, (ha->pci_dev, "%s: fp_array[%d] NULL\n", 4307 __func__, idx)); 4308 ha->err_fp_null++; 4309 } else { 4310 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); 4311 if (fp->fp_taskqueue != NULL) 4312 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 4313 } 4314 4315 return; 4316} 4317 4318 4319/* 4320 * slow path interrupt processing function 4321 * can be invoked in polled mode or in interrupt mode via taskqueue. 4322 */ 4323void 4324qlnx_sp_isr(void *arg) 4325{ 4326 struct ecore_hwfn *p_hwfn; 4327 qlnx_host_t *ha; 4328 4329 p_hwfn = arg; 4330 4331 ha = (qlnx_host_t *)p_hwfn->p_dev; 4332 4333 ha->sp_interrupts++; 4334 4335 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 4336 4337 ecore_int_sp_dpc(p_hwfn); 4338 4339 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 4340 4341 return; 4342} 4343 4344/***************************************************************************** 4345 * Support Functions for DMA'able Memory 4346 *****************************************************************************/ 4347 4348static void 4349qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 4350{ 4351 *((bus_addr_t *)arg) = 0; 4352 4353 if (error) { 4354 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); 4355 return; 4356 } 4357 4358 *((bus_addr_t *)arg) = segs[0].ds_addr; 4359 4360 return; 4361} 4362 4363static int 4364qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 4365{ 4366 int ret = 0; 4367 device_t dev; 4368 bus_addr_t b_addr; 4369 4370 dev = ha->pci_dev; 4371 4372 ret = bus_dma_tag_create( 4373 ha->parent_tag,/* parent */ 4374 dma_buf->alignment, 4375 ((bus_size_t)(1ULL << 32)),/* boundary */ 4376 BUS_SPACE_MAXADDR, /* lowaddr */ 4377 BUS_SPACE_MAXADDR, /* highaddr */ 4378 NULL, NULL, /* filter, filterarg */ 4379 dma_buf->size, /* maxsize */ 4380 1, /* nsegments */ 4381 dma_buf->size, /* maxsegsize */ 4382 0, /* flags */ 4383 NULL, NULL, /* lockfunc, lockarg */ 4384 &dma_buf->dma_tag); 4385 4386 if (ret) { 4387 QL_DPRINT1(ha, 4388 (dev, "%s: could not create dma tag\n", __func__)); 4389 goto qlnx_alloc_dmabuf_exit; 4390 } 4391 ret = bus_dmamem_alloc(dma_buf->dma_tag, 4392 (void **)&dma_buf->dma_b, 4393 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), 4394 &dma_buf->dma_map); 4395 if (ret) { 4396 bus_dma_tag_destroy(dma_buf->dma_tag); 4397 QL_DPRINT1(ha, 4398 (dev, "%s: 
bus_dmamem_alloc failed\n", __func__)); 4399 goto qlnx_alloc_dmabuf_exit; 4400 } 4401 4402 ret = bus_dmamap_load(dma_buf->dma_tag, 4403 dma_buf->dma_map, 4404 dma_buf->dma_b, 4405 dma_buf->size, 4406 qlnx_dmamap_callback, 4407 &b_addr, BUS_DMA_NOWAIT); 4408 4409 if (ret || !b_addr) { 4410 bus_dma_tag_destroy(dma_buf->dma_tag); 4411 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, 4412 dma_buf->dma_map); 4413 ret = -1; 4414 goto qlnx_alloc_dmabuf_exit; 4415 } 4416 4417 dma_buf->dma_addr = b_addr; 4418 4419qlnx_alloc_dmabuf_exit: 4420 4421 return ret; 4422} 4423 4424static void 4425qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 4426{ 4427 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); 4428 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); 4429 bus_dma_tag_destroy(dma_buf->dma_tag); 4430 return; 4431} 4432 4433void * 4434qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size) 4435{ 4436 qlnx_dma_t dma_buf; 4437 qlnx_dma_t *dma_p; 4438 qlnx_host_t *ha; 4439 device_t dev; 4440 4441 ha = (qlnx_host_t *)ecore_dev; 4442 dev = ha->pci_dev; 4443 4444 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4445 4446 memset(&dma_buf, 0, sizeof (qlnx_dma_t)); 4447 4448 dma_buf.size = size + PAGE_SIZE; 4449 dma_buf.alignment = 8; 4450 4451 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0) 4452 return (NULL); 4453 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size); 4454 4455 *phys = dma_buf.dma_addr; 4456 4457 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size); 4458 4459 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t)); 4460 4461 QL_DPRINT5(ha, (dev, "%s: [%p %p %p %p 0x%08x ]\n", __func__, 4462 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag, 4463 dma_buf.dma_b, (void *)dma_buf.dma_addr, size)); 4464 4465 return (dma_buf.dma_b); 4466} 4467 4468void 4469qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys, 4470 uint32_t size) 4471{ 4472 qlnx_dma_t dma_buf, *dma_p; 4473 qlnx_host_t *ha; 4474 device_t dev; 4475 4476 ha = (qlnx_host_t *)ecore_dev; 4477 dev = ha->pci_dev; 4478 4479 if (v_addr == NULL) 4480 return; 4481 4482 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4483 4484 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size); 4485 4486 QL_DPRINT5(ha, (dev, "%s: [%p %p %p %p 0x%08x ]\n", __func__, 4487 (void *)dma_p->dma_map, (void *)dma_p->dma_tag, 4488 dma_p->dma_b, (void *)dma_p->dma_addr, size)); 4489 4490 dma_buf = *dma_p; 4491 4492 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf); 4493 return; 4494} 4495 4496static int 4497qlnx_alloc_parent_dma_tag(qlnx_host_t *ha) 4498{ 4499 int ret; 4500 device_t dev; 4501 4502 dev = ha->pci_dev; 4503 4504 /* 4505 * Allocate parent DMA Tag 4506 */ 4507 ret = bus_dma_tag_create( 4508 bus_get_dma_tag(dev), /* parent */ 4509 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ 4510 BUS_SPACE_MAXADDR, /* lowaddr */ 4511 BUS_SPACE_MAXADDR, /* highaddr */ 4512 NULL, NULL, /* filter, filterarg */ 4513 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 4514 0, /* nsegments */ 4515 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 4516 0, /* flags */ 4517 NULL, NULL, /* lockfunc, lockarg */ 4518 &ha->parent_tag); 4519 4520 if (ret) { 4521 QL_DPRINT1(ha, (dev, "%s: could not create parent dma tag\n", 4522 __func__)); 4523 return (-1); 4524 } 4525 4526 ha->flags.parent_tag = 1; 4527 4528 return (0); 4529} 4530 4531static void 4532qlnx_free_parent_dma_tag(qlnx_host_t *ha) 4533{ 4534 if (ha->parent_tag != NULL) { 4535 bus_dma_tag_destroy(ha->parent_tag); 4536 ha->parent_tag = NULL; 4537 } 4538 return; 4539} 4540 
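/*
 * Typical use of qlnx_alloc_dmabuf()/qlnx_free_dmabuf() above; a minimal
 * sketch for illustration only -- the size and alignment values here are
 * hypothetical and not taken from a real caller in this driver:
 */
#if 0
	qlnx_dma_t	dma;

	memset(&dma, 0, sizeof(qlnx_dma_t));
	dma.size = 4096;	/* hypothetical: bytes the device needs */
	dma.alignment = 8;	/* hypothetical: device alignment rule */

	if (qlnx_alloc_dmabuf(ha, &dma) != 0)
		return (-1);	/* tag create, alloc or load failed */

	/*
	 * dma.dma_b is the kernel virtual address; dma.dma_addr is the
	 * bus address captured by qlnx_dmamap_callback(), i.e. the value
	 * to program into the device.
	 */

	qlnx_free_dmabuf(ha, &dma);
#endif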
4541static int 4542qlnx_alloc_tx_dma_tag(qlnx_host_t *ha) 4543{ 4544 if (bus_dma_tag_create(NULL, /* parent */ 4545 1, 0, /* alignment, bounds */ 4546 BUS_SPACE_MAXADDR, /* lowaddr */ 4547 BUS_SPACE_MAXADDR, /* highaddr */ 4548 NULL, NULL, /* filter, filterarg */ 4549 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */ 4550 QLNX_MAX_SEGMENTS, /* nsegments */ 4551 (PAGE_SIZE * 4), /* maxsegsize */ 4552 BUS_DMA_ALLOCNOW, /* flags */ 4553 NULL, /* lockfunc */ 4554 NULL, /* lockfuncarg */ 4555 &ha->tx_tag)) { 4556 4557 QL_DPRINT1(ha, (ha->pci_dev, "%s: tx_tag alloc failed\n", 4558 __func__)); 4559 return (-1); 4560 } 4561 4562 return (0); 4563} 4564 4565static void 4566qlnx_free_tx_dma_tag(qlnx_host_t *ha) 4567{ 4568 if (ha->tx_tag != NULL) { 4569 bus_dma_tag_destroy(ha->tx_tag); 4570 ha->tx_tag = NULL; 4571 } 4572 return; 4573} 4574 4575static int 4576qlnx_alloc_rx_dma_tag(qlnx_host_t *ha) 4577{ 4578 if (bus_dma_tag_create(NULL, /* parent */ 4579 1, 0, /* alignment, bounds */ 4580 BUS_SPACE_MAXADDR, /* lowaddr */ 4581 BUS_SPACE_MAXADDR, /* highaddr */ 4582 NULL, NULL, /* filter, filterarg */ 4583 MJUM9BYTES, /* maxsize */ 4584 1, /* nsegments */ 4585 MJUM9BYTES, /* maxsegsize */ 4586 BUS_DMA_ALLOCNOW, /* flags */ 4587 NULL, /* lockfunc */ 4588 NULL, /* lockfuncarg */ 4589 &ha->rx_tag)) { 4590 4591 QL_DPRINT1(ha, (ha->pci_dev, "%s: rx_tag alloc failed\n", 4592 __func__)); 4593 4594 return (-1); 4595 } 4596 return (0); 4597} 4598 4599static void 4600qlnx_free_rx_dma_tag(qlnx_host_t *ha) 4601{ 4602 if (ha->rx_tag != NULL) { 4603 bus_dma_tag_destroy(ha->rx_tag); 4604 ha->rx_tag = NULL; 4605 } 4606 return; 4607} 4608 4609/********************************* 4610 * Exported functions 4611 *********************************/ 4612uint32_t 4613qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id) 4614{ 4615 uint32_t bar_size; 4616 4617 bar_id = bar_id * 2; 4618 4619 bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev, 4620 SYS_RES_MEMORY, 4621 PCIR_BAR(bar_id)); 4622 4623 return (bar_size); 4624} 4625 4626uint32_t 4627qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value) 4628{ 4629 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 4630 pci_reg, 1); 4631 return 0; 4632} 4633 4634uint32_t 4635qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg, 4636 uint16_t *reg_value) 4637{ 4638 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 4639 pci_reg, 2); 4640 return 0; 4641} 4642 4643uint32_t 4644qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg, 4645 uint32_t *reg_value) 4646{ 4647 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 4648 pci_reg, 4); 4649 return 0; 4650} 4651 4652void 4653qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value) 4654{ 4655 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 4656 pci_reg, reg_value, 1); 4657 return; 4658} 4659 4660void 4661qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg, 4662 uint16_t reg_value) 4663{ 4664 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 4665 pci_reg, reg_value, 2); 4666 return; 4667} 4668 4669void 4670qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg, 4671 uint32_t reg_value) 4672{ 4673 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 4674 pci_reg, reg_value, 4); 4675 return; 4676} 4677 4678 4679int 4680qlnx_pci_find_capability(void *ecore_dev, int cap) 4681{ 4682 int reg; 4683 4684 if (pci_find_cap(((qlnx_host_t *)ecore_dev)->pci_dev, PCIY_EXPRESS, 4685 &reg) == 0) 4686 return reg; 4687 
else { 4688 QL_DPRINT1(((qlnx_host_t *)ecore_dev), 4689 (((qlnx_host_t *)ecore_dev)->pci_dev, 4690 "%s: failed\n", __func__)); 4691 return 0; 4692 } 4693} 4694 4695uint32_t 4696qlnx_reg_rd32(void *hwfn, uint32_t reg_addr) 4697{ 4698 uint32_t data32; 4699 struct ecore_dev *cdev; 4700 struct ecore_hwfn *p_hwfn; 4701 4702 p_hwfn = hwfn; 4703 4704 cdev = p_hwfn->p_dev; 4705 4706 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) - 4707 (uint8_t *)(cdev->regview)) + reg_addr; 4708 4709 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr); 4710 4711 return (data32); 4712} 4713 4714void 4715qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 4716{ 4717 struct ecore_dev *cdev; 4718 struct ecore_hwfn *p_hwfn; 4719 4720 p_hwfn = hwfn; 4721 4722 cdev = p_hwfn->p_dev; 4723 4724 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) - 4725 (uint8_t *)(cdev->regview)) + reg_addr; 4726 4727 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value); 4728 4729 return; 4730} 4731 4732void 4733qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value) 4734{ 4735 struct ecore_dev *cdev; 4736 struct ecore_hwfn *p_hwfn; 4737 4738 p_hwfn = hwfn; 4739 4740 cdev = p_hwfn->p_dev; 4741 4742 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) - 4743 (uint8_t *)(cdev->regview)) + reg_addr; 4744 4745 bus_write_2(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value); 4746 4747 return; 4748} 4749 4750void 4751qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 4752{ 4753 struct ecore_dev *cdev; 4754 struct ecore_hwfn *p_hwfn; 4755 4756 p_hwfn = hwfn; 4757 4758 cdev = p_hwfn->p_dev; 4759 4760 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->doorbells) - 4761 (uint8_t *)(cdev->doorbells)) + reg_addr; 4762 4763 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, reg_addr, value); 4764 4765 return; 4766} 4767 4768uint32_t 4769qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr) 4770{ 4771 uint32_t data32; 4772 uint32_t offset; 4773 struct ecore_dev *cdev; 4774 4775 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 4776 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 4777 4778 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset); 4779 4780 return (data32); 4781} 4782 4783void 4784qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value) 4785{ 4786 uint32_t offset; 4787 struct ecore_dev *cdev; 4788 4789 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 4790 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 4791 4792 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value); 4793 4794 return; 4795} 4796 4797void * 4798qlnx_zalloc(uint32_t size) 4799{ 4800 caddr_t va; 4801 4802 va = malloc((unsigned long)size, M_QLNXBUF, (M_NOWAIT | M_ZERO)); 4803 4804 return ((void *)va); 4805} 4806 4807void 4808qlnx_barrier(void *p_hwfn) 4809{ 4810 qlnx_host_t *ha; 4811 4812 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 4813 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE); 4814} 4815 4816void 4817qlnx_link_update(void *p_hwfn) 4818{ 4819 qlnx_host_t *ha; 4820 int prev_link_state; 4821 4822 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 4823 4824 qlnx_fill_link(p_hwfn, &ha->if_link); 4825 4826 prev_link_state = ha->link_up; 4827 ha->link_up = ha->if_link.link_up; 4828 4829 if (prev_link_state != ha->link_up) { 4830 if (ha->link_up) { 4831 if_link_state_change(ha->ifp, LINK_STATE_UP); 4832 } else { 4833 if_link_state_change(ha->ifp, LINK_STATE_DOWN); 4834 } 4835 } 4836 return; 4837} 4838 4839void 4840qlnx_fill_link(struct ecore_hwfn *hwfn, 
struct qlnx_link_output *if_link) 4841{ 4842 struct ecore_mcp_link_params link_params; 4843 struct ecore_mcp_link_state link_state; 4844 4845 memset(if_link, 0, sizeof(*if_link)); 4846 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params)); 4847 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state)); 4848 4849 /* Prepare source inputs */ 4850 /* we only deal with physical functions */ 4851 memcpy(&link_params, ecore_mcp_get_link_params(hwfn), 4852 sizeof(link_params)); 4853 memcpy(&link_state, ecore_mcp_get_link_state(hwfn), 4854 sizeof(link_state)); 4855 4856 ecore_mcp_get_media_type(hwfn->p_dev, &if_link->media_type); 4857 4858 /* Set the link parameters to pass to protocol driver */ 4859 if (link_state.link_up) { 4860 if_link->link_up = true; 4861 if_link->speed = link_state.speed; 4862 } 4863 4864 if_link->supported_caps = QLNX_LINK_CAP_FIBRE; 4865 4866 if (link_params.speed.autoneg) 4867 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg; 4868 4869 if (link_params.pause.autoneg || 4870 (link_params.pause.forced_rx && link_params.pause.forced_tx)) 4871 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause; 4872 4873 if (link_params.pause.autoneg || link_params.pause.forced_rx || 4874 link_params.pause.forced_tx) 4875 if_link->supported_caps |= QLNX_LINK_CAP_Pause; 4876 4877 if (link_params.speed.advertised_speeds & 4878 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 4879 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half | 4880 QLNX_LINK_CAP_1000baseT_Full; 4881 4882 if (link_params.speed.advertised_speeds & 4883 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 4884 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full; 4885 4886 if (link_params.speed.advertised_speeds & 4887 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 4888 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full; 4889 4890 if (link_params.speed.advertised_speeds & 4891 NVM_CFG1_PORT_DRV_LINK_SPEED_40G) 4892 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full; 4893 4894 if (link_params.speed.advertised_speeds & 4895 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 4896 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 4897 4898 if (link_params.speed.advertised_speeds & 4899 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 4900 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full; 4901 4902 if_link->advertised_caps = if_link->supported_caps; 4903 4904 if_link->autoneg = link_params.speed.autoneg; 4905 if_link->duplex = QLNX_LINK_DUPLEX; 4906 4907 /* Link partner capabilities */ 4908 4909 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD) 4910 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half; 4911 4912 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD) 4913 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full; 4914 4915 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G) 4916 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full; 4917 4918 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G) 4919 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full; 4920 4921 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G) 4922 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full; 4923 4924 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G) 4925 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 4926 4927 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G) 4928 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full; 4929 4930 if 
(link_state.an_complete) 4931 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg; 4932 4933 if (link_state.partner_adv_pause) 4934 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause; 4935 4936 if ((link_state.partner_adv_pause == 4937 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) || 4938 (link_state.partner_adv_pause == 4939 ECORE_LINK_PARTNER_BOTH_PAUSE)) 4940 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause; 4941 4942 return; 4943} 4944 4945static int 4946qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params) 4947{ 4948 int rc, i; 4949 4950 for (i = 0; i < cdev->num_hwfns; i++) { 4951 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 4952 p_hwfn->pf_params = *func_params; 4953 } 4954 4955 rc = ecore_resc_alloc(cdev); 4956 if (rc) 4957 goto qlnx_nic_setup_exit; 4958 4959 ecore_resc_setup(cdev); 4960 4961qlnx_nic_setup_exit: 4962 4963 return rc; 4964} 4965 4966static int 4967qlnx_nic_start(struct ecore_dev *cdev) 4968{ 4969 int rc; 4970 struct ecore_hw_init_params params; 4971 4972 bzero(&params, sizeof (struct ecore_hw_init_params)); 4973 4974 params.p_tunn = NULL; 4975 params.b_hw_start = true; 4976 params.int_mode = cdev->int_mode; 4977 params.allow_npar_tx_switch = true; 4978 params.bin_fw_data = NULL; 4979 4980 rc = ecore_hw_init(cdev, &params); 4981 if (rc) { 4982 ecore_resc_free(cdev); 4983 return rc; 4984 } 4985 4986 return 0; 4987} 4988 4989static int 4990qlnx_slowpath_start(qlnx_host_t *ha) 4991{ 4992 struct ecore_dev *cdev; 4993 struct ecore_pf_params pf_params; 4994 int rc; 4995 4996 memset(&pf_params, 0, sizeof(struct ecore_pf_params)); 4997 pf_params.eth_pf_params.num_cons = 4998 (ha->num_rss) * (ha->num_tc + 1); 4999 5000 cdev = &ha->cdev; 5001 5002 rc = qlnx_nic_setup(cdev, &pf_params); 5003 if (rc) 5004 goto qlnx_slowpath_start_exit; 5005 5006 cdev->int_mode = ECORE_INT_MODE_MSIX; 5007 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE; 5008 5009#ifdef QLNX_MAX_COALESCE 5010 cdev->rx_coalesce_usecs = 255; 5011 cdev->tx_coalesce_usecs = 255; 5012#endif 5013 5014 rc = qlnx_nic_start(cdev); 5015 5016 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs; 5017 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs; 5018 5019qlnx_slowpath_start_exit: 5020 5021 return (rc); 5022} 5023 5024static int 5025qlnx_slowpath_stop(qlnx_host_t *ha) 5026{ 5027 struct ecore_dev *cdev; 5028 device_t dev = ha->pci_dev; 5029 int i; 5030 5031 cdev = &ha->cdev; 5032 5033 ecore_hw_stop(cdev); 5034 5035 for (i = 0; i < ha->cdev.num_hwfns; i++) { 5036 5037 if (ha->sp_handle[i]) 5038 (void)bus_teardown_intr(dev, ha->sp_irq[i], 5039 ha->sp_handle[i]); 5040 5041 ha->sp_handle[i] = NULL; 5042 5043 if (ha->sp_irq[i]) 5044 (void) bus_release_resource(dev, SYS_RES_IRQ, 5045 ha->sp_irq_rid[i], ha->sp_irq[i]); 5046 ha->sp_irq[i] = NULL; 5047 } 5048 5049 ecore_resc_free(cdev); 5050 5051 return 0; 5052} 5053 5054static void 5055qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 5056 char ver_str[VER_SIZE]) 5057{ 5058 int i; 5059 5060 memcpy(cdev->name, name, NAME_SIZE); 5061 5062 for_each_hwfn(cdev, i) { 5063 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 5064 } 5065 5066 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD; 5067 5068 return; 5069} 5070 5071void 5072qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats) 5073{ 5074 enum ecore_mcp_protocol_type type; 5075 union ecore_mcp_protocol_stats *stats; 5076 struct ecore_eth_stats eth_stats; 5077 device_t dev; 5078 5079 dev = ((qlnx_host_t *)cdev)->pci_dev; 5080 stats = proto_stats; 5081 type = proto_type; 5082 5083 switch (type) { 5084
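	/*
	 * Only Ethernet (LAN) statistics are implemented here; any other
	 * protocol type falls through to the default case and is counted
	 * via err_get_proto_invalid_type.
	 */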
case ECORE_MCP_LAN_STATS: 5085 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats); 5086 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts; 5087 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts; 5088 stats->lan_stats.fcs_err = -1; 5089 break; 5090 5091 default: 5092 ((qlnx_host_t *)cdev)->err_get_proto_invalid_type++; 5093 5094 QL_DPRINT1(((qlnx_host_t *)cdev), 5095 (dev, "%s: invalid protocol type 0x%x\n", __func__, 5096 type)); 5097 break; 5098 } 5099 return; 5100} 5101 5102static int 5103qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver) 5104{ 5105 struct ecore_hwfn *p_hwfn; 5106 struct ecore_ptt *p_ptt; 5107 5108 p_hwfn = &ha->cdev.hwfns[0]; 5109 p_ptt = ecore_ptt_acquire(p_hwfn); 5110 5111 if (p_ptt == NULL) { 5112 QL_DPRINT1(ha, (ha->pci_dev, 5113 "%s : ecore_ptt_acquire failed\n", __func__)); 5114 return (-1); 5115 } 5116 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL); 5117 5118 ecore_ptt_release(p_hwfn, p_ptt); 5119 5120 return (0); 5121} 5122 5123static int 5124qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size) 5125{ 5126 struct ecore_hwfn *p_hwfn; 5127 struct ecore_ptt *p_ptt; 5128 5129 p_hwfn = &ha->cdev.hwfns[0]; 5130 p_ptt = ecore_ptt_acquire(p_hwfn); 5131 5132 if (p_ptt == NULL) { 5133 QL_DPRINT1(ha, (ha->pci_dev, 5134 "%s : ecore_ptt_acquire failed\n", __func__)); 5135 return (-1); 5136 } 5137 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size); 5138 5139 ecore_ptt_release(p_hwfn, p_ptt); 5140 5141 return (0); 5142} 5143 5144static int 5145qlnx_alloc_mem_arrays(qlnx_host_t *ha) 5146{ 5147 struct ecore_dev *cdev; 5148 5149 cdev = &ha->cdev; 5150 5151 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS)); 5152 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS)); 5153 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS)); 5154 5155 return 0; 5156} 5157 5158static void 5159qlnx_init_fp(qlnx_host_t *ha) 5160{ 5161 int rss_id, txq_array_index, tc; 5162 5163 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 5164 5165 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 5166 5167 fp->rss_id = rss_id; 5168 fp->edev = ha; 5169 fp->sb_info = &ha->sb_array[rss_id]; 5170 fp->rxq = &ha->rxq_array[rss_id]; 5171 fp->rxq->rxq_id = rss_id; 5172 5173 for (tc = 0; tc < ha->num_tc; tc++) { 5174 txq_array_index = tc * ha->num_rss + rss_id; 5175 fp->txq[tc] = &ha->txq_array[txq_array_index]; 5176 fp->txq[tc]->index = txq_array_index; 5177 } 5178 5179 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str, 5180 rss_id); 5181 5182 fp->tx_ring_full = 0; 5183 5184 /* reset all the statistics counters */ 5185 5186 fp->tx_pkts_processed = 0; 5187 fp->tx_pkts_freed = 0; 5188 fp->tx_pkts_transmitted = 0; 5189 fp->tx_pkts_completed = 0; 5190 fp->tx_lso_wnd_min_len = 0; 5191 fp->tx_defrag = 0; 5192 fp->tx_nsegs_gt_elem_left = 0; 5193 fp->tx_tso_max_nsegs = 0; 5194 fp->tx_tso_min_nsegs = 0; 5195 fp->err_tx_nsegs_gt_elem_left = 0; 5196 fp->err_tx_dmamap_create = 0; 5197 fp->err_tx_defrag_dmamap_load = 0; 5198 fp->err_tx_non_tso_max_seg = 0; 5199 fp->err_tx_dmamap_load = 0; 5200 fp->err_tx_defrag = 0; 5201 fp->err_tx_free_pkt_null = 0; 5202 fp->err_tx_cons_idx_conflict = 0; 5203 5204 fp->rx_pkts = 0; 5205 fp->err_m_getcl = 0; 5206 fp->err_m_getjcl = 0; 5207 } 5208 return; 5209} 5210 5211static void 5212qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info) 5213{ 5214 struct ecore_dev *cdev; 5215 5216 cdev = &ha->cdev; 5217 5218 if (sb_info->sb_virt) { 5219 OSAL_DMA_FREE_COHERENT(cdev, 
((void *)sb_info->sb_virt), 5220 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt))); 5221 sb_info->sb_virt = NULL; 5222 } 5223} 5224 5225static int 5226qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info, 5227 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id) 5228{ 5229 struct ecore_hwfn *p_hwfn; 5230 int hwfn_index, rc; 5231 u16 rel_sb_id; 5232 5233 hwfn_index = sb_id % cdev->num_hwfns; 5234 p_hwfn = &cdev->hwfns[hwfn_index]; 5235 rel_sb_id = sb_id / cdev->num_hwfns; 5236 5237 QL_DPRINT2(((qlnx_host_t *)cdev), (((qlnx_host_t *)cdev)->pci_dev, 5238 "%s: hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x " 5239 "sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n", 5240 __func__, hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info, 5241 sb_virt_addr, (void *)sb_phy_addr)); 5242 5243 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, 5244 sb_virt_addr, sb_phy_addr, rel_sb_id); 5245 5246 return rc; 5247} 5248 5249/* This function allocates fast-path status block memory */ 5250static int 5251qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id) 5252{ 5253 struct status_block *sb_virt; 5254 bus_addr_t sb_phys; 5255 int rc; 5256 uint32_t size; 5257 struct ecore_dev *cdev; 5258 5259 cdev = &ha->cdev; 5260 5261 size = sizeof(*sb_virt); 5262 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size); 5263 5264 if (!sb_virt) { 5265 QL_DPRINT1(ha, (ha->pci_dev, 5266 "%s: Status block allocation failed\n", __func__)); 5267 return -ENOMEM; 5268 } 5269 5270 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id); 5271 if (rc) { 5272 QL_DPRINT1(ha, (ha->pci_dev, "%s: failed\n", __func__)); 5273 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size); 5274 } 5275 5276 return rc; 5277} 5278 5279static void 5280qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5281{ 5282 int i; 5283 struct sw_rx_data *rx_buf; 5284 5285 for (i = 0; i < rxq->num_rx_buffers; i++) { 5286 5287 rx_buf = &rxq->sw_rx_ring[i]; 5288 5289 if (rx_buf->data != NULL) { 5290 if (rx_buf->map != NULL) { 5291 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 5292 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 5293 rx_buf->map = NULL; 5294 } 5295 m_freem(rx_buf->data); 5296 rx_buf->data = NULL; 5297 } 5298 } 5299 return; 5300} 5301 5302static void 5303qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5304{ 5305 struct ecore_dev *cdev; 5306 int i; 5307 5308 cdev = &ha->cdev; 5309 5310 qlnx_free_rx_buffers(ha, rxq); 5311 5312 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 5313 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]); 5314 if (rxq->tpa_info[i].mpf != NULL) 5315 m_freem(rxq->tpa_info[i].mpf); 5316 } 5317 5318 bzero((void *)&rxq->sw_rx_ring[0], 5319 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 5320 5321 /* Free the real RQ ring used by FW */ 5322 if (rxq->rx_bd_ring.p_virt_addr) { 5323 ecore_chain_free(cdev, &rxq->rx_bd_ring); 5324 rxq->rx_bd_ring.p_virt_addr = NULL; 5325 } 5326 5327 /* Free the real completion ring used by FW */ 5328 if (rxq->rx_comp_ring.p_virt_addr && 5329 rxq->rx_comp_ring.pbl_sp.p_virt_table) { 5330 ecore_chain_free(cdev, &rxq->rx_comp_ring); 5331 rxq->rx_comp_ring.p_virt_addr = NULL; 5332 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL; 5333 } 5334 5335#ifdef QLNX_SOFT_LRO 5336 { 5337 struct lro_ctrl *lro; 5338 5339 lro = &rxq->lro; 5340 tcp_lro_free(lro); 5341 } 5342#endif /* #ifdef QLNX_SOFT_LRO */ 5343 5344 return; 5345} 5346 5347static int 5348qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5349{ 5350 register struct mbuf *mp; 5351 uint16_t 
rx_buf_size; 5352 struct sw_rx_data *sw_rx_data; 5353 struct eth_rx_bd *rx_bd; 5354 dma_addr_t dma_addr; 5355 bus_dmamap_t map; 5356 bus_dma_segment_t segs[1]; 5357 int nsegs; 5358 int ret; 5359 struct ecore_dev *cdev; 5360 5361 cdev = &ha->cdev; 5362 5363 rx_buf_size = rxq->rx_buf_size; 5364 5365 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 5366 5367 if (mp == NULL) { 5368 QL_DPRINT1(ha, (ha->pci_dev, 5369 "%s : Failed to allocate Rx data\n", __func__)); 5370 return -ENOMEM; 5371 } 5372 5373 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 5374 5375 map = (bus_dmamap_t)0; 5376 5377 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 5378 BUS_DMA_NOWAIT); 5379 dma_addr = segs[0].ds_addr; 5380 5381 if (ret || !dma_addr || (nsegs != 1)) { 5382 m_freem(mp); 5383 QL_DPRINT1(ha, (ha->pci_dev, 5384 "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 5385 __func__, ret, (long long unsigned int)dma_addr, 5386 nsegs)); 5387 return -ENOMEM; 5388 } 5389 5390 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 5391 sw_rx_data->data = mp; 5392 sw_rx_data->dma_addr = dma_addr; 5393 sw_rx_data->map = map; 5394 5395 /* Advance PROD and get BD pointer */ 5396 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring); 5397 rx_bd->addr.hi = htole32(U64_HI(dma_addr)); 5398 rx_bd->addr.lo = htole32(U64_LO(dma_addr)); 5399 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 5400 5401 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 5402 5403 return 0; 5404} 5405 5406static int 5407qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 5408 struct qlnx_agg_info *tpa) 5409{ 5410 struct mbuf *mp; 5411 dma_addr_t dma_addr; 5412 bus_dmamap_t map; 5413 bus_dma_segment_t segs[1]; 5414 int nsegs; 5415 int ret; 5416 struct sw_rx_data *rx_buf; 5417 5418 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 5419 5420 if (mp == NULL) { 5421 QL_DPRINT1(ha, (ha->pci_dev, 5422 "%s : Failed to allocate Rx data\n", __func__)); 5423 return -ENOMEM; 5424 } 5425 5426 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 5427 5428 map = (bus_dmamap_t)0; 5429 5430 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 5431 BUS_DMA_NOWAIT); 5432 dma_addr = segs[0].ds_addr; 5433 5434 if (ret || !dma_addr || (nsegs != 1)) { 5435 m_freem(mp); 5436 QL_DPRINT1(ha, (ha->pci_dev, 5437 "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 5438 __func__, ret, (long long unsigned int)dma_addr, 5439 nsegs)); 5440 return -ENOMEM; 5441 } 5442 5443 rx_buf = &tpa->rx_buf; 5444 5445 memset(rx_buf, 0, sizeof (struct sw_rx_data)); 5446 5447 rx_buf->data = mp; 5448 rx_buf->dma_addr = dma_addr; 5449 rx_buf->map = map; 5450 5451 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 5452 5453 return (0); 5454} 5455 5456static void 5457qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa) 5458{ 5459 struct sw_rx_data *rx_buf; 5460 5461 rx_buf = &tpa->rx_buf; 5462 5463 if (rx_buf->data != NULL) { 5464 if (rx_buf->map != NULL) { 5465 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 5466 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 5467 rx_buf->map = NULL; 5468 } 5469 m_freem(rx_buf->data); 5470 rx_buf->data = NULL; 5471 } 5472 return; 5473} 5474 5475/* This function allocates all memory needed per Rx queue */ 5476static int 5477qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5478{ 5479 int i, rc, num_allocated; 5480 struct ifnet *ifp; 5481 struct ecore_dev *cdev; 5482 5483 cdev = &ha->cdev; 5484 ifp = ha->ifp; 5485 5486 rxq->num_rx_buffers = RX_RING_SIZE; 5487 5488 rxq->rx_buf_size = 
ha->rx_buf_size; 5489 5490 /* Allocate the parallel driver ring for Rx buffers */ 5491 bzero((void *)&rxq->sw_rx_ring[0], 5492 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 5493 5494 /* Allocate FW Rx ring */ 5495 5496 rc = ecore_chain_alloc(cdev, 5497 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 5498 ECORE_CHAIN_MODE_NEXT_PTR, 5499 ECORE_CHAIN_CNT_TYPE_U16, 5500 RX_RING_SIZE, 5501 sizeof(struct eth_rx_bd), 5502 &rxq->rx_bd_ring, NULL); 5503 5504 if (rc) 5505 goto err; 5506 5507 /* Allocate FW completion ring */ 5508 rc = ecore_chain_alloc(cdev, 5509 ECORE_CHAIN_USE_TO_CONSUME, 5510 ECORE_CHAIN_MODE_PBL, 5511 ECORE_CHAIN_CNT_TYPE_U16, 5512 RX_RING_SIZE, 5513 sizeof(union eth_rx_cqe), 5514 &rxq->rx_comp_ring, NULL); 5515 5516 if (rc) 5517 goto err; 5518 5519 /* Allocate buffers for the Rx ring */ 5520 5521 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 5522 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size, 5523 &rxq->tpa_info[i]); 5524 if (rc) 5525 break; 5526 5527 } 5528 5529 for (i = 0; i < rxq->num_rx_buffers; i++) { 5530 rc = qlnx_alloc_rx_buffer(ha, rxq); 5531 if (rc) 5532 break; 5533 } 5534 num_allocated = i; 5535 if (!num_allocated) { 5536 QL_DPRINT1(ha, (ha->pci_dev, 5537 "%s: Rx buffers allocation failed\n", __func__)); 5538 goto err; 5539 } else if (num_allocated < rxq->num_rx_buffers) { 5540 QL_DPRINT1(ha, (ha->pci_dev, 5541 "%s: Allocated less buffers than" 5542 " desired (%d allocated)\n", __func__, num_allocated)); 5543 } 5544 5545#ifdef QLNX_SOFT_LRO 5546 5547 { 5548 struct lro_ctrl *lro; 5549 5550 lro = &rxq->lro; 5551 5552#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 5553 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) { 5554 QL_DPRINT1(ha, (ha->pci_dev, 5555 "%s: tcp_lro_init[%d] failed\n", 5556 __func__, rxq->rxq_id)); 5557 goto err; 5558 } 5559#else 5560 if (tcp_lro_init(lro)) { 5561 QL_DPRINT1(ha, (ha->pci_dev, 5562 "%s: tcp_lro_init[%d] failed\n", 5563 __func__, rxq->rxq_id)); 5564 goto err; 5565 } 5566#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 5567 5568 lro->ifp = ha->ifp; 5569 } 5570#endif /* #ifdef QLNX_SOFT_LRO */ 5571 return 0; 5572 5573err: 5574 qlnx_free_mem_rxq(ha, rxq); 5575 return -ENOMEM; 5576} 5577 5578 5579static void 5580qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 5581 struct qlnx_tx_queue *txq) 5582{ 5583 struct ecore_dev *cdev; 5584 5585 cdev = &ha->cdev; 5586 5587 bzero((void *)&txq->sw_tx_ring[0], 5588 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 5589 5590 /* Free the real RQ ring used by FW */ 5591 if (txq->tx_pbl.p_virt_addr) { 5592 ecore_chain_free(cdev, &txq->tx_pbl); 5593 txq->tx_pbl.p_virt_addr = NULL; 5594 } 5595 return; 5596} 5597 5598/* This function allocates all memory needed per Tx queue */ 5599static int 5600qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 5601 struct qlnx_tx_queue *txq) 5602{ 5603 int ret = ECORE_SUCCESS; 5604 union eth_tx_bd_types *p_virt; 5605 struct ecore_dev *cdev; 5606 5607 cdev = &ha->cdev; 5608 5609 bzero((void *)&txq->sw_tx_ring[0], 5610 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 5611 5612 /* Allocate the real Tx ring to be used by FW */ 5613 ret = ecore_chain_alloc(cdev, 5614 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 5615 ECORE_CHAIN_MODE_PBL, 5616 ECORE_CHAIN_CNT_TYPE_U16, 5617 TX_RING_SIZE, 5618 sizeof(*p_virt), 5619 &txq->tx_pbl, NULL); 5620 5621 if (ret != ECORE_SUCCESS) { 5622 goto err; 5623 } 5624 5625 txq->num_tx_buffers = TX_RING_SIZE; 5626 5627 return 0; 5628 5629err: 5630 qlnx_free_mem_txq(ha, fp, txq); 5631 return -ENOMEM; 5632} 5633 
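/*
 * Besides the FW chain allocated above, each fastpath owns a buf_ring
 * (fp->tx_br) that decouples if_transmit() callers from the context that
 * actually posts Tx BDs.  A minimal sketch of the producer side, assuming
 * the standard drbr pattern and the driver's qlnx_send() routine
 * (illustration only, not the driver's exact transmit path):
 */
#if 0
	if ((ret = drbr_enqueue(ifp, fp->tx_br, mp)) != 0)
		return (ret);		/* ring full, mbuf not queued */

	if (mtx_trylock(&fp->tx_mtx)) {
		while ((mp = drbr_peek(ifp, fp->tx_br)) != NULL) {
			if (qlnx_send(ha, fp, &mp) != 0) {
				if (mp != NULL)
					drbr_putback(ifp, fp->tx_br, mp);
				else
					drbr_advance(ifp, fp->tx_br);
				break;
			}
			drbr_advance(ifp, fp->tx_br);
		}
		mtx_unlock(&fp->tx_mtx);
	} else {
		/* another context holds the lock; let the task drain */
		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
	}
#endif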
5634static void 5635qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 5636{ 5637 struct mbuf *mp; 5638 struct ifnet *ifp = ha->ifp; 5639 5640 if (mtx_initialized(&fp->tx_mtx)) { 5641 5642 if (fp->tx_br != NULL) { 5643 5644 mtx_lock(&fp->tx_mtx); 5645 5646 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 5647 fp->tx_pkts_freed++; 5648 m_freem(mp); 5649 } 5650 5651 mtx_unlock(&fp->tx_mtx); 5652 5653 buf_ring_free(fp->tx_br, M_DEVBUF); 5654 fp->tx_br = NULL; 5655 } 5656 mtx_destroy(&fp->tx_mtx); 5657 } 5658 return; 5659} 5660 5661static void 5662qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 5663{ 5664 int tc; 5665 5666 qlnx_free_mem_sb(ha, fp->sb_info); 5667 5668 qlnx_free_mem_rxq(ha, fp->rxq); 5669 5670 for (tc = 0; tc < ha->num_tc; tc++) 5671 qlnx_free_mem_txq(ha, fp, fp->txq[tc]); 5672 5673 return; 5674} 5675 5676static int 5677qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 5678{ 5679 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 5680 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id); 5681 5682 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 5683 5684 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF, 5685 M_NOWAIT, &fp->tx_mtx); 5686 if (fp->tx_br == NULL) { 5687 QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for " 5688 " fp[%d, %d]\n", ha->dev_unit, fp->rss_id)); 5689 return -ENOMEM; 5690 } 5691 return 0; 5692} 5693 5694static int 5695qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 5696{ 5697 int rc, tc; 5698 5699 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id); 5700 if (rc) 5701 goto err; 5702 5703 if (ha->rx_jumbo_buf_eq_mtu) { 5704 if (ha->max_frame_size <= MCLBYTES) 5705 ha->rx_buf_size = MCLBYTES; 5706 else if (ha->max_frame_size <= MJUMPAGESIZE) 5707 ha->rx_buf_size = MJUMPAGESIZE; 5708 else if (ha->max_frame_size <= MJUM9BYTES) 5709 ha->rx_buf_size = MJUM9BYTES; 5710 else if (ha->max_frame_size <= MJUM16BYTES) 5711 ha->rx_buf_size = MJUM16BYTES; 5712 } else { 5713 if (ha->max_frame_size <= MCLBYTES) 5714 ha->rx_buf_size = MCLBYTES; 5715 else 5716 ha->rx_buf_size = MJUMPAGESIZE; 5717 } 5718 5719 rc = qlnx_alloc_mem_rxq(ha, fp->rxq); 5720 if (rc) 5721 goto err; 5722 5723 for (tc = 0; tc < ha->num_tc; tc++) { 5724 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]); 5725 if (rc) 5726 goto err; 5727 } 5728 5729 return 0; 5730 5731err: 5732 qlnx_free_mem_fp(ha, fp); 5733 return -ENOMEM; 5734} 5735 5736static void 5737qlnx_free_mem_load(qlnx_host_t *ha) 5738{ 5739 int i; 5740 struct ecore_dev *cdev; 5741 5742 cdev = &ha->cdev; 5743 5744 for (i = 0; i < ha->num_rss; i++) { 5745 struct qlnx_fastpath *fp = &ha->fp_array[i]; 5746 5747 qlnx_free_mem_fp(ha, fp); 5748 } 5749 return; 5750} 5751 5752static int 5753qlnx_alloc_mem_load(qlnx_host_t *ha) 5754{ 5755 int rc = 0, rss_id; 5756 5757 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 5758 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 5759 5760 rc = qlnx_alloc_mem_fp(ha, fp); 5761 if (rc) 5762 break; 5763 } 5764 return (rc); 5765} 5766 5767static int 5768qlnx_start_vport(struct ecore_dev *cdev, 5769 u8 vport_id, 5770 u16 mtu, 5771 u8 drop_ttl0_flg, 5772 u8 inner_vlan_removal_en_flg, 5773 u8 tx_switching, 5774 u8 hw_lro_enable) 5775{ 5776 int rc, i; 5777 struct ecore_sp_vport_start_params vport_start_params = { 0 }; 5778 qlnx_host_t *ha; 5779 5780 ha = (qlnx_host_t *)cdev; 5781 5782 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg; 5783 vport_start_params.tx_switching = 0; 5784 vport_start_params.handle_ptp_pkts = 0; 5785 
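	/*
	 * The remaining fields mirror the function arguments; note that
	 * tpa_mode below enables hardware RSC only when the caller asked
	 * for hardware LRO.
	 */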
vport_start_params.only_untagged = 0; 5786 vport_start_params.drop_ttl0 = drop_ttl0_flg; 5787 5788 vport_start_params.tpa_mode = 5789 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE); 5790 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 5791 5792 vport_start_params.vport_id = vport_id; 5793 vport_start_params.mtu = mtu; 5794 5795 5796 QL_DPRINT2(ha, (ha->pci_dev, "%s: setting mtu to %d\n", __func__, mtu)); 5797 5798 for_each_hwfn(cdev, i) { 5799 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 5800 5801 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid; 5802 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 5803 5804 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params); 5805 5806 if (rc) { 5807 QL_DPRINT1(ha, (ha->pci_dev, 5808 "%s: Failed to start V-PORT %d " 5809 "with MTU %d\n", __func__, vport_id, mtu)); 5810 return -ENOMEM; 5811 } 5812 5813 ecore_hw_start_fastpath(p_hwfn); 5814 5815 QL_DPRINT2(ha, (ha->pci_dev, 5816 "%s: Started V-PORT %d with MTU %d\n", 5817 __func__, vport_id, mtu)); 5818 } 5819 return 0; 5820} 5821 5822 5823static int 5824qlnx_update_vport(struct ecore_dev *cdev, 5825 struct qlnx_update_vport_params *params) 5826{ 5827 struct ecore_sp_vport_update_params sp_params; 5828 int rc, i, j, fp_index; 5829 struct ecore_hwfn *p_hwfn; 5830 struct ecore_rss_params *rss; 5831 qlnx_host_t *ha = (qlnx_host_t *)cdev; 5832 struct qlnx_fastpath *fp; 5833 5834 memset(&sp_params, 0, sizeof(sp_params)); 5835 /* Translate protocol params into sp params */ 5836 sp_params.vport_id = params->vport_id; 5837 5838 sp_params.update_vport_active_rx_flg = 5839 params->update_vport_active_rx_flg; 5840 sp_params.vport_active_rx_flg = params->vport_active_rx_flg; 5841 5842 sp_params.update_vport_active_tx_flg = 5843 params->update_vport_active_tx_flg; 5844 sp_params.vport_active_tx_flg = params->vport_active_tx_flg; 5845 5846 sp_params.update_inner_vlan_removal_flg = 5847 params->update_inner_vlan_removal_flg; 5848 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg; 5849 5850 sp_params.sge_tpa_params = params->sge_tpa_params; 5851 5852 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. 5853 * We need to re-fix the rss values per engine for CMT. 
5854 */ 5855 5856 sp_params.rss_params = params->rss_params; 5857 5858 for_each_hwfn(cdev, i) { 5859 5860 p_hwfn = &cdev->hwfns[i]; 5861 5862 if ((cdev->num_hwfns > 1) && 5863 params->rss_params->update_rss_config && 5864 params->rss_params->rss_enable) { 5865 5866 rss = params->rss_params; 5867 5868 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) { 5869 5870 fp_index = ((cdev->num_hwfns * j) + i) % 5871 ha->num_rss; 5872 5873 fp = &ha->fp_array[fp_index]; 5874 rss->rss_ind_table[j] = fp->rxq->handle; 5875 } 5876 5877 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) { 5878 QL_DPRINT3(ha, (ha->pci_dev, 5879 "%p %p %p %p %p %p %p %p \n", 5880 rss->rss_ind_table[j], 5881 rss->rss_ind_table[j+1], 5882 rss->rss_ind_table[j+2], 5883 rss->rss_ind_table[j+3], 5884 rss->rss_ind_table[j+4], 5885 rss->rss_ind_table[j+5], 5886 rss->rss_ind_table[j+6], 5887 rss->rss_ind_table[j+7])); 5888 j += 8; 5889 } 5890 } 5891 5892 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 5893 rc = ecore_sp_vport_update(p_hwfn, &sp_params, 5894 ECORE_SPQ_MODE_EBLOCK, NULL); 5895 if (rc) { 5896 QL_DPRINT1(ha, (ha->pci_dev, 5897 "%s:Failed to update VPORT\n", __func__)); 5898 return rc; 5899 } 5900 5901 QL_DPRINT2(ha, (ha->pci_dev, 5902 "%s: Updated V-PORT %d: tx_active_flag %d," 5903 "rx_active_flag %d [tx_update %d], [rx_update %d]\n", 5904 __func__, 5905 params->vport_id, params->vport_active_tx_flg, 5906 params->vport_active_rx_flg, 5907 params->update_vport_active_tx_flg, 5908 params->update_vport_active_rx_flg)); 5909 } 5910 5911 return 0; 5912} 5913 5914static void 5915qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq) 5916{ 5917 struct eth_rx_bd *rx_bd_cons = 5918 ecore_chain_consume(&rxq->rx_bd_ring); 5919 struct eth_rx_bd *rx_bd_prod = 5920 ecore_chain_produce(&rxq->rx_bd_ring); 5921 struct sw_rx_data *sw_rx_data_cons = 5922 &rxq->sw_rx_ring[rxq->sw_rx_cons]; 5923 struct sw_rx_data *sw_rx_data_prod = 5924 &rxq->sw_rx_ring[rxq->sw_rx_prod]; 5925 5926 sw_rx_data_prod->data = sw_rx_data_cons->data; 5927 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd)); 5928 5929 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 5930 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 5931 5932 return; 5933} 5934 5935static void 5936qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq) 5937{ 5938 5939 uint16_t bd_prod; 5940 uint16_t cqe_prod; 5941 union { 5942 struct eth_rx_prod_data rx_prod_data; 5943 uint32_t data32; 5944 } rx_prods; 5945 5946 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring); 5947 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring); 5948 5949 /* Update producers */ 5950 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod); 5951 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod); 5952 5953 /* Make sure that the BD and SGE data is updated before updating the 5954 * producers since FW might read the BD/SGE right after the producer 5955 * is updated. 5956 */ 5957 wmb(); 5958 5959 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr, 5960 sizeof(rx_prods), &rx_prods.data32); 5961 5962 /* mmiowb is needed to synchronize doorbell writes from more than one 5963 * processor. It guarantees that the write arrives to the device before 5964 * the napi lock is released and another qlnx_poll is called (possibly 5965 * on another CPU). Without this barrier, the next doorbell can bypass 5966 * this doorbell. This is applicable to IA64/Altix systems. 
5967 */ 5968 wmb(); 5969 5970 return; 5971} 5972 5973static uint32_t qlnx_hash_key[] = { 5974 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda), 5975 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2), 5976 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d), 5977 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0), 5978 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb), 5979 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4), 5980 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3), 5981 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c), 5982 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b), 5983 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)}; 5984 5985static int 5986qlnx_start_queues(qlnx_host_t *ha) 5987{ 5988 int rc, tc, i, vport_id = 0, 5989 drop_ttl0_flg = 1, vlan_removal_en = 1, 5990 tx_switching = 0, hw_lro_enable = 0; 5991 struct ecore_dev *cdev = &ha->cdev; 5992 struct ecore_rss_params *rss_params = &ha->rss_params; 5993 struct qlnx_update_vport_params vport_update_params; 5994 struct ifnet *ifp; 5995 struct ecore_hwfn *p_hwfn; 5996 struct ecore_sge_tpa_params tpa_params; 5997 struct ecore_queue_start_common_params qparams; 5998 struct qlnx_fastpath *fp; 5999 6000 ifp = ha->ifp; 6001 6002 if (!ha->num_rss) { 6003 QL_DPRINT1(ha, (ha->pci_dev, 6004 "%s: Cannot update V-PORT to active as there" 6005 " are no Rx queues\n", __func__)); 6006 return -EINVAL; 6007 } 6008 6009#ifndef QLNX_SOFT_LRO 6010 hw_lro_enable = ifp->if_capenable & IFCAP_LRO; 6011#endif /* #ifndef QLNX_SOFT_LRO */ 6012 6013 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg, 6014 vlan_removal_en, tx_switching, hw_lro_enable); 6015 6016 if (rc) { 6017 QL_DPRINT1(ha, (ha->pci_dev, 6018 "%s: Start V-PORT failed %d\n", __func__, rc)); 6019 return rc; 6020 } 6021 6022 QL_DPRINT2(ha, (ha->pci_dev, 6023 "%s: Start vport ramrod passed," 6024 " vport_id = %d, MTU = %d, vlan_removal_en = %d\n", __func__, 6025 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en)); 6026 6027 for_each_rss(i) { 6028 struct ecore_rxq_start_ret_params rx_ret_params; 6029 struct ecore_txq_start_ret_params tx_ret_params; 6030 6031 fp = &ha->fp_array[i]; 6032 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)]; 6033 6034 bzero(&qparams, sizeof(struct ecore_queue_start_common_params)); 6035 bzero(&rx_ret_params, 6036 sizeof (struct ecore_rxq_start_ret_params)); 6037 6038 qparams.queue_id = i; 6039 qparams.vport_id = vport_id; 6040 qparams.stats_id = vport_id; 6041 qparams.p_sb = fp->sb_info; 6042 qparams.sb_idx = RX_PI; 6043 6044 6045 rc = ecore_eth_rx_queue_start(p_hwfn, 6046 p_hwfn->hw_info.opaque_fid, 6047 &qparams, 6048 fp->rxq->rx_buf_size, /* bd_max_bytes */ 6049 /* bd_chain_phys_addr */ 6050 fp->rxq->rx_bd_ring.p_phys_addr, 6051 /* cqe_pbl_addr */ 6052 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring), 6053 /* cqe_pbl_size */ 6054 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring), 6055 &rx_ret_params); 6056 6057 if (rc) { 6058 QL_DPRINT1(ha, (ha->pci_dev, 6059 "%s: Start RXQ #%d failed %d\n", __func__, 6060 i, rc)); 6061 return rc; 6062 } 6063 6064 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod; 6065 fp->rxq->handle = rx_ret_params.p_handle; 6066 fp->rxq->hw_cons_ptr = 6067 &fp->sb_info->sb_virt->pi_array[RX_PI]; 6068 6069 qlnx_update_rx_prod(p_hwfn, fp->rxq); 6070 6071 for (tc = 0; tc < ha->num_tc; tc++) { 6072 struct qlnx_tx_queue *txq = fp->txq[tc]; 6073 6074 bzero(&qparams, 6075 sizeof(struct ecore_queue_start_common_params)); 6076 bzero(&tx_ret_params, 6077 sizeof (struct ecore_txq_start_ret_params)); 6078 6079 qparams.queue_id = txq->index / cdev->num_hwfns; 6080 
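			/*
			 * Queues are striped round-robin across engines, so
			 * the engine-relative queue id is the global index
			 * divided by the engine count.  E.g. with two hwfns,
			 * global txq index 5 is served by hwfn 1 (5 % 2) as
			 * its relative queue 2 (5 / 2), matching the sb_id
			 * mapping in qlnx_sb_init() above.
			 */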
qparams.vport_id = vport_id; 6081 qparams.stats_id = vport_id; 6082 qparams.p_sb = fp->sb_info; 6083 qparams.sb_idx = TX_PI(tc); 6084 6085 rc = ecore_eth_tx_queue_start(p_hwfn, 6086 p_hwfn->hw_info.opaque_fid, 6087 &qparams, tc, 6088 /* bd_chain_phys_addr */ 6089 ecore_chain_get_pbl_phys(&txq->tx_pbl), 6090 ecore_chain_get_page_cnt(&txq->tx_pbl), 6091 &tx_ret_params); 6092 6093 if (rc) { 6094 QL_DPRINT1(ha, (ha->pci_dev, 6095 "%s: Start TXQ #%d failed %d\n", 6096 __func__, txq->index, rc)); 6097 return rc; 6098 } 6099 6100 txq->doorbell_addr = tx_ret_params.p_doorbell; 6101 txq->handle = tx_ret_params.p_handle; 6102 6103 txq->hw_cons_ptr = 6104 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)]; 6105 SET_FIELD(txq->tx_db.data.params, 6106 ETH_DB_DATA_DEST, DB_DEST_XCM); 6107 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, 6108 DB_AGG_CMD_SET); 6109 SET_FIELD(txq->tx_db.data.params, 6110 ETH_DB_DATA_AGG_VAL_SEL, 6111 DQ_XCM_ETH_TX_BD_PROD_CMD); 6112 6113 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; 6114 } 6115 } 6116 6117 /* Fill struct with RSS params */ 6118 if (ha->num_rss > 1) { 6119 6120 rss_params->update_rss_config = 1; 6121 rss_params->rss_enable = 1; 6122 rss_params->update_rss_capabilities = 1; 6123 rss_params->update_rss_ind_table = 1; 6124 rss_params->update_rss_key = 1; 6125 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 | 6126 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP; 6127 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */ 6128 6129 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 6130 fp = &ha->fp_array[(i % ha->num_rss)]; 6131 rss_params->rss_ind_table[i] = fp->rxq->handle; 6132 } 6133 6134 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 6135 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i]; 6136 6137 } else { 6138 memset(rss_params, 0, sizeof(*rss_params)); 6139 } 6140 6141 6142 /* Prepare and send the vport enable */ 6143 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6144 vport_update_params.vport_id = vport_id; 6145 vport_update_params.update_vport_active_tx_flg = 1; 6146 vport_update_params.vport_active_tx_flg = 1; 6147 vport_update_params.update_vport_active_rx_flg = 1; 6148 vport_update_params.vport_active_rx_flg = 1; 6149 vport_update_params.rss_params = rss_params; 6150 vport_update_params.update_inner_vlan_removal_flg = 1; 6151 vport_update_params.inner_vlan_removal_flg = 1; 6152 6153 if (hw_lro_enable) { 6154 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params)); 6155 6156 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 6157 6158 tpa_params.update_tpa_en_flg = 1; 6159 tpa_params.tpa_ipv4_en_flg = 1; 6160 tpa_params.tpa_ipv6_en_flg = 1; 6161 6162 tpa_params.update_tpa_param_flg = 1; 6163 tpa_params.tpa_pkt_split_flg = 0; 6164 tpa_params.tpa_hdr_data_split_flg = 0; 6165 tpa_params.tpa_gro_consistent_flg = 0; 6166 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 6167 tpa_params.tpa_max_size = (uint16_t)(-1); 6168 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2; 6169 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2; 6170 6171 vport_update_params.sge_tpa_params = &tpa_params; 6172 } 6173 6174 rc = qlnx_update_vport(cdev, &vport_update_params); 6175 if (rc) { 6176 QL_DPRINT1(ha, (ha->pci_dev, 6177 "%s: Update V-PORT failed %d\n", __func__, rc)); 6178 return rc; 6179 } 6180 6181 return 0; 6182} 6183 6184static int 6185qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6186 struct qlnx_tx_queue *txq) 6187{ 6188 uint16_t hw_bd_cons; 6189 uint16_t ecore_cons_idx; 6190 6191 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", 
__func__)); 6192 6193 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 6194 6195 while (hw_bd_cons != 6196 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 6197 6198 mtx_lock(&fp->tx_mtx); 6199 6200 (void)qlnx_tx_int(ha, fp, txq); 6201 6202 mtx_unlock(&fp->tx_mtx); 6203 6204 qlnx_mdelay(__func__, 2); 6205 6206 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 6207 } 6208 6209 QL_DPRINT2(ha, (ha->pci_dev, "%s[%d, %d]: done\n", __func__, 6210 fp->rss_id, txq->index)); 6211 6212 return 0; 6213} 6214 6215static int 6216qlnx_stop_queues(qlnx_host_t *ha) 6217{ 6218 struct qlnx_update_vport_params vport_update_params; 6219 struct ecore_dev *cdev; 6220 struct qlnx_fastpath *fp; 6221 int rc, tc, i; 6222 6223 cdev = &ha->cdev; 6224 6225 /* Disable the vport */ 6226 6227 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6228 6229 vport_update_params.vport_id = 0; 6230 vport_update_params.update_vport_active_tx_flg = 1; 6231 vport_update_params.vport_active_tx_flg = 0; 6232 vport_update_params.update_vport_active_rx_flg = 1; 6233 vport_update_params.vport_active_rx_flg = 0; 6234 vport_update_params.rss_params = &ha->rss_params; 6235 vport_update_params.rss_params->update_rss_config = 0; 6236 vport_update_params.rss_params->rss_enable = 0; 6237 vport_update_params.update_inner_vlan_removal_flg = 0; 6238 vport_update_params.inner_vlan_removal_flg = 0; 6239 6240 rc = qlnx_update_vport(cdev, &vport_update_params); 6241 if (rc) { 6242 QL_DPRINT1(ha, (ha->pci_dev, "%s:Failed to update vport\n", 6243 __func__)); 6244 return rc; 6245 } 6246 6247 /* Flush Tx queues. If needed, request drain from MCP */ 6248 for_each_rss(i) { 6249 fp = &ha->fp_array[i]; 6250 6251 for (tc = 0; tc < ha->num_tc; tc++) { 6252 struct qlnx_tx_queue *txq = fp->txq[tc]; 6253 6254 rc = qlnx_drain_txq(ha, fp, txq); 6255 if (rc) 6256 return rc; 6257 } 6258 } 6259 6260 /* Stop all Queues in reverse order*/ 6261 for (i = ha->num_rss - 1; i >= 0; i--) { 6262 6263 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)]; 6264 6265 fp = &ha->fp_array[i]; 6266 6267 /* Stop the Tx Queue(s)*/ 6268 for (tc = 0; tc < ha->num_tc; tc++) { 6269 int tx_queue_id; 6270 6271 tx_queue_id = tc * ha->num_rss + i; 6272 rc = ecore_eth_tx_queue_stop(p_hwfn, 6273 fp->txq[tc]->handle); 6274 6275 if (rc) { 6276 QL_DPRINT1(ha, (ha->pci_dev, 6277 "%s: Failed to stop TXQ #%d\n", 6278 __func__, tx_queue_id)); 6279 return rc; 6280 } 6281 } 6282 6283 /* Stop the Rx Queue*/ 6284 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false, 6285 false); 6286 if (rc) { 6287 QL_DPRINT1(ha, (ha->pci_dev, 6288 "%s: Failed to stop RXQ #%d\n", __func__, i)); 6289 return rc; 6290 } 6291 } 6292 6293 /* Stop the vport */ 6294 for_each_hwfn(cdev, i) { 6295 6296 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 6297 6298 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0); 6299 6300 if (rc) { 6301 QL_DPRINT1(ha, (ha->pci_dev, 6302 "%s: Failed to stop VPORT\n", __func__)); 6303 return rc; 6304 } 6305 } 6306 6307 return rc; 6308} 6309 6310static int 6311qlnx_set_ucast_rx_mac(qlnx_host_t *ha, 6312 enum ecore_filter_opcode opcode, 6313 unsigned char mac[ETH_ALEN]) 6314{ 6315 struct ecore_filter_ucast ucast; 6316 struct ecore_dev *cdev; 6317 int rc; 6318 6319 cdev = &ha->cdev; 6320 6321 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 6322 6323 ucast.opcode = opcode; 6324 ucast.type = ECORE_FILTER_MAC; 6325 ucast.is_rx_filter = 1; 6326 ucast.vport_to_add_to = 0; 6327 memcpy(&ucast.mac[0], mac, ETH_ALEN); 6328 6329 rc = ecore_filter_ucast_cmd(cdev, &ucast, 
ECORE_SPQ_MODE_CB, NULL); 6330 6331 return (rc); 6332} 6333 6334static int 6335qlnx_remove_all_ucast_mac(qlnx_host_t *ha) 6336{ 6337 struct ecore_filter_ucast ucast; 6338 struct ecore_dev *cdev; 6339 int rc; 6340 6341 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 6342 6343 ucast.opcode = ECORE_FILTER_REPLACE; 6344 ucast.type = ECORE_FILTER_MAC; 6345 ucast.is_rx_filter = 1; 6346 6347 cdev = &ha->cdev; 6348 6349 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); 6350 6351 return (rc); 6352} 6353 6354static int 6355qlnx_remove_all_mcast_mac(qlnx_host_t *ha) 6356{ 6357 struct ecore_filter_mcast *mcast; 6358 struct ecore_dev *cdev; 6359 int rc, i; 6360 6361 cdev = &ha->cdev; 6362 6363 mcast = &ha->ecore_mcast; 6364 bzero(mcast, sizeof(struct ecore_filter_mcast)); 6365 6366 mcast->opcode = ECORE_FILTER_REMOVE; 6367 6368 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 6369 6370 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] || 6371 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] || 6372 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) { 6373 6374 memcpy(&mcast->mac[i], &ha->mcast[i].addr[0], ETH_ALEN); 6375 mcast->num_mc_addrs++; 6376 } 6377 } 6378 mcast = &ha->ecore_mcast; 6379 6380 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 6381 6382 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS)); 6383 ha->nmcast = 0; 6384 6385 return (rc); 6386} 6387 6388static int 6389qlnx_clean_filters(qlnx_host_t *ha) 6390{ 6391 int rc = 0; 6392 6393 /* Remove all unicast macs */ 6394 rc = qlnx_remove_all_ucast_mac(ha); 6395 if (rc) 6396 return rc; 6397 6398 /* Remove all multicast macs */ 6399 rc = qlnx_remove_all_mcast_mac(ha); 6400 if (rc) 6401 return rc; 6402 6403 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac); 6404 6405 return (rc); 6406} 6407 6408static int 6409qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter) 6410{ 6411 struct ecore_filter_accept_flags accept; 6412 int rc = 0; 6413 struct ecore_dev *cdev; 6414 6415 cdev = &ha->cdev; 6416 6417 bzero(&accept, sizeof(struct ecore_filter_accept_flags)); 6418 6419 accept.update_rx_mode_config = 1; 6420 accept.rx_accept_filter = filter; 6421 6422 accept.update_tx_mode_config = 1; 6423 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 6424 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST; 6425 6426 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false, 6427 ECORE_SPQ_MODE_CB, NULL); 6428 6429 return (rc); 6430} 6431 6432static int 6433qlnx_set_rx_mode(qlnx_host_t *ha) 6434{ 6435 int rc = 0; 6436 uint8_t filter; 6437 6438 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac); 6439 if (rc) 6440 return rc; 6441 6442 rc = qlnx_remove_all_mcast_mac(ha); 6443 if (rc) 6444 return rc; 6445 6446 filter = ECORE_ACCEPT_UCAST_MATCHED | 6447 ECORE_ACCEPT_MCAST_MATCHED | 6448 ECORE_ACCEPT_BCAST; 6449 ha->filter = filter; 6450 6451 rc = qlnx_set_rx_accept_filter(ha, filter); 6452 6453 return (rc); 6454} 6455 6456static int 6457qlnx_set_link(qlnx_host_t *ha, bool link_up) 6458{ 6459 int i, rc = 0; 6460 struct ecore_dev *cdev; 6461 struct ecore_hwfn *hwfn; 6462 struct ecore_ptt *ptt; 6463 6464 cdev = &ha->cdev; 6465 6466 for_each_hwfn(cdev, i) { 6467 6468 hwfn = &cdev->hwfns[i]; 6469 6470 ptt = ecore_ptt_acquire(hwfn); 6471 if (!ptt) 6472 return -EBUSY; 6473 6474 rc = ecore_mcp_set_link(hwfn, ptt, link_up); 6475 6476 ecore_ptt_release(hwfn, ptt); 6477 6478 if (rc) 6479 return rc; 6480 } 6481 return (rc); 6482} 6483 6484#if __FreeBSD_version >= 1100000 6485static 
uint64_t 6486qlnx_get_counter(if_t ifp, ift_counter cnt) 6487{ 6488 qlnx_host_t *ha; 6489 uint64_t count; 6490 6491 ha = (qlnx_host_t *)if_getsoftc(ifp); 6492 6493 switch (cnt) { 6494 6495 case IFCOUNTER_IPACKETS: 6496 count = ha->hw_stats.common.rx_ucast_pkts + 6497 ha->hw_stats.common.rx_mcast_pkts + 6498 ha->hw_stats.common.rx_bcast_pkts; 6499 break; 6500 6501 case IFCOUNTER_IERRORS: 6502 count = ha->hw_stats.common.rx_crc_errors + 6503 ha->hw_stats.common.rx_align_errors + 6504 ha->hw_stats.common.rx_oversize_packets + 6505 ha->hw_stats.common.rx_undersize_packets; 6506 break; 6507 6508 case IFCOUNTER_OPACKETS: 6509 count = ha->hw_stats.common.tx_ucast_pkts + 6510 ha->hw_stats.common.tx_mcast_pkts + 6511 ha->hw_stats.common.tx_bcast_pkts; 6512 break; 6513 6514 case IFCOUNTER_OERRORS: 6515 count = ha->hw_stats.common.tx_err_drop_pkts; 6516 break; 6517 6518 case IFCOUNTER_COLLISIONS: 6519 return (0); 6520 6521 case IFCOUNTER_IBYTES: 6522 count = ha->hw_stats.common.rx_ucast_bytes + 6523 ha->hw_stats.common.rx_mcast_bytes + 6524 ha->hw_stats.common.rx_bcast_bytes; 6525 break; 6526 6527 case IFCOUNTER_OBYTES: 6528 count = ha->hw_stats.common.tx_ucast_bytes + 6529 ha->hw_stats.common.tx_mcast_bytes + 6530 ha->hw_stats.common.tx_bcast_bytes; 6531 break; 6532 6533 case IFCOUNTER_IMCASTS: 6534 count = ha->hw_stats.common.rx_mcast_pkts; 6535 break; 6536 6537 case IFCOUNTER_OMCASTS: 6538 count = ha->hw_stats.common.tx_mcast_pkts; 6539 break; 6540 6541 case IFCOUNTER_IQDROPS: 6542 case IFCOUNTER_OQDROPS: 6543 case IFCOUNTER_NOPROTO: 6544 6545 default: 6546 return (if_get_counter_default(ifp, cnt)); 6547 } 6548 return (count); 6549} 6550#endif 6551 6552 6553static void 6554qlnx_timer(void *arg) 6555{ 6556 qlnx_host_t *ha; 6557 6558 ha = (qlnx_host_t *)arg; 6559 6560 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats); 6561 6562 if (ha->storm_stats_enable) 6563 qlnx_sample_storm_stats(ha); 6564 6565 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 6566 6567 return; 6568} 6569 6570static int 6571qlnx_load(qlnx_host_t *ha) 6572{ 6573 int i; 6574 int rc = 0; 6575 struct ecore_dev *cdev; 6576 device_t dev; 6577 6578 cdev = &ha->cdev; 6579 dev = ha->pci_dev; 6580 6581 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); 6582 6583 rc = qlnx_alloc_mem_arrays(ha); 6584 if (rc) 6585 goto qlnx_load_exit0; 6586 6587 qlnx_init_fp(ha); 6588 6589 rc = qlnx_alloc_mem_load(ha); 6590 if (rc) 6591 goto qlnx_load_exit1; 6592 6593 QL_DPRINT2(ha, (dev, "%s: Allocated %d RSS queues on %d TC/s\n", 6594 __func__, ha->num_rss, ha->num_tc)); 6595 6596 for (i = 0; i < ha->num_rss; i++) { 6597 6598 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq, 6599 (INTR_TYPE_NET | INTR_MPSAFE), 6600 NULL, qlnx_fp_isr, &ha->irq_vec[i], 6601 &ha->irq_vec[i].handle))) { 6602 6603 QL_DPRINT1(ha, (dev, "could not setup interrupt\n")); 6604 6605 goto qlnx_load_exit2; 6606 } 6607 6608 QL_DPRINT2(ha, (dev, "%s: rss_id = %d irq_rid %d" 6609 " irq %p handle %p\n", __func__, i, 6610 ha->irq_vec[i].irq_rid, 6611 ha->irq_vec[i].irq, ha->irq_vec[i].handle)); 6612 6613 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus)); 6614 } 6615 6616 rc = qlnx_start_queues(ha); 6617 if (rc) 6618 goto qlnx_load_exit2; 6619 6620 QL_DPRINT2(ha, (dev, "%s: Start VPORT, RXQ and TXQ succeeded\n", 6621 __func__)); 6622 6623 /* Add primary mac and set Rx filters */ 6624 rc = qlnx_set_rx_mode(ha); 6625 if (rc) 6626 goto qlnx_load_exit2; 6627 6628 /* Ask for link-up using current configuration */ 6629 qlnx_set_link(ha, true); 6630 6631 ha->state = QLNX_STATE_OPEN; 6632 6633 
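	/*
	 * The state change above must happen before interrupt-driven work
	 * is expected: qlnx_fp_isr() discards any fastpath interrupt that
	 * arrives while ha->state is not QLNX_STATE_OPEN.
	 */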
    bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));

    if (ha->flags.callout_init)
        callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

    goto qlnx_load_exit0;

qlnx_load_exit2:
    qlnx_free_mem_load(ha);

qlnx_load_exit1:
    ha->num_rss = 0;

qlnx_load_exit0:
    QL_DPRINT2(ha, (ha->pci_dev, "%s: exit [%d]\n", __func__, rc));
    return (rc);
}

/*
 * Name: qlnx_drain_soft_lro
 * Function: flushes any packets still queued in the soft LRO engine
 */
static void
qlnx_drain_soft_lro(qlnx_host_t *ha)
{
#ifdef QLNX_SOFT_LRO

    struct ifnet *ifp;
    int i;

    ifp = ha->ifp;

    if (ifp->if_capenable & IFCAP_LRO) {

        for (i = 0; i < ha->num_rss; i++) {

            struct qlnx_fastpath *fp = &ha->fp_array[i];
            struct lro_ctrl *lro;

            lro = &fp->rxq->lro;

#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)

            tcp_lro_flush_all(lro);

#else
            struct lro_entry *queued;

            while (!SLIST_EMPTY(&lro->lro_active)) {
                queued = SLIST_FIRST(&lro->lro_active);
                SLIST_REMOVE_HEAD(&lro->lro_active, next);
                tcp_lro_flush(lro, queued);
            }

#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */

        }
    }

#endif /* #ifdef QLNX_SOFT_LRO */

    return;
}

/*
 * Name: qlnx_unload
 * Function: brings the interface down - tears down the fastpath
 *    interrupts, stops the queues and drains the taskqueues, soft LRO
 *    and the stats timer
 */
static void
qlnx_unload(qlnx_host_t *ha)
{
    struct ecore_dev *cdev;
    device_t dev;
    int i;

    cdev = &ha->cdev;
    dev = ha->pci_dev;

    QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

    if (ha->state == QLNX_STATE_OPEN) {

        qlnx_set_link(ha, false);
        qlnx_clean_filters(ha);
        qlnx_stop_queues(ha);
        ecore_hw_stop_fastpath(cdev);

        for (i = 0; i < ha->num_rss; i++) {
            if (ha->irq_vec[i].handle) {
                (void)bus_teardown_intr(dev,
                    ha->irq_vec[i].irq,
                    ha->irq_vec[i].handle);
                ha->irq_vec[i].handle = NULL;
            }
        }

        qlnx_drain_fp_taskqueues(ha);
        qlnx_drain_soft_lro(ha);
        qlnx_free_mem_load(ha);
    }

    if (ha->flags.callout_init)
        callout_drain(&ha->qlnx_callout);

    qlnx_mdelay(__func__, 1000);

    ha->state = QLNX_STATE_CLOSED;

    QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
    return;
}

/*
 * Name: qlnx_grc_dumpsize
 * Function: returns the size (in dwords) of the buffer needed to hold a
 *    GRC dump for the given hw function
 */
static int
qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
{
    int rval = -1;
    struct ecore_hwfn *p_hwfn;
    struct ecore_ptt *p_ptt;

    ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

    p_hwfn = &ha->cdev.hwfns[hwfn_index];
    p_ptt = ecore_ptt_acquire(p_hwfn);

    if (!p_ptt) {
        QL_DPRINT1(ha, (ha->pci_dev, "%s: ecore_ptt_acquire failed\n",
            __func__));
        return (rval);
    }

    rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);

    if (rval == DBG_STATUS_OK)
        rval = 0;
    else {
        QL_DPRINT1(ha, (ha->pci_dev,
            "%s : ecore_dbg_grc_get_dump_buf_size failed [0x%x]\n",
            __func__, rval));
    }

    ecore_ptt_release(p_hwfn, p_ptt);

    return (rval);
}
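
/*
 * Name: qlnx_idle_chk_size
 * Function: returns the size (in dwords) of the buffer needed to hold an
 *    idle-check dump for the given hw function
 */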
static int
qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
{
    int rval = -1;
    struct ecore_hwfn *p_hwfn;
    struct ecore_ptt *p_ptt;

    ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

    p_hwfn = &ha->cdev.hwfns[hwfn_index];
    p_ptt = ecore_ptt_acquire(p_hwfn);

    if (!p_ptt) {
        QL_DPRINT1(ha, (ha->pci_dev, "%s: ecore_ptt_acquire failed\n",
            __func__));
        return (rval);
    }

    rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);

    if (rval == DBG_STATUS_OK)
        rval = 0;
    else {
        QL_DPRINT1(ha, (ha->pci_dev, "%s : "
            "ecore_dbg_idle_chk_get_dump_buf_size failed [0x%x]\n",
            __func__, rval));
    }

    ecore_ptt_release(p_hwfn, p_ptt);

    return (rval);
}
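
/*
 * Name: qlnx_sample_storm_stats
 * Function: reads the active/stall/sleeping/inactive cycle counters of
 *    each storm processor into the next free storm_stats sample slot
 */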
static void
qlnx_sample_storm_stats(qlnx_host_t *ha)
{
    int i, index;
    struct ecore_dev *cdev;
    qlnx_storm_stats_t *s_stats;
    uint32_t reg;
    struct ecore_ptt *p_ptt;
    struct ecore_hwfn *hwfn;

    if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
        ha->storm_stats_enable = 0;
        return;
    }

    cdev = &ha->cdev;

    for_each_hwfn(cdev, i) {

        hwfn = &cdev->hwfns[i];

        p_ptt = ecore_ptt_acquire(hwfn);
        if (!p_ptt)
            return;

        index = ha->storm_stats_index +
                (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);

        s_stats = &ha->storm_stats[index];

        /* XSTORM */
        reg = XSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
        s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = XSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
        s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = XSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
        s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = XSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
        s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

        /* YSTORM */
        reg = YSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
        s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = YSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
        s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = YSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
        s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = YSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
        s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

        /* PSTORM */
        reg = PSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
        s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = PSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
        s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = PSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
        s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = PSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
        s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

        /* TSTORM */
        reg = TSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
        s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = TSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
        s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = TSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
        s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = TSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
        s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

        /* MSTORM */
        reg = MSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
        s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = MSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
        s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = MSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
        s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = MSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
        s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

        /* USTORM */
        reg = USEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
        s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = USEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
        s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = USEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
        s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = USEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
        s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

        ecore_ptt_release(hwfn, p_ptt);
    }

    ha->storm_stats_index++;

    return;
}

/*
 * Name: qlnx_dump_buf8
 * Function: dumps a buffer as bytes
 */
static void
qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
{
    device_t dev;
    uint32_t i = 0;
    uint8_t *buf;

    dev = ha->pci_dev;
    buf = dbuf;

    device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);

    while (len >= 16) {
        device_printf(dev, "0x%08x:"
            " %02x %02x %02x %02x %02x %02x %02x %02x"
            " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
            buf[0], buf[1], buf[2], buf[3],
            buf[4], buf[5], buf[6], buf[7],
            buf[8], buf[9], buf[10], buf[11],
            buf[12], buf[13], buf[14], buf[15]);
        i += 16;
        len -= 16;
        buf += 16;
    }
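
    /* Print any remaining bytes (len is now < 16) on one final line. */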
    if (len) {
        char line[(16 * 3) + 1];
        int n = 0;

        while (len--)
            n += snprintf(&line[n], sizeof(line) - n,
                " %02x", *buf++);

        device_printf(dev, "0x%08x:%s\n", i, line);
    }

    device_printf(dev, "%s: %s dump end\n", __func__, msg);

    return;
}