t4_main.c revision 309560
/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/t4_main.c 309560 2016-12-05 20:43:25Z jhb $");

#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"
#include "t4_if.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static int t4_ready(device_t);
static int t4_read_port_device(device_t, int, device_t *);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};

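/*
 * The driver is a three-level newbus hierarchy: the t4nex/t5nex/t6nex
 * device attaches to the PCI function and owns the struct adapter, adds
 * one child device per physical port (cxgbe/cxl/cc, struct port_info),
 * and each port may add extra virtual-interface children (vcxgbe/vcxl/vcc,
 * struct vi_info) when more than one VI per port is configured.
 */
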
/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe,		cxgbe_probe),
	DEVMETHOD(device_attach,	cxgbe_attach),
	DEVMETHOD(device_detach,	cxgbe_detach),
	{ 0, 0 }
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T4 VI (vcxgbe) interface */
static int vcxgbe_probe(device_t);
static int vcxgbe_attach(device_t);
static int vcxgbe_detach(device_t);
static device_method_t vcxgbe_methods[] = {
	DEVMETHOD(device_probe,		vcxgbe_probe),
	DEVMETHOD(device_attach,	vcxgbe_attach),
	DEVMETHOD(device_detach,	vcxgbe_detach),
	{ 0, 0 }
};
static driver_t vcxgbe_driver = {
	"vcxgbe",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

static d_ioctl_t t4_ioctl;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};

/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T5 VI (vcxl) interface */
static driver_t vcxl_driver = {
	"vcxl",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

/* T6 bus driver interface */
static int t6_probe(device_t);
static device_method_t t6_methods[] = {
	DEVMETHOD(device_probe,		t6_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t6_driver = {
	"t6nex",
	t6_methods,
	sizeof(struct adapter)
};

/* T6 port (cc) interface */
static driver_t cc_driver = {
	"cc",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T6 VI (vcc) interface */
static driver_t vcc_driver = {
	"vcc",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -1 as an indication to tweak_tunables() that it
 * should provide a reasonable default when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */
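/*
 * These tunables are fetched from the kernel environment, so they can be set
 * at boot time from loader.conf(5).  For example (values are illustrative,
 * not recommendations):
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.nrxq10g="4"
 *	hw.cxgbe.config_file="uwire"
 */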

/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

#define NTXQ_VI 1
static int t4_ntxq_vi = -1;
TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi);

#define NRXQ_VI 1
static int t4_nrxq_vi = -1;
TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi);

static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);

#define NOFLDTXQ_VI 1
static int t4_nofldtxq_vi = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi);

#define NOFLDRXQ_VI 1
static int t4_nofldrxq_vi = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi);
#endif

#ifdef DEV_NETMAP
#define NNMTXQ_VI 2
static int t4_nnmtxq_vi = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi);

#define NNMRXQ_VI 2
static int t4_nnmrxq_vi = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 *            mark or when signalled to do so, 0 to never emit PAUSE.
 */
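/*
 * For example, given the bit layout above, hw.cxgbe.pause_settings=1
 * (rx_pause only) makes the port honor incoming PAUSE frames but never
 * transmit any of its own, and 0 turns flow control off entirely.  The
 * default below enables both directions.
 */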
static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited,
 * allowed, encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that
 * the chip resources aren't wasted on features that will not be used.
 */
static int t4_nbmcaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.nbmcaps_allowed", &t4_nbmcaps_allowed);

static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
    FW_CAPS_CONFIG_SWITCH_EGRESS;
TUNABLE_INT("hw.cxgbe.switchcaps_allowed", &t4_switchcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_cryptocaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.cryptocaps_allowed", &t4_cryptocaps_allowed);

static int t4_iscsicaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

static int t4_num_vis = 1;
TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);

/* Functions used by extra VIs to obtain unique MAC addresses for each VI. */
static int vi_mac_funcs[] = {
	FW_VI_FUNC_OFLD,
	FW_VI_FUNC_IWARP,
	FW_VI_FUNC_OPENISCSI,
	FW_VI_FUNC_OPENFCOE,
	FW_VI_FUNC_FOISCSI,
	FW_VI_FUNC_FOFCOE,
};

struct intrs_and_queues {
	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
	uint16_t nirq;		/* Total # of vectors */
	uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
	uint16_t intr_flags_1g;	/* Interrupt flags for each 1G port */
	uint16_t ntxq10g;	/* # of NIC txq's for each 10G port */
	uint16_t nrxq10g;	/* # of NIC rxq's for each 10G port */
	uint16_t ntxq1g;	/* # of NIC txq's for each 1G port */
	uint16_t nrxq1g;	/* # of NIC rxq's for each 1G port */
	uint16_t rsrv_noflowq;	/* Flag whether to reserve queue 0 */
	uint16_t nofldtxq10g;	/* # of TOE txq's for each 10G port */
	uint16_t nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	uint16_t nofldtxq1g;	/* # of TOE txq's for each 1G port */
	uint16_t nofldrxq1g;	/* # of TOE rxq's for each 1G port */

	/* The vcxgbe/vcxl interfaces use these and not the ones above. */
	uint16_t ntxq_vi;	/* # of NIC txq's */
	uint16_t nrxq_vi;	/* # of NIC rxq's */
	uint16_t nofldtxq_vi;	/* # of TOE txq's */
	uint16_t nofldrxq_vi;	/* # of TOE rxq's */
	uint16_t nnmtxq_vi;	/* # of netmap txq's */
	uint16_t nnmrxq_vi;	/* # of netmap rxq's */
};

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};

static void setup_memwin(struct adapter *);
static void position_memwin(struct adapter *, int, uint32_t);
static int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int);
static inline int read_via_memwin(struct adapter *, int, uint32_t, uint32_t *,
    int);
static inline int write_via_memwin(struct adapter *, int, uint32_t,
    const uint32_t *, int);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static int fixup_devlog_params(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, int, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *, struct ifmedia *);
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct adapter *, struct vi_info *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static void cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tc_params(SYSCTL_HANDLER_ARGS);
#endif
#ifdef TCP_OFFLOAD
static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
#endif
static uint32_t fconf_iconf_to_mode(uint32_t, uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t mode_to_iconf(uint32_t);
static int check_fspec_against_fconf_iconf(struct adapter *,
    struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int set_tcb_rpl(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, int);
#endif
static int mod_event(module_t, int, void *);
static int notify_siblings(device_t, int);

struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401, "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402, "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403, "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407, "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409, "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a, "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d, "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e, "Chelsio T540-LP-CR"},		/* 4 x 10G */
	{0x5410, "Chelsio T580-LP-CR"},		/* 2 x 40G */
	{0x5411, "Chelsio T520-LL-CR"},		/* 2 x 10G */
	{0x5412, "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414, "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
	{0x5415, "Chelsio T502-BT"},		/* 2 x 1G */
#ifdef notyet
	{0x5404, "Chelsio T520-BCH"},
	{0x5405, "Chelsio T540-BCH"},
	{0x5406, "Chelsio T540-CH"},
	{0x5408, "Chelsio T520-CX"},
	{0x540b, "Chelsio B520-SR"},
	{0x540c, "Chelsio B504-BT"},
	{0x540f, "Chelsio Amsterdam"},
	{0x5413, "Chelsio T580-CHR"},
#endif
}, t6_pciids[] = {
	{0xc006, "Chelsio Terminator 6 FPGA"},	/* T6 PE10K6 FPGA (PF0) */
	{0x6401, "Chelsio T6225-CR"},		/* 2 x 10/25G */
	{0x6402, "Chelsio T6225-SO-CR"},	/* 2 x 10/25G, nomem */
	{0x6407, "Chelsio T62100-LP-CR"},	/* 2 x 40/50/100G */
	{0x6408, "Chelsio T62100-SO-CR"},	/* 2 x 40/50/100G, nomem */
	{0x640d, "Chelsio T62100-CR"},		/* 2 x 40/50/100G */
	{0x6410, "Chelsio T62100-DBG"},		/* 2 x 40/50/100G, debug */
};

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif
CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t6_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	for (i = 0; i < nitems(t6_pciids); i++) {
		if (d == t6_pciids[i].device) {
			device_set_desc(dev, t6_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

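/*
 * Note: pcie_adjust_config() used below performs an atomic read-modify-write
 * of the Root Port's PCIe device control register and returns the register's
 * previous value, so the printf only fires when one of the two attributes
 * was actually enabled and had to be cleared.
 */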
static void
t5_attribute_workaround(device_t dev)
{
	device_t root_port;
	uint32_t v;

	/*
	 * The T5 chips do not properly echo the No Snoop and Relaxed
	 * Ordering attributes when replying to a TLP from a Root Port.  As a
	 * workaround, find the parent Root Port and disable No Snoop and
	 * Relaxed Ordering.  Note that this affects all devices under this
	 * root port.
	 */
	root_port = pci_find_pcie_root_port(dev);
	if (root_port == NULL) {
		device_printf(dev, "Unable to find parent root port\n");
		return;
	}

	v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
	if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
	    0)
		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
		    device_get_nameunit(root_port));
}

static const struct devnames devnames[] = {
	{
		.nexus_name = "t4nex",
		.ifnet_name = "cxgbe",
		.vi_ifnet_name = "vcxgbe",
		.pf03_drv_name = "t4iov",
		.vf_nexus_name = "t4vf",
		.vf_ifnet_name = "cxgbev"
	}, {
		.nexus_name = "t5nex",
		.ifnet_name = "cxl",
		.vi_ifnet_name = "vcxl",
		.pf03_drv_name = "t5iov",
		.vf_nexus_name = "t5vf",
		.vf_ifnet_name = "cxlv"
	}, {
		.nexus_name = "t6nex",
		.ifnet_name = "cc",
		.vi_ifnet_name = "vcc",
		.pf03_drv_name = "t6iov",
		.vf_nexus_name = "t6vf",
		.vf_ifnet_name = "ccv"
	}
};

void
t4_init_devnames(struct adapter *sc)
{
	int id;

	id = chip_id(sc);
	if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
		sc->names = &devnames[id - CHELSIO_T4];
	else {
		device_printf(sc->dev, "chip id %d is not supported.\n", id);
		sc->names = NULL;
	}
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, j, n10g, n1g, rqidx, tqidx;
	struct make_dev_args mda;
	struct intrs_and_queues iaq;
	struct sge *s;
	uint8_t *buf;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif
#ifdef DEV_NETMAP
	int nm_rqidx, nm_tqidx;
#endif
	int num_vis;

	sc = device_get_softc(dev);
	sc->dev = dev;
	TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);

	if ((pci_get_device(dev) & 0xff00) == 0x5400)
		t5_attribute_workaround(dev);
	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);

		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
	}

	sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
	sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
	sc->traceq = -1;
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	t4_add_adapter(sc);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);

	mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);

	rc = t4_map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));

	/* Prepare the adapter for operation. */
	buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_prep_adapter(sc, buf);
	free(buf, M_CXGBE);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * This is the real PF# to which we're attaching.  Works from within
	 * PCI passthrough environments too, where pci_get_function() could
	 * return a different PF# depending on the passthrough configuration.
	 * We need to use the real PF# in all our communication with the
	 * firmware.
	 */
	j = t4_read_reg(sc, A_PL_WHOAMI);
	sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
	sc->mbox = sc->pf;

	t4_init_devnames(sc);
	if (sc->names == NULL) {
		rc = ENOTSUP;
		goto done; /* error message displayed already */
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	if (t4_init_devlog_params(sc, 0) == 0)
		fixup_devlog_params(sc);
	make_dev_args_init(&mda);
	mda.mda_devsw = &t4_cdevsw;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0600;
	mda.mda_si_drv1 = sc;
	rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
	if (rc != 0)
		device_printf(dev, "failed to create nexus char device: %d.\n",
		    rc);

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

#if defined(__i386__)
	if ((cpu_feature & CPUID_CX8) == 0) {
		device_printf(dev, "64 bit atomics not available.\n");
		rc = ENOTSUP;
		goto done;
	}
#endif

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * Number of VIs to create per-port.  The first VI is the "main"
	 * regular VI for the port.  The rest are additional virtual interfaces
	 * on the same physical port.  Note that the main VI does not have
	 * native netmap support but the extra VIs do.
	 *
	 * Limit the number of VIs per port to the number of available
	 * MAC addresses per port.
	 */
	if (t4_num_vis >= 1)
		num_vis = t4_num_vis;
	else
		num_vis = 1;
	if (num_vis > nitems(vi_mac_funcs)) {
		num_vis = nitems(vi_mac_funcs);
		device_printf(dev, "Number of VIs limited to %d\n", num_vis);
	}
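
	/*
	 * Example: with hw.cxgbe.num_vis=2 on a 2-port T5 card, each cxl
	 * port gets one extra VI that shows up as its own vcxl ifnet, and
	 * that is where netmap support lives (per the comment above).
	 */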

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;
		/*
		 * XXX: vi[0] is special so we can't delay this allocation
		 * until pi->nvi's final value is known.
		 */
		pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE,
		    M_ZERO | M_WAITOK);

		/*
		 * Allocate the "main" VI and initialize parameters
		 * like mac addr.
		 */
		rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		pi->link_cfg.requested_fc &= ~(PAUSE_TX | PAUSE_RX);
		pi->link_cfg.requested_fc |= t4_pause_settings;
		pi->link_cfg.fc &= ~(PAUSE_TX | PAUSE_RX);
		pi->link_cfg.fc |= t4_pause_settings;

		rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
		if (rc != 0) {
			device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		pi->tc = malloc(sizeof(struct tx_sched_class) *
		    sc->chip_params->nsched_cls, M_CXGBE, M_ZERO | M_WAITOK);

		if (port_top_speed(pi) >= 10) {
			n10g++;
		} else {
			n1g++;
		}

		pi->linkdnrc = -1;

		pi->dev = device_add_child(dev, sc->names->ifnet_name, -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		pi->vi[0].dev = pi->dev;
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */
	if (iaq.nrxq_vi + iaq.nofldrxq_vi + iaq.nnmrxq_vi == 0)
		num_vis = 1;

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	if (num_vis > 1) {
		s->nrxq += (n10g + n1g) * (num_vis - 1) * iaq.nrxq_vi;
		s->ntxq += (n10g + n1g) * (num_vis - 1) * iaq.ntxq_vi;
	}
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		if (num_vis > 1) {
			s->nofldrxq += (n10g + n1g) * (num_vis - 1) *
			    iaq.nofldrxq_vi;
			s->nofldtxq += (n10g + n1g) * (num_vis - 1) *
			    iaq.nofldtxq_vi;
		}
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef DEV_NETMAP
	if (num_vis > 1) {
		s->nnmrxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmrxq_vi;
		s->nnmtxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmtxq_vi;
	}
	s->neq += s->nnmtxq + s->nnmrxq;
	s->niq += s->nnmrxq;

	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
	    M_CXGBE, M_ZERO | M_WAITOK);
	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
	    M_CXGBE, M_ZERO | M_WAITOK);
#endif

	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);
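
	/*
	 * To illustrate the arithmetic above (numbers are hypothetical, not
	 * defaults): a 2-port 10G adapter with nrxq10g = 8, ntxq10g = 16 and
	 * num_vis = 1 ends up with s->nrxq = 16, s->ntxq = 32,
	 * s->neq = 16 + 32 + 2 + 1 = 51 and s->niq = 16 + 1 = 17, before any
	 * TOE or netmap queues are added.
	 */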

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
#ifdef DEV_NETMAP
	nm_rqidx = nm_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];
		struct vi_info *vi;

		if (pi == NULL)
			continue;

		pi->nvi = num_vis;
		for_each_vi(pi, j, vi) {
			vi->pi = pi;
			vi->qsize_rxq = t4_qsize_rxq;
			vi->qsize_txq = t4_qsize_txq;

			vi->first_rxq = rqidx;
			vi->first_txq = tqidx;
			if (port_top_speed(pi) >= 10) {
				vi->tmr_idx = t4_tmr_idx_10g;
				vi->pktc_idx = t4_pktc_idx_10g;
				vi->flags |= iaq.intr_flags_10g & INTR_RXQ;
				vi->nrxq = j == 0 ? iaq.nrxq10g : iaq.nrxq_vi;
				vi->ntxq = j == 0 ? iaq.ntxq10g : iaq.ntxq_vi;
			} else {
				vi->tmr_idx = t4_tmr_idx_1g;
				vi->pktc_idx = t4_pktc_idx_1g;
				vi->flags |= iaq.intr_flags_1g & INTR_RXQ;
				vi->nrxq = j == 0 ? iaq.nrxq1g : iaq.nrxq_vi;
				vi->ntxq = j == 0 ? iaq.ntxq1g : iaq.ntxq_vi;
			}
			rqidx += vi->nrxq;
			tqidx += vi->ntxq;

			if (j == 0 && vi->ntxq > 1)
				vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
			else
				vi->rsrv_noflowq = 0;

#ifdef TCP_OFFLOAD
			vi->first_ofld_rxq = ofld_rqidx;
			vi->first_ofld_txq = ofld_tqidx;
			if (port_top_speed(pi) >= 10) {
				vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ;
				vi->nofldrxq = j == 0 ? iaq.nofldrxq10g :
				    iaq.nofldrxq_vi;
				vi->nofldtxq = j == 0 ? iaq.nofldtxq10g :
				    iaq.nofldtxq_vi;
			} else {
				vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ;
				vi->nofldrxq = j == 0 ? iaq.nofldrxq1g :
				    iaq.nofldrxq_vi;
				vi->nofldtxq = j == 0 ? iaq.nofldtxq1g :
				    iaq.nofldtxq_vi;
			}
			ofld_rqidx += vi->nofldrxq;
			ofld_tqidx += vi->nofldtxq;
#endif
#ifdef DEV_NETMAP
			if (j > 0) {
				vi->first_nm_rxq = nm_rqidx;
				vi->first_nm_txq = nm_tqidx;
				vi->nnmrxq = iaq.nnmrxq_vi;
				vi->nnmtxq = iaq.nnmtxq_vi;
				nm_rqidx += vi->nnmrxq;
				nm_tqidx += vi->nnmtxq;
			}
#endif
		}
	}

	rc = t4_setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
	    sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
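
	/*
	 * The printf above produces a one-line attach summary at boot, along
	 * the lines of (illustrative output, not captured from hardware):
	 *
	 *	t4nex0: PCIe gen3 x8, 2 ports, 18 MSI-X interrupts, 51 eq, 17 iq
	 */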

	t4_set_desc(sc);

	notify_siblings(dev, 0);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach_common(dev);
	else
		t4_sysctls(sc);

	return (rc);
}

static int
t4_ready(device_t dev)
{
	struct adapter *sc;

	sc = device_get_softc(dev);
	if (sc->flags & FW_OK)
		return (0);
	return (ENXIO);
}

static int
t4_read_port_device(device_t dev, int port, device_t *child)
{
	struct adapter *sc;
	struct port_info *pi;

	sc = device_get_softc(dev);
	if (port < 0 || port >= MAX_NPORTS)
		return (EINVAL);
	pi = sc->port[port];
	if (pi == NULL || pi->dev == NULL)
		return (ENXIO);
	*child = pi->dev;
	return (0);
}

static int
notify_siblings(device_t dev, int detaching)
{
	device_t sibling;
	int error, i;

	error = 0;
	for (i = 0; i < PCI_FUNCMAX; i++) {
		if (i == pci_get_function(dev))
			continue;
		sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
		    pci_get_slot(dev), i);
		if (sibling == NULL || !device_is_attached(sibling))
			continue;
		if (detaching)
			error = T4_DETACH_CHILD(sibling);
		else
			(void)T4_ATTACH_CHILD(sibling);
		if (error)
			break;
	}
	return (error);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	int rc;

	sc = device_get_softc(dev);

	rc = notify_siblings(dev, 1);
	if (rc) {
		device_printf(dev,
		    "failed to detach sibling devices: %d\n", rc);
		return (rc);
	}

	return (t4_detach_common(dev));
}

int
t4_detach_common(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->flags & FULL_INIT_DONE) {
		if (!(sc->flags & IS_VF))
			t4_intr_disable(sc);
	}

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	if (device_is_attached(dev)) {
		rc = bus_generic_detach(dev);
		if (rc) {
			device_printf(dev,
			    "failed to detach child devices: %d\n", rc);
			return (rc);
		}
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi->vi, M_CXGBE);
			free(pi->tc, M_CXGBE);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
	free(sc->sge.nm_rxq, M_CXGBE);
	free(sc->sge.nm_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	callout_drain(&sc->sfl_callout);
	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);
	if (mtx_initialized(&sc->reg_lock))
		mtx_destroy(&sc->reg_lock);

	for (i = 0; i < NUM_MEMWIN; i++) {
		struct memwin *mw = &sc->memwin[i];

		if (rw_initialized(&mw->mw_lock))
			rw_destroy(&mw->mw_lock);
	}

	bzero(sc, sizeof(*sc));

	return (0);
}

static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)

static int
cxgbe_vi_attach(device_t dev, struct vi_info *vi)
{
	struct ifnet *ifp;
	struct sbuf *sb;

	vi->xact_addr_filt = -1;
	callout_init(&vi->tick, 1);

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	vi->ifp = ifp;
	ifp->if_softc = vi;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;
	ifp->if_get_counter = cxgbe_get_counter;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (vi->nofldrxq != 0)
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
	ifp->if_hw_tsomaxsegsize = 65536;

	/* Initialize ifmedia for this VI */
	ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(vi->pi, &vi->media);

	vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, vi->hw_addr);
#ifdef DEV_NETMAP
	if (vi->nnmrxq != 0)
		cxgbe_nm_attach(vi);
#endif
	sb = sbuf_new_auto();
	sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
#ifdef TCP_OFFLOAD
	if (ifp->if_capabilities & IFCAP_TOE)
		sbuf_printf(sb, "; %d txq, %d rxq (TOE)",
		    vi->nofldtxq, vi->nofldrxq);
#endif
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
		    vi->nnmtxq, vi->nnmrxq);
#endif
	sbuf_finish(sb);
	device_printf(dev, "%s\n", sbuf_data(sb));
	sbuf_delete(sb);

	vi_sysctls(vi);

	return (0);
}

static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct vi_info *vi;
	int i, rc;

	callout_init_mtx(&pi->tick, &pi->pi_lock, 0);

	rc = cxgbe_vi_attach(dev, &pi->vi[0]);
	if (rc)
		return (rc);

	for_each_vi(pi, i, vi) {
		if (i == 0)
			continue;
		vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
		if (vi->dev == NULL) {
			device_printf(dev, "failed to add VI %d\n", i);
			continue;
		}
		device_set_softc(vi->dev, vi);
	}

	cxgbe_sysctls(pi);

	bus_generic_attach(dev);

	return (0);
}

static void
cxgbe_vi_detach(struct vi_info *vi)
{
	struct ifnet *ifp = vi->ifp;

	ether_ifdetach(ifp);

	if (vi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);

	/* Let detach proceed even if these fail. */
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		cxgbe_nm_detach(vi);
#endif
	cxgbe_uninit_synchronized(vi);
	callout_drain(&vi->tick);
	vi_full_uninit(vi);

	ifmedia_removeall(&vi->media);
	if_free(vi->ifp);
	vi->ifp = NULL;
}

static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	int rc;

	/* Detach the extra VIs first. */
	rc = bus_generic_detach(dev);
	if (rc)
		return (rc);
	device_delete_children(dev);

	doom_vi(sc, &pi->vi[0]);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	cxgbe_vi_detach(&pi->vi[0]);
	callout_drain(&pi->tick);

	end_synchronized_op(sc, 0);

	return (0);
}

static void
cxgbe_init(void *arg)
{
	struct vi_info *vi = arg;
	struct adapter *sc = vi->pi->adapter;

	if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
		return;
	cxgbe_init_synchronized(vi);
	end_synchronized_op(sc, 0);
}

static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags, can_sleep;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (vi->flags & VI_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;
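
	/*
	 * The SIOCSIFFLAGS case below toggles between two locking modes:
	 * update_mac_settings() must be called with the lock held and must
	 * not sleep, while cxgbe_init_synchronized() and
	 * cxgbe_uninit_synchronized() may sleep.  If the handler discovers it
	 * acquired the lock in the wrong mode, it drops the lock and restarts
	 * at redo_sifflags with can_sleep flipped.
	 */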
	case SIOCSIFFLAGS:
		can_sleep = 0;
redo_sifflags:
		rc = begin_synchronized_op(sc, vi,
		    can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = vi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					if (can_sleep == 1) {
						end_synchronized_op(sc, 0);
						can_sleep = 0;
						goto redo_sifflags;
					}
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else {
				if (can_sleep == 0) {
					end_synchronized_op(sc, LOCK_HELD);
					can_sleep = 1;
					goto redo_sifflags;
				}
				rc = cxgbe_init_synchronized(vi);
			}
			vi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (can_sleep == 0) {
				end_synchronized_op(sc, LOCK_HELD);
				can_sleep = 1;
				goto redo_sifflags;
			}
			rc = cxgbe_uninit_synchronized(vi);
		}
		end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account
		 * before sending a TSO request our way, so it's sufficient to
		 * toggle IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(vi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(vi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
	case SIOCGIFXMEDIA:
		ifmedia_ioctl(ifp, ifr, &vi->media, cmd);
		break;

	case SIOCGI2C: {
		struct ifi2creq i2c;

		rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (rc != 0)
			break;
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			rc = EPERM;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			rc = EINVAL;
			break;
		}
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
		if (rc)
			return (rc);
		rc = -t4_i2c_rd(sc, sc->mbox, vi->pi->port_id, i2c.dev_addr,
		    i2c.offset, i2c.len, &i2c.data[0]);
		end_synchronized_op(sc, 0);
		if (rc == 0)
			rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq;
	void *items[1];
	int rc;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	rc = parse_pkt(sc, &m);
	if (__predict_false(rc != 0)) {
		MPASS(m == NULL);			/* was freed already */
		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
		return (rc);
	}

	/* Select a txq. */
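	/*
	 * Packets with an RSS hash are spread over the VI's txqs by flowid;
	 * when rsrv_noflowq is set, txq 0 (vi->first_txq) is reserved for
	 * traffic without a flowid and the hashed traffic uses the remaining
	 * ntxq - 1 queues.
	 */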
	txq = &sc->sge.txq[vi->first_txq];
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
		    vi->rsrv_noflowq);

	items[0] = m;
	rc = mp_ring_enqueue(txq->r, items, 1, 4096);
	if (__predict_false(rc != 0))
		m_freem(m);

	return (rc);
}

static void
cxgbe_qflush(struct ifnet *ifp)
{
	struct vi_info *vi = ifp->if_softc;
	struct sge_txq *txq;
	int i;

	/* queues do not exist if !VI_INIT_DONE. */
	if (vi->flags & VI_INIT_DONE) {
		for_each_txq(vi, i, txq) {
			TXQ_LOCK(txq);
			txq->eq.flags &= ~EQ_ENABLED;
			TXQ_UNLOCK(txq);
			while (!mp_ring_is_idle(txq->r)) {
				mp_ring_check_drainage(txq->r, 0);
				pause("qflush", 1);
			}
		}
	}
	if_qflush(ifp);
}

static uint64_t
vi_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct vi_info *vi = ifp->if_softc;
	struct fw_vi_stats_vf *s = &vi->stats;

	vi_refresh_stats(vi->pi->adapter, vi);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (s->rx_bcast_frames + s->rx_mcast_frames +
		    s->rx_ucast_frames);
	case IFCOUNTER_IERRORS:
		return (s->rx_err_frames);
	case IFCOUNTER_OPACKETS:
		return (s->tx_bcast_frames + s->tx_mcast_frames +
		    s->tx_ucast_frames + s->tx_offload_frames);
	case IFCOUNTER_OERRORS:
		return (s->tx_drop_frames);
	case IFCOUNTER_IBYTES:
		return (s->rx_bcast_bytes + s->rx_mcast_bytes +
		    s->rx_ucast_bytes);
	case IFCOUNTER_OBYTES:
		return (s->tx_bcast_bytes + s->tx_mcast_bytes +
		    s->tx_ucast_bytes + s->tx_offload_bytes);
	case IFCOUNTER_IMCASTS:
		return (s->rx_mcast_frames);
	case IFCOUNTER_OMCASTS:
		return (s->tx_mcast_frames);
	case IFCOUNTER_OQDROPS: {
		uint64_t drops;

		drops = 0;
		if (vi->flags & VI_INIT_DONE) {
			int i;
			struct sge_txq *txq;

			for_each_txq(vi, i, txq)
				drops += counter_u64_fetch(txq->r->drops);
		}

		return (drops);
	}

	default:
		return (if_get_counter_default(ifp, c));
	}
}
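
/*
 * The physical port's MAC statistics are used when this ifnet has the port
 * to itself; with multiple VIs on the port (or on a VF, which has no MAC
 * stats of its own) the per-VI firmware counters above are used instead.
 */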
counter_u64_fetch(txq->r->drops); 1942 } 1943 1944 return (drops); 1945 1946 } 1947 1948 default: 1949 return (if_get_counter_default(ifp, c)); 1950 } 1951} 1952 1953static int 1954cxgbe_media_change(struct ifnet *ifp) 1955{ 1956 struct vi_info *vi = ifp->if_softc; 1957 1958 device_printf(vi->dev, "%s unimplemented.\n", __func__); 1959 1960 return (EOPNOTSUPP); 1961} 1962 1963static void 1964cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1965{ 1966 struct vi_info *vi = ifp->if_softc; 1967 struct port_info *pi = vi->pi; 1968 struct ifmedia_entry *cur; 1969 int speed = pi->link_cfg.speed; 1970 1971 cur = vi->media.ifm_cur; 1972 1973 ifmr->ifm_status = IFM_AVALID; 1974 if (!pi->link_cfg.link_ok) 1975 return; 1976 1977 ifmr->ifm_status |= IFM_ACTIVE; 1978 1979 /* active and current will differ iff current media is autoselect. */ 1980 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO) 1981 return; 1982 1983 ifmr->ifm_active = IFM_ETHER | IFM_FDX; 1984 if (speed == 10000) 1985 ifmr->ifm_active |= IFM_10G_T; 1986 else if (speed == 1000) 1987 ifmr->ifm_active |= IFM_1000_T; 1988 else if (speed == 100) 1989 ifmr->ifm_active |= IFM_100_TX; 1990 else if (speed == 10) 1991 ifmr->ifm_active |= IFM_10_T; 1992 else 1993 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__, 1994 speed)); 1995} 1996 1997static int 1998vcxgbe_probe(device_t dev) 1999{ 2000 char buf[128]; 2001 struct vi_info *vi = device_get_softc(dev); 2002 2003 snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id, 2004 vi - vi->pi->vi); 2005 device_set_desc_copy(dev, buf); 2006 2007 return (BUS_PROBE_DEFAULT); 2008} 2009 2010static int 2011vcxgbe_attach(device_t dev) 2012{ 2013 struct vi_info *vi; 2014 struct port_info *pi; 2015 struct adapter *sc; 2016 int func, index, rc; 2017 u32 param, val; 2018 2019 vi = device_get_softc(dev); 2020 pi = vi->pi; 2021 sc = pi->adapter; 2022 2023 index = vi - pi->vi; 2024 KASSERT(index < nitems(vi_mac_funcs), 2025 ("%s: VI %s doesn't have a MAC func", __func__, 2026 device_get_nameunit(dev))); 2027 func = vi_mac_funcs[index]; 2028 rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1, 2029 vi->hw_addr, &vi->rss_size, func, 0); 2030 if (rc < 0) { 2031 device_printf(dev, "Failed to allocate virtual interface " 2032 "for port %d: %d\n", pi->port_id, -rc); 2033 return (-rc); 2034 } 2035 vi->viid = rc; 2036 if (chip_id(sc) <= CHELSIO_T5) 2037 vi->smt_idx = (rc & 0x7f) << 1; 2038 else 2039 vi->smt_idx = (rc & 0x7f); 2040 2041 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 2042 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) | 2043 V_FW_PARAMS_PARAM_YZ(vi->viid); 2044 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 2045 if (rc) 2046 vi->rss_base = 0xffff; 2047 else { 2048 /* MPASS((val >> 16) == rss_size); */ 2049 vi->rss_base = val & 0xffff; 2050 } 2051 2052 rc = cxgbe_vi_attach(dev, vi); 2053 if (rc) { 2054 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); 2055 return (rc); 2056 } 2057 return (0); 2058} 2059 2060static int 2061vcxgbe_detach(device_t dev) 2062{ 2063 struct vi_info *vi; 2064 struct adapter *sc; 2065 2066 vi = device_get_softc(dev); 2067 sc = vi->pi->adapter; 2068 2069 doom_vi(sc, vi); 2070 2071 cxgbe_vi_detach(vi); 2072 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); 2073 2074 end_synchronized_op(sc, 0); 2075 2076 return (0); 2077} 2078 2079void 2080t4_fatal_err(struct adapter *sc) 2081{ 2082 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0); 2083 t4_intr_disable(sc); 2084 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n", 2085 
device_get_nameunit(sc->dev)); 2086} 2087 2088void 2089t4_add_adapter(struct adapter *sc) 2090{ 2091 sx_xlock(&t4_list_lock); 2092 SLIST_INSERT_HEAD(&t4_list, sc, link); 2093 sx_xunlock(&t4_list_lock); 2094} 2095 2096int 2097t4_map_bars_0_and_4(struct adapter *sc) 2098{ 2099 sc->regs_rid = PCIR_BAR(0); 2100 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 2101 &sc->regs_rid, RF_ACTIVE); 2102 if (sc->regs_res == NULL) { 2103 device_printf(sc->dev, "cannot map registers.\n"); 2104 return (ENXIO); 2105 } 2106 sc->bt = rman_get_bustag(sc->regs_res); 2107 sc->bh = rman_get_bushandle(sc->regs_res); 2108 sc->mmio_len = rman_get_size(sc->regs_res); 2109 setbit(&sc->doorbells, DOORBELL_KDB); 2110 2111 sc->msix_rid = PCIR_BAR(4); 2112 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 2113 &sc->msix_rid, RF_ACTIVE); 2114 if (sc->msix_res == NULL) { 2115 device_printf(sc->dev, "cannot map MSI-X BAR.\n"); 2116 return (ENXIO); 2117 } 2118 2119 return (0); 2120} 2121 2122int 2123t4_map_bar_2(struct adapter *sc) 2124{ 2125 2126 /* 2127 * T4: only iWARP driver uses the userspace doorbells. There is no need 2128 * to map it if RDMA is disabled. 2129 */ 2130 if (is_t4(sc) && sc->rdmacaps == 0) 2131 return (0); 2132 2133 sc->udbs_rid = PCIR_BAR(2); 2134 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 2135 &sc->udbs_rid, RF_ACTIVE); 2136 if (sc->udbs_res == NULL) { 2137 device_printf(sc->dev, "cannot map doorbell BAR.\n"); 2138 return (ENXIO); 2139 } 2140 sc->udbs_base = rman_get_virtual(sc->udbs_res); 2141 2142 if (chip_id(sc) >= CHELSIO_T5) { 2143 setbit(&sc->doorbells, DOORBELL_UDB); 2144#if defined(__i386__) || defined(__amd64__) 2145 if (t5_write_combine) { 2146 int rc, mode; 2147 2148 /* 2149 * Enable write combining on BAR2. This is the 2150 * userspace doorbell BAR and is split into 128B 2151 * (UDBS_SEG_SIZE) doorbell regions, each associated 2152 * with an egress queue. The first 64B has the doorbell 2153 * and the second 64B can be used to submit a tx work 2154 * request with an implicit doorbell. 2155 */ 2156 2157 rc = pmap_change_attr((vm_offset_t)sc->udbs_base, 2158 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING); 2159 if (rc == 0) { 2160 clrbit(&sc->doorbells, DOORBELL_UDB); 2161 setbit(&sc->doorbells, DOORBELL_WCWR); 2162 setbit(&sc->doorbells, DOORBELL_UDBWC); 2163 } else { 2164 device_printf(sc->dev, 2165 "couldn't enable write combining: %d\n", 2166 rc); 2167 } 2168 2169 mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0); 2170 t4_write_reg(sc, A_SGE_STAT_CFG, 2171 V_STATSOURCE_T5(7) | mode); 2172 } 2173#endif 2174 } 2175 2176 return (0); 2177} 2178 2179struct memwin_init { 2180 uint32_t base; 2181 uint32_t aperture; 2182}; 2183 2184static const struct memwin_init t4_memwin[NUM_MEMWIN] = { 2185 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2186 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2187 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 } 2188}; 2189 2190static const struct memwin_init t5_memwin[NUM_MEMWIN] = { 2191 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2192 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2193 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, 2194}; 2195 2196static void 2197setup_memwin(struct adapter *sc) 2198{ 2199 const struct memwin_init *mw_init; 2200 struct memwin *mw; 2201 int i; 2202 uint32_t bar0; 2203 2204 if (is_t4(sc)) { 2205 /* 2206 * Read low 32b of bar0 indirectly via the hardware backdoor 2207 * mechanism. Works from within PCI passthrough environments 2208 * too, where rman_get_start() can return a different value. 
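 * (A read through the backdoor returns the BAR as the device itself sees it.)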
We 2209 * need to program the T4 memory window decoders with the actual 2210 * addresses that will be coming across the PCIe link. 2211 */ 2212 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0)); 2213 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE; 2214 2215 mw_init = &t4_memwin[0]; 2216 } else { 2217 /* T5+ use the relative offset inside the PCIe BAR */ 2218 bar0 = 0; 2219 2220 mw_init = &t5_memwin[0]; 2221 } 2222 2223 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) { 2224 rw_init(&mw->mw_lock, "memory window access"); 2225 mw->mw_base = mw_init->base; 2226 mw->mw_aperture = mw_init->aperture; 2227 mw->mw_curpos = 0; 2228 t4_write_reg(sc, 2229 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i), 2230 (mw->mw_base + bar0) | V_BIR(0) | 2231 V_WINDOW(ilog2(mw->mw_aperture) - 10)); 2232 rw_wlock(&mw->mw_lock); 2233 position_memwin(sc, i, 0); 2234 rw_wunlock(&mw->mw_lock); 2235 } 2236 2237 /* flush */ 2238 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); 2239} 2240 2241/* 2242 * Positions the memory window at the given address in the card's address space. 2243 * There are some alignment requirements and the actual position may be at an 2244 * address prior to the requested address. mw->mw_curpos always has the actual 2245 * position of the window. 2246 */ 2247static void 2248position_memwin(struct adapter *sc, int idx, uint32_t addr) 2249{ 2250 struct memwin *mw; 2251 uint32_t pf; 2252 uint32_t reg; 2253 2254 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2255 mw = &sc->memwin[idx]; 2256 rw_assert(&mw->mw_lock, RA_WLOCKED); 2257 2258 if (is_t4(sc)) { 2259 pf = 0; 2260 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */ 2261 } else { 2262 pf = V_PFNUM(sc->pf); 2263 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */ 2264 } 2265 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx); 2266 t4_write_reg(sc, reg, mw->mw_curpos | pf); 2267 t4_read_reg(sc, reg); /* flush */ 2268} 2269 2270static int 2271rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2272 int len, int rw) 2273{ 2274 struct memwin *mw; 2275 uint32_t mw_end, v; 2276 2277 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2278 2279 /* Memory can only be accessed in naturally aligned 4 byte units */ 2280 if (addr & 3 || len & 3 || len <= 0) 2281 return (EINVAL); 2282 2283 mw = &sc->memwin[idx]; 2284 while (len > 0) { 2285 rw_rlock(&mw->mw_lock); 2286 mw_end = mw->mw_curpos + mw->mw_aperture; 2287 if (addr >= mw_end || addr < mw->mw_curpos) { 2288 /* Will need to reposition the window */ 2289 if (!rw_try_upgrade(&mw->mw_lock)) { 2290 rw_runlock(&mw->mw_lock); 2291 rw_wlock(&mw->mw_lock); 2292 } 2293 rw_assert(&mw->mw_lock, RA_WLOCKED); 2294 position_memwin(sc, idx, addr); 2295 rw_downgrade(&mw->mw_lock); 2296 mw_end = mw->mw_curpos + mw->mw_aperture; 2297 } 2298 rw_assert(&mw->mw_lock, RA_RLOCKED); 2299 while (addr < mw_end && len > 0) { 2300 if (rw == 0) { 2301 v = t4_read_reg(sc, mw->mw_base + addr - 2302 mw->mw_curpos); 2303 *val++ = le32toh(v); 2304 } else { 2305 v = *val++; 2306 t4_write_reg(sc, mw->mw_base + addr - 2307 mw->mw_curpos, htole32(v)); 2308 } 2309 addr += 4; 2310 len -= 4; 2311 } 2312 rw_runlock(&mw->mw_lock); 2313 } 2314 2315 return (0); 2316} 2317 2318static inline int 2319read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2320 int len) 2321{ 2322 2323 return (rw_via_memwin(sc, idx, addr, val, len, 0)); 2324} 2325 2326static inline int 2327write_via_memwin(struct adapter *sc, int idx, uint32_t addr, 2328 const uint32_t *val, int len) 2329{ 2330 
2331 return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1)); 2332} 2333 2334static int 2335t4_range_cmp(const void *a, const void *b) 2336{ 2337 return ((const struct t4_range *)a)->start - 2338 ((const struct t4_range *)b)->start; 2339} 2340 2341/* 2342 * Verify that the memory range specified by the addr/len pair is valid within 2343 * the card's address space. 2344 */ 2345static int 2346validate_mem_range(struct adapter *sc, uint32_t addr, int len) 2347{ 2348 struct t4_range mem_ranges[4], *r, *next; 2349 uint32_t em, addr_len; 2350 int i, n, remaining; 2351 2352 /* Memory can only be accessed in naturally aligned 4 byte units */ 2353 if (addr & 3 || len & 3 || len <= 0) 2354 return (EINVAL); 2355 2356 /* Enabled memories */ 2357 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2358 2359 r = &mem_ranges[0]; 2360 n = 0; 2361 bzero(r, sizeof(mem_ranges)); 2362 if (em & F_EDRAM0_ENABLE) { 2363 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2364 r->size = G_EDRAM0_SIZE(addr_len) << 20; 2365 if (r->size > 0) { 2366 r->start = G_EDRAM0_BASE(addr_len) << 20; 2367 if (addr >= r->start && 2368 addr + len <= r->start + r->size) 2369 return (0); 2370 r++; 2371 n++; 2372 } 2373 } 2374 if (em & F_EDRAM1_ENABLE) { 2375 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2376 r->size = G_EDRAM1_SIZE(addr_len) << 20; 2377 if (r->size > 0) { 2378 r->start = G_EDRAM1_BASE(addr_len) << 20; 2379 if (addr >= r->start && 2380 addr + len <= r->start + r->size) 2381 return (0); 2382 r++; 2383 n++; 2384 } 2385 } 2386 if (em & F_EXT_MEM_ENABLE) { 2387 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2388 r->size = G_EXT_MEM_SIZE(addr_len) << 20; 2389 if (r->size > 0) { 2390 r->start = G_EXT_MEM_BASE(addr_len) << 20; 2391 if (addr >= r->start && 2392 addr + len <= r->start + r->size) 2393 return (0); 2394 r++; 2395 n++; 2396 } 2397 } 2398 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) { 2399 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2400 r->size = G_EXT_MEM1_SIZE(addr_len) << 20; 2401 if (r->size > 0) { 2402 r->start = G_EXT_MEM1_BASE(addr_len) << 20; 2403 if (addr >= r->start && 2404 addr + len <= r->start + r->size) 2405 return (0); 2406 r++; 2407 n++; 2408 } 2409 } 2410 MPASS(n <= nitems(mem_ranges)); 2411 2412 if (n > 1) { 2413 /* Sort and merge the ranges. */ 2414 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp); 2415 2416 /* Start from index 0 and examine the next n - 1 entries. */ 2417 r = &mem_ranges[0]; 2418 for (remaining = n - 1; remaining > 0; remaining--, r++) { 2419 2420 MPASS(r->size > 0); /* r is a valid entry. */ 2421 next = r + 1; 2422 MPASS(next->size > 0); /* and so is the next one. */ 2423 2424 while (r->start + r->size >= next->start) { 2425 /* Merge the next one into the current entry. */ 2426 r->size = max(r->start + r->size, 2427 next->start + next->size) - r->start; 2428 n--; /* One fewer entry in total. */ 2429 if (--remaining == 0) 2430 goto done; /* short circuit */ 2431 next++; 2432 } 2433 if (next != r + 1) { 2434 /* 2435 * Some entries were merged into r and next 2436 * points to the first valid entry that couldn't 2437 * be merged. 2438 */ 2439 MPASS(next->size > 0); /* must be valid */ 2440 memcpy(r + 1, next, remaining * sizeof(*r)); 2441#ifdef INVARIANTS 2442 /* 2443 * This is so that the foo->size assertion in the 2444 * next iteration of the loop does the right 2445 * thing for entries that were pulled up and are 2446 * no longer valid. 
2447 */ 2448 MPASS(n < nitems(mem_ranges)); 2449 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) * 2450 sizeof(struct t4_range)); 2451#endif 2452 } 2453 } 2454done: 2455 /* Done merging the ranges. */ 2456 MPASS(n > 0); 2457 r = &mem_ranges[0]; 2458 for (i = 0; i < n; i++, r++) { 2459 if (addr >= r->start && 2460 addr + len <= r->start + r->size) 2461 return (0); 2462 } 2463 } 2464 2465 return (EFAULT); 2466} 2467 2468static int 2469fwmtype_to_hwmtype(int mtype) 2470{ 2471 2472 switch (mtype) { 2473 case FW_MEMTYPE_EDC0: 2474 return (MEM_EDC0); 2475 case FW_MEMTYPE_EDC1: 2476 return (MEM_EDC1); 2477 case FW_MEMTYPE_EXTMEM: 2478 return (MEM_MC0); 2479 case FW_MEMTYPE_EXTMEM1: 2480 return (MEM_MC1); 2481 default: 2482 panic("%s: cannot translate fw mtype %d.", __func__, mtype); 2483 } 2484} 2485 2486/* 2487 * Verify that the memory range specified by the memtype/offset/len pair is 2488 * valid and lies entirely within the memtype specified. The global address of 2489 * the start of the range is returned in addr. 2490 */ 2491static int 2492validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len, 2493 uint32_t *addr) 2494{ 2495 uint32_t em, addr_len, maddr; 2496 2497 /* Memory can only be accessed in naturally aligned 4 byte units */ 2498 if (off & 3 || len & 3 || len == 0) 2499 return (EINVAL); 2500 2501 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2502 switch (fwmtype_to_hwmtype(mtype)) { 2503 case MEM_EDC0: 2504 if (!(em & F_EDRAM0_ENABLE)) 2505 return (EINVAL); 2506 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2507 maddr = G_EDRAM0_BASE(addr_len) << 20; 2508 break; 2509 case MEM_EDC1: 2510 if (!(em & F_EDRAM1_ENABLE)) 2511 return (EINVAL); 2512 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2513 maddr = G_EDRAM1_BASE(addr_len) << 20; 2514 break; 2515 case MEM_MC: 2516 if (!(em & F_EXT_MEM_ENABLE)) 2517 return (EINVAL); 2518 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2519 maddr = G_EXT_MEM_BASE(addr_len) << 20; 2520 break; 2521 case MEM_MC1: 2522 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE)) 2523 return (EINVAL); 2524 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2525 maddr = G_EXT_MEM1_BASE(addr_len) << 20; 2526 break; 2527 default: 2528 return (EINVAL); 2529 } 2530 2531 *addr = maddr + off; /* global address */ 2532 return (validate_mem_range(sc, *addr, len)); 2533} 2534 2535static int 2536fixup_devlog_params(struct adapter *sc) 2537{ 2538 struct devlog_params *dparams = &sc->params.devlog; 2539 int rc; 2540 2541 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start, 2542 dparams->size, &dparams->addr); 2543 2544 return (rc); 2545} 2546 2547static int 2548cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis, 2549 struct intrs_and_queues *iaq) 2550{ 2551 int rc, itype, navail, nrxq10g, nrxq1g, n; 2552 int nofldrxq10g = 0, nofldrxq1g = 0; 2553 2554 bzero(iaq, sizeof(*iaq)); 2555 2556 iaq->ntxq10g = t4_ntxq10g; 2557 iaq->ntxq1g = t4_ntxq1g; 2558 iaq->ntxq_vi = t4_ntxq_vi; 2559 iaq->nrxq10g = nrxq10g = t4_nrxq10g; 2560 iaq->nrxq1g = nrxq1g = t4_nrxq1g; 2561 iaq->nrxq_vi = t4_nrxq_vi; 2562 iaq->rsrv_noflowq = t4_rsrv_noflowq; 2563#ifdef TCP_OFFLOAD 2564 if (is_offload(sc)) { 2565 iaq->nofldtxq10g = t4_nofldtxq10g; 2566 iaq->nofldtxq1g = t4_nofldtxq1g; 2567 iaq->nofldtxq_vi = t4_nofldtxq_vi; 2568 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g; 2569 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g; 2570 iaq->nofldrxq_vi = t4_nofldrxq_vi; 2571 } 2572#endif 2573#ifdef DEV_NETMAP 2574 iaq->nnmtxq_vi = t4_nnmtxq_vi; 2575 iaq->nnmrxq_vi = t4_nnmrxq_vi; 
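	/*
	 * The netmap rxqs of the extra VIs share interrupt vectors with the
	 * NIC rxqs; see the "Best option" comment below.
	 */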
2576#endif 2577 2578 for (itype = INTR_MSIX; itype; itype >>= 1) { 2579 2580 if ((itype & t4_intr_types) == 0) 2581 continue; /* not allowed */ 2582 2583 if (itype == INTR_MSIX) 2584 navail = pci_msix_count(sc->dev); 2585 else if (itype == INTR_MSI) 2586 navail = pci_msi_count(sc->dev); 2587 else 2588 navail = 1; 2589restart: 2590 if (navail == 0) 2591 continue; 2592 2593 iaq->intr_type = itype; 2594 iaq->intr_flags_10g = 0; 2595 iaq->intr_flags_1g = 0; 2596 2597 /* 2598 * Best option: an interrupt vector for errors, one for the 2599 * firmware event queue, and one for every rxq (NIC and TOE) of 2600 * every VI. The VIs that support netmap use the same 2601 * interrupts for the NIC rx queues and the netmap rx queues 2602 * because only one set of queues is active at a time. 2603 */ 2604 iaq->nirq = T4_EXTRA_INTR; 2605 iaq->nirq += n10g * (nrxq10g + nofldrxq10g); 2606 iaq->nirq += n1g * (nrxq1g + nofldrxq1g); 2607 iaq->nirq += (n10g + n1g) * (num_vis - 1) * 2608 max(iaq->nrxq_vi, iaq->nnmrxq_vi); /* See comment above. */ 2609 iaq->nirq += (n10g + n1g) * (num_vis - 1) * iaq->nofldrxq_vi; 2610 if (iaq->nirq <= navail && 2611 (itype != INTR_MSI || powerof2(iaq->nirq))) { 2612 iaq->intr_flags_10g = INTR_ALL; 2613 iaq->intr_flags_1g = INTR_ALL; 2614 goto allocate; 2615 } 2616 2617 /* Disable the VIs (and netmap) if there aren't enough intrs */ 2618 if (num_vis > 1) { 2619 device_printf(sc->dev, "virtual interfaces disabled " 2620 "because num_vis=%u with current settings " 2621 "(nrxq10g=%u, nrxq1g=%u, nofldrxq10g=%u, " 2622 "nofldrxq1g=%u, nrxq_vi=%u nofldrxq_vi=%u, " 2623 "nnmrxq_vi=%u) would need %u interrupts but " 2624 "only %u are available.\n", num_vis, nrxq10g, 2625 nrxq1g, nofldrxq10g, nofldrxq1g, iaq->nrxq_vi, 2626 iaq->nofldrxq_vi, iaq->nnmrxq_vi, iaq->nirq, 2627 navail); 2628 num_vis = 1; 2629 iaq->ntxq_vi = iaq->nrxq_vi = 0; 2630 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0; 2631 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0; 2632 goto restart; 2633 } 2634 2635 /* 2636 * Second best option: a vector for errors, one for the firmware 2637 * event queue, and vectors for either all the NIC rx queues or 2638 * all the TOE rx queues. The queues that don't get vectors 2639 * will forward their interrupts to those that do. 2640 */ 2641 iaq->nirq = T4_EXTRA_INTR; 2642 if (nrxq10g >= nofldrxq10g) { 2643 iaq->intr_flags_10g = INTR_RXQ; 2644 iaq->nirq += n10g * nrxq10g; 2645 } else { 2646 iaq->intr_flags_10g = INTR_OFLD_RXQ; 2647 iaq->nirq += n10g * nofldrxq10g; 2648 } 2649 if (nrxq1g >= nofldrxq1g) { 2650 iaq->intr_flags_1g = INTR_RXQ; 2651 iaq->nirq += n1g * nrxq1g; 2652 } else { 2653 iaq->intr_flags_1g = INTR_OFLD_RXQ; 2654 iaq->nirq += n1g * nofldrxq1g; 2655 } 2656 if (iaq->nirq <= navail && 2657 (itype != INTR_MSI || powerof2(iaq->nirq))) 2658 goto allocate; 2659 2660 /* 2661 * Next best option: an interrupt vector for errors, one for the 2662 * firmware event queue, and at least one per main-VI. At this 2663 * point we know we'll have to downsize nrxq and/or nofldrxq to 2664 * fit what's available to us. 2665 */ 2666 iaq->nirq = T4_EXTRA_INTR; 2667 iaq->nirq += n10g + n1g; 2668 if (iaq->nirq <= navail) { 2669 int leftover = navail - iaq->nirq; 2670 2671 if (n10g > 0) { 2672 int target = max(nrxq10g, nofldrxq10g); 2673 2674 iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ? 
2675 INTR_RXQ : INTR_OFLD_RXQ; 2676 2677 n = 1; 2678 while (n < target && leftover >= n10g) { 2679 leftover -= n10g; 2680 iaq->nirq += n10g; 2681 n++; 2682 } 2683 iaq->nrxq10g = min(n, nrxq10g); 2684#ifdef TCP_OFFLOAD 2685 iaq->nofldrxq10g = min(n, nofldrxq10g); 2686#endif 2687 } 2688 2689 if (n1g > 0) { 2690 int target = max(nrxq1g, nofldrxq1g); 2691 2692 iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ? 2693 INTR_RXQ : INTR_OFLD_RXQ; 2694 2695 n = 1; 2696 while (n < target && leftover >= n1g) { 2697 leftover -= n1g; 2698 iaq->nirq += n1g; 2699 n++; 2700 } 2701 iaq->nrxq1g = min(n, nrxq1g); 2702#ifdef TCP_OFFLOAD 2703 iaq->nofldrxq1g = min(n, nofldrxq1g); 2704#endif 2705 } 2706 2707 if (itype != INTR_MSI || powerof2(iaq->nirq)) 2708 goto allocate; 2709 } 2710 2711 /* 2712 * Least desirable option: one interrupt vector for everything. 2713 */ 2714 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1; 2715 iaq->intr_flags_10g = iaq->intr_flags_1g = 0; 2716#ifdef TCP_OFFLOAD 2717 if (is_offload(sc)) 2718 iaq->nofldrxq10g = iaq->nofldrxq1g = 1; 2719#endif 2720allocate: 2721 navail = iaq->nirq; 2722 rc = 0; 2723 if (itype == INTR_MSIX) 2724 rc = pci_alloc_msix(sc->dev, &navail); 2725 else if (itype == INTR_MSI) 2726 rc = pci_alloc_msi(sc->dev, &navail); 2727 2728 if (rc == 0) { 2729 if (navail == iaq->nirq) 2730 return (0); 2731 2732 /* 2733 * Didn't get the number requested. Use whatever number 2734 * the kernel is willing to allocate (it's in navail). 2735 */ 2736 device_printf(sc->dev, "fewer vectors than requested, " 2737 "type=%d, req=%d, rcvd=%d; will downshift req.\n", 2738 itype, iaq->nirq, navail); 2739 pci_release_msi(sc->dev); 2740 goto restart; 2741 } 2742 2743 device_printf(sc->dev, 2744 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n", 2745 itype, rc, iaq->nirq, navail); 2746 } 2747 2748 device_printf(sc->dev, 2749 "failed to find a usable interrupt type. 
" 2750 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types, 2751 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); 2752 2753 return (ENXIO); 2754} 2755 2756#define FW_VERSION(chip) ( \ 2757 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ 2758 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ 2759 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ 2760 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) 2761#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) 2762 2763struct fw_info { 2764 uint8_t chip; 2765 char *kld_name; 2766 char *fw_mod_name; 2767 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */ 2768} fw_info[] = { 2769 { 2770 .chip = CHELSIO_T4, 2771 .kld_name = "t4fw_cfg", 2772 .fw_mod_name = "t4fw", 2773 .fw_hdr = { 2774 .chip = FW_HDR_CHIP_T4, 2775 .fw_ver = htobe32_const(FW_VERSION(T4)), 2776 .intfver_nic = FW_INTFVER(T4, NIC), 2777 .intfver_vnic = FW_INTFVER(T4, VNIC), 2778 .intfver_ofld = FW_INTFVER(T4, OFLD), 2779 .intfver_ri = FW_INTFVER(T4, RI), 2780 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), 2781 .intfver_iscsi = FW_INTFVER(T4, ISCSI), 2782 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), 2783 .intfver_fcoe = FW_INTFVER(T4, FCOE), 2784 }, 2785 }, { 2786 .chip = CHELSIO_T5, 2787 .kld_name = "t5fw_cfg", 2788 .fw_mod_name = "t5fw", 2789 .fw_hdr = { 2790 .chip = FW_HDR_CHIP_T5, 2791 .fw_ver = htobe32_const(FW_VERSION(T5)), 2792 .intfver_nic = FW_INTFVER(T5, NIC), 2793 .intfver_vnic = FW_INTFVER(T5, VNIC), 2794 .intfver_ofld = FW_INTFVER(T5, OFLD), 2795 .intfver_ri = FW_INTFVER(T5, RI), 2796 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), 2797 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 2798 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), 2799 .intfver_fcoe = FW_INTFVER(T5, FCOE), 2800 }, 2801 }, { 2802 .chip = CHELSIO_T6, 2803 .kld_name = "t6fw_cfg", 2804 .fw_mod_name = "t6fw", 2805 .fw_hdr = { 2806 .chip = FW_HDR_CHIP_T6, 2807 .fw_ver = htobe32_const(FW_VERSION(T6)), 2808 .intfver_nic = FW_INTFVER(T6, NIC), 2809 .intfver_vnic = FW_INTFVER(T6, VNIC), 2810 .intfver_ofld = FW_INTFVER(T6, OFLD), 2811 .intfver_ri = FW_INTFVER(T6, RI), 2812 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU), 2813 .intfver_iscsi = FW_INTFVER(T6, ISCSI), 2814 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU), 2815 .intfver_fcoe = FW_INTFVER(T6, FCOE), 2816 }, 2817 } 2818}; 2819 2820static struct fw_info * 2821find_fw_info(int chip) 2822{ 2823 int i; 2824 2825 for (i = 0; i < nitems(fw_info); i++) { 2826 if (fw_info[i].chip == chip) 2827 return (&fw_info[i]); 2828 } 2829 return (NULL); 2830} 2831 2832/* 2833 * Is the given firmware API compatible with the one the driver was compiled 2834 * with? 2835 */ 2836static int 2837fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) 2838{ 2839 2840 /* short circuit if it's the exact same firmware version */ 2841 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 2842 return (1); 2843 2844 /* 2845 * XXX: Is this too conservative? Perhaps I should limit this to the 2846 * features that are supported in the driver. 2847 */ 2848#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 2849 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 2850 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && 2851 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) 2852 return (1); 2853#undef SAME_INTF 2854 2855 return (0); 2856} 2857 2858/* 2859 * The firmware in the KLD is usable, but should it be installed? This routine 2860 * explains itself in detail if it indicates the KLD firmware should be 2861 * installed. 
2862 */ 2863static int 2864should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c) 2865{ 2866 const char *reason; 2867 2868 if (!card_fw_usable) { 2869 reason = "incompatible or unusable"; 2870 goto install; 2871 } 2872 2873 if (k > c) { 2874 reason = "older than the version bundled with this driver"; 2875 goto install; 2876 } 2877 2878 if (t4_fw_install == 2 && k != c) { 2879 reason = "different than the version bundled with this driver"; 2880 goto install; 2881 } 2882 2883 return (0); 2884 2885install: 2886 if (t4_fw_install == 0) { 2887 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2888 "but the driver is prohibited from installing a different " 2889 "firmware on the card.\n", 2890 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2891 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); 2892 2893 return (0); 2894 } 2895 2896 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2897 "installing firmware %u.%u.%u.%u on card.\n", 2898 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2899 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, 2900 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2901 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2902 2903 return (1); 2904} 2905/* 2906 * Establish contact with the firmware and determine if we are the master driver 2907 * or not, and whether we are responsible for chip initialization. 2908 */ 2909static int 2910prep_firmware(struct adapter *sc) 2911{ 2912 const struct firmware *fw = NULL, *default_cfg; 2913 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1; 2914 enum dev_state state; 2915 struct fw_info *fw_info; 2916 struct fw_hdr *card_fw; /* fw on the card */ 2917 const struct fw_hdr *kld_fw; /* fw in the KLD */ 2918 const struct fw_hdr *drv_fw; /* fw header the driver was compiled 2919 against */ 2920 2921 /* Contact firmware. */ 2922 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); 2923 if (rc < 0 || state == DEV_STATE_ERR) { 2924 rc = -rc; 2925 device_printf(sc->dev, 2926 "failed to connect to the firmware: %d, %d.\n", rc, state); 2927 return (rc); 2928 } 2929 pf = rc; 2930 if (pf == sc->mbox) 2931 sc->flags |= MASTER_PF; 2932 else if (state == DEV_STATE_UNINIT) { 2933 /* 2934 * We didn't get to be the master so we definitely won't be 2935 * configuring the chip. It's a bug if someone else hasn't 2936 * configured it already. 2937 */ 2938 device_printf(sc->dev, "couldn't be master(%d), " 2939 "device not already initialized either(%d).\n", rc, state); 2940 return (EDOOFUS); 2941 } 2942 2943 /* This is the firmware whose headers the driver was compiled against */ 2944 fw_info = find_fw_info(chip_id(sc)); 2945 if (fw_info == NULL) { 2946 device_printf(sc->dev, 2947 "unable to look up firmware information for chip %d.\n", 2948 chip_id(sc)); 2949 return (EINVAL); 2950 } 2951 drv_fw = &fw_info->fw_hdr; 2952 2953 /* 2954 * The firmware KLD contains many modules. The KLD name is also the 2955 * name of the module that contains the default config file. 
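	 * (That is t4fw_cfg, t5fw_cfg, or t6fw_cfg, per the fw_info table above.)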
2956 */ 2957 default_cfg = firmware_get(fw_info->kld_name); 2958 2959 /* Read the header of the firmware on the card */ 2960 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); 2961 rc = -t4_read_flash(sc, FLASH_FW_START, 2962 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1); 2963 if (rc == 0) 2964 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw); 2965 else { 2966 device_printf(sc->dev, 2967 "Unable to read card's firmware header: %d\n", rc); 2968 card_fw_usable = 0; 2969 } 2970 2971 /* This is the firmware in the KLD */ 2972 fw = firmware_get(fw_info->fw_mod_name); 2973 if (fw != NULL) { 2974 kld_fw = (const void *)fw->data; 2975 kld_fw_usable = fw_compatible(drv_fw, kld_fw); 2976 } else { 2977 kld_fw = NULL; 2978 kld_fw_usable = 0; 2979 } 2980 2981 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && 2982 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) { 2983 /* 2984 * Common case: the firmware on the card is an exact match and 2985 * the KLD is an exact match too, or the KLD is 2986 * absent/incompatible. Note that t4_fw_install = 2 is ignored 2987 * here -- use cxgbetool loadfw if you want to reinstall the 2988 * same firmware as the one on the card. 2989 */ 2990 } else if (kld_fw_usable && state == DEV_STATE_UNINIT && 2991 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver), 2992 be32toh(card_fw->fw_ver))) { 2993 2994 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); 2995 if (rc != 0) { 2996 device_printf(sc->dev, 2997 "failed to install firmware: %d\n", rc); 2998 goto done; 2999 } 3000 3001 /* Installed successfully, update the cached header too. */ 3002 memcpy(card_fw, kld_fw, sizeof(*card_fw)); 3003 card_fw_usable = 1; 3004 need_fw_reset = 0; /* already reset as part of load_fw */ 3005 } 3006 3007 if (!card_fw_usable) { 3008 uint32_t d, c, k; 3009 3010 d = ntohl(drv_fw->fw_ver); 3011 c = ntohl(card_fw->fw_ver); 3012 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0; 3013 3014 device_printf(sc->dev, "Cannot find a usable firmware: " 3015 "fw_install %d, chip state %d, " 3016 "driver compiled with %d.%d.%d.%d, " 3017 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n", 3018 t4_fw_install, state, 3019 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 3020 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d), 3021 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 3022 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), 3023 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 3024 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 3025 rc = EINVAL; 3026 goto done; 3027 } 3028 3029 /* Reset device */ 3030 if (need_fw_reset && 3031 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) { 3032 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); 3033 if (rc != ETIMEDOUT && rc != EIO) 3034 t4_fw_bye(sc, sc->mbox); 3035 goto done; 3036 } 3037 sc->flags |= FW_OK; 3038 3039 rc = get_params__pre_init(sc); 3040 if (rc != 0) 3041 goto done; /* error message displayed already */ 3042 3043 /* Partition adapter resources as specified in the config file. 
*/ 3044 if (state == DEV_STATE_UNINIT) { 3045 3046 KASSERT(sc->flags & MASTER_PF, 3047 ("%s: trying to change chip settings when not master.", 3048 __func__)); 3049 3050 rc = partition_resources(sc, default_cfg, fw_info->kld_name); 3051 if (rc != 0) 3052 goto done; /* error message displayed already */ 3053 3054 t4_tweak_chip_settings(sc); 3055 3056 /* get basic stuff going */ 3057 rc = -t4_fw_initialize(sc, sc->mbox); 3058 if (rc != 0) { 3059 device_printf(sc->dev, "fw init failed: %d.\n", rc); 3060 goto done; 3061 } 3062 } else { 3063 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf); 3064 sc->cfcsum = 0; 3065 } 3066 3067done: 3068 free(card_fw, M_CXGBE); 3069 if (fw != NULL) 3070 firmware_put(fw, FIRMWARE_UNLOAD); 3071 if (default_cfg != NULL) 3072 firmware_put(default_cfg, FIRMWARE_UNLOAD); 3073 3074 return (rc); 3075} 3076 3077#define FW_PARAM_DEV(param) \ 3078 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 3079 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 3080#define FW_PARAM_PFVF(param) \ 3081 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 3082 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 3083 3084/* 3085 * Partition chip resources for use between various PFs, VFs, etc. 3086 */ 3087static int 3088partition_resources(struct adapter *sc, const struct firmware *default_cfg, 3089 const char *name_prefix) 3090{ 3091 const struct firmware *cfg = NULL; 3092 int rc = 0; 3093 struct fw_caps_config_cmd caps; 3094 uint32_t mtype, moff, finicsum, cfcsum; 3095 3096 /* 3097 * Figure out what configuration file to use. Pick the default config 3098 * file for the card if the user hasn't specified one explicitly. 3099 */ 3100 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file); 3101 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { 3102 /* Card specific overrides go here. */ 3103 if (pci_get_device(sc->dev) == 0x440a) 3104 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF); 3105 if (is_fpga(sc)) 3106 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF); 3107 } 3108 3109 /* 3110 * We need to load another module if the profile is anything except 3111 * "default" or "flash". 
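 * The module is named <kld_name>_<profile>; e.g. a hypothetical "uwire"
 * profile on a T4 would load "t4fw_cfg_uwire".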
3112 */ 3113 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 && 3114 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 3115 char s[32]; 3116 3117 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file); 3118 cfg = firmware_get(s); 3119 if (cfg == NULL) { 3120 if (default_cfg != NULL) { 3121 device_printf(sc->dev, 3122 "unable to load module \"%s\" for " 3123 "configuration profile \"%s\", will use " 3124 "the default config file instead.\n", 3125 s, sc->cfg_file); 3126 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 3127 "%s", DEFAULT_CF); 3128 } else { 3129 device_printf(sc->dev, 3130 "unable to load module \"%s\" for " 3131 "configuration profile \"%s\", will use " 3132 "the config file on the card's flash " 3133 "instead.\n", s, sc->cfg_file); 3134 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 3135 "%s", FLASH_CF); 3136 } 3137 } 3138 } 3139 3140 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 && 3141 default_cfg == NULL) { 3142 device_printf(sc->dev, 3143 "default config file not available, will use the config " 3144 "file on the card's flash instead.\n"); 3145 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF); 3146 } 3147 3148 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 3149 u_int cflen; 3150 const uint32_t *cfdata; 3151 uint32_t param, val, addr; 3152 3153 KASSERT(cfg != NULL || default_cfg != NULL, 3154 ("%s: no config to upload", __func__)); 3155 3156 /* 3157 * Ask the firmware where it wants us to upload the config file. 3158 */ 3159 param = FW_PARAM_DEV(CF); 3160 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 3161 if (rc != 0) { 3162 /* No support for config file? Shouldn't happen. */ 3163 device_printf(sc->dev, 3164 "failed to query config file location: %d.\n", rc); 3165 goto done; 3166 } 3167 mtype = G_FW_PARAMS_PARAM_Y(val); 3168 moff = G_FW_PARAMS_PARAM_Z(val) << 16; 3169 3170 /* 3171 * XXX: sheer laziness. We deliberately added 4 bytes of 3172 * useless stuffing/comments at the end of the config file so 3173 * it's ok to simply throw away the last remaining bytes when 3174 * the config file is not an exact multiple of 4. This also 3175 * helps with the validate_mt_off_len check. 3176 */ 3177 if (cfg != NULL) { 3178 cflen = cfg->datasize & ~3; 3179 cfdata = cfg->data; 3180 } else { 3181 cflen = default_cfg->datasize & ~3; 3182 cfdata = default_cfg->data; 3183 } 3184 3185 if (cflen > FLASH_CFG_MAX_SIZE) { 3186 device_printf(sc->dev, 3187 "config file too long (%d, max allowed is %d). " 3188 "Will try to use the config on the card, if any.\n", 3189 cflen, FLASH_CFG_MAX_SIZE); 3190 goto use_config_on_flash; 3191 } 3192 3193 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr); 3194 if (rc != 0) { 3195 device_printf(sc->dev, 3196 "%s: addr (%d/0x%x) or len %d is not valid: %d. 
" 3197 "Will try to use the config on the card, if any.\n", 3198 __func__, mtype, moff, cflen, rc); 3199 goto use_config_on_flash; 3200 } 3201 write_via_memwin(sc, 2, addr, cfdata, cflen); 3202 } else { 3203use_config_on_flash: 3204 mtype = FW_MEMTYPE_FLASH; 3205 moff = t4_flash_cfg_addr(sc); 3206 } 3207 3208 bzero(&caps, sizeof(caps)); 3209 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3210 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3211 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | 3212 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 3213 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); 3214 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3215 if (rc != 0) { 3216 device_printf(sc->dev, 3217 "failed to pre-process config file: %d " 3218 "(mtype %d, moff 0x%x).\n", rc, mtype, moff); 3219 goto done; 3220 } 3221 3222 finicsum = be32toh(caps.finicsum); 3223 cfcsum = be32toh(caps.cfcsum); 3224 if (finicsum != cfcsum) { 3225 device_printf(sc->dev, 3226 "WARNING: config file checksum mismatch: %08x %08x\n", 3227 finicsum, cfcsum); 3228 } 3229 sc->cfcsum = cfcsum; 3230 3231#define LIMIT_CAPS(x) do { \ 3232 caps.x &= htobe16(t4_##x##_allowed); \ 3233} while (0) 3234 3235 /* 3236 * Let the firmware know what features will (not) be used so it can tune 3237 * things accordingly. 3238 */ 3239 LIMIT_CAPS(nbmcaps); 3240 LIMIT_CAPS(linkcaps); 3241 LIMIT_CAPS(switchcaps); 3242 LIMIT_CAPS(niccaps); 3243 LIMIT_CAPS(toecaps); 3244 LIMIT_CAPS(rdmacaps); 3245 LIMIT_CAPS(cryptocaps); 3246 LIMIT_CAPS(iscsicaps); 3247 LIMIT_CAPS(fcoecaps); 3248#undef LIMIT_CAPS 3249 3250 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3251 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 3252 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3253 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); 3254 if (rc != 0) { 3255 device_printf(sc->dev, 3256 "failed to process config file: %d.\n", rc); 3257 } 3258done: 3259 if (cfg != NULL) 3260 firmware_put(cfg, FIRMWARE_UNLOAD); 3261 return (rc); 3262} 3263 3264/* 3265 * Retrieve parameters that are needed (or nice to have) very early. 
3266 */ 3267static int 3268get_params__pre_init(struct adapter *sc) 3269{ 3270 int rc; 3271 uint32_t param[2], val[2]; 3272 3273 t4_get_version_info(sc); 3274 3275 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", 3276 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 3277 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 3278 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 3279 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 3280 3281 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u", 3282 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers), 3283 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers), 3284 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers), 3285 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers)); 3286 3287 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u", 3288 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers), 3289 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers), 3290 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers), 3291 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers)); 3292 3293 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u", 3294 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers), 3295 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers), 3296 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers), 3297 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers)); 3298 3299 param[0] = FW_PARAM_DEV(PORTVEC); 3300 param[1] = FW_PARAM_DEV(CCLK); 3301 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3302 if (rc != 0) { 3303 device_printf(sc->dev, 3304 "failed to query parameters (pre_init): %d.\n", rc); 3305 return (rc); 3306 } 3307 3308 sc->params.portvec = val[0]; 3309 sc->params.nports = bitcount32(val[0]); 3310 sc->params.vpd.cclk = val[1]; 3311 3312 /* Read device log parameters. */ 3313 rc = -t4_init_devlog_params(sc, 1); 3314 if (rc == 0) 3315 fixup_devlog_params(sc); 3316 else { 3317 device_printf(sc->dev, 3318 "failed to get devlog parameters: %d.\n", rc); 3319 rc = 0; /* devlog isn't critical for device operation */ 3320 } 3321 3322 return (rc); 3323} 3324 3325/* 3326 * Retrieve various parameters that are of interest to the driver. The device 3327 * has been initialized by the firmware at this point. 
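 * This covers the queue and TID ranges, the capability set, and the
 * resource ranges of any offloads (ETHOFLD, TOE, RDMA, iSCSI) in use.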
*/ 3328 3329static int 3330get_params__post_init(struct adapter *sc) 3331{ 3332 int rc; 3333 uint32_t param[7], val[7]; 3334 struct fw_caps_config_cmd caps; 3335 3336 param[0] = FW_PARAM_PFVF(IQFLINT_START); 3337 param[1] = FW_PARAM_PFVF(EQ_START); 3338 param[2] = FW_PARAM_PFVF(FILTER_START); 3339 param[3] = FW_PARAM_PFVF(FILTER_END); 3340 param[4] = FW_PARAM_PFVF(L2T_START); 3341 param[5] = FW_PARAM_PFVF(L2T_END); 3342 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3343 if (rc != 0) { 3344 device_printf(sc->dev, 3345 "failed to query parameters (post_init): %d.\n", rc); 3346 return (rc); 3347 } 3348 3349 sc->sge.iq_start = val[0]; 3350 sc->sge.eq_start = val[1]; 3351 sc->tids.ftid_base = val[2]; 3352 sc->tids.nftids = val[3] - val[2] + 1; 3353 sc->params.ftid_min = val[2]; 3354 sc->params.ftid_max = val[3]; 3355 sc->vres.l2t.start = val[4]; 3356 sc->vres.l2t.size = val[5] - val[4] + 1; 3357 KASSERT(sc->vres.l2t.size <= L2T_SIZE, 3358 ("%s: L2 table size (%u) larger than expected (%u)", 3359 __func__, sc->vres.l2t.size, L2T_SIZE)); 3360 3361 /* get capabilities */ 3362 bzero(&caps, sizeof(caps)); 3363 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3364 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3365 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3366 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3367 if (rc != 0) { 3368 device_printf(sc->dev, 3369 "failed to get card capabilities: %d.\n", rc); 3370 return (rc); 3371 } 3372 3373#define READ_CAPS(x) do { \ 3374 sc->x = htobe16(caps.x); \ 3375} while (0) 3376 READ_CAPS(nbmcaps); 3377 READ_CAPS(linkcaps); 3378 READ_CAPS(switchcaps); 3379 READ_CAPS(niccaps); 3380 READ_CAPS(toecaps); 3381 READ_CAPS(rdmacaps); 3382 READ_CAPS(cryptocaps); 3383 READ_CAPS(iscsicaps); 3384 READ_CAPS(fcoecaps); 3385 3386 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) { 3387 param[0] = FW_PARAM_PFVF(ETHOFLD_START); 3388 param[1] = FW_PARAM_PFVF(ETHOFLD_END); 3389 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3390 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val); 3391 if (rc != 0) { 3392 device_printf(sc->dev, 3393 "failed to query NIC parameters: %d.\n", rc); 3394 return (rc); 3395 } 3396 sc->tids.etid_base = val[0]; 3397 sc->params.etid_min = val[0]; 3398 sc->tids.netids = val[1] - val[0] + 1; 3399 sc->params.netids = sc->tids.netids; 3400 sc->params.eo_wr_cred = val[2]; 3401 sc->params.ethoffload = 1; 3402 } 3403 3404 if (sc->toecaps) { 3405 /* query offload-related parameters */ 3406 param[0] = FW_PARAM_DEV(NTID); 3407 param[1] = FW_PARAM_PFVF(SERVER_START); 3408 param[2] = FW_PARAM_PFVF(SERVER_END); 3409 param[3] = FW_PARAM_PFVF(TDDP_START); 3410 param[4] = FW_PARAM_PFVF(TDDP_END); 3411 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3412 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3413 if (rc != 0) { 3414 device_printf(sc->dev, 3415 "failed to query TOE parameters: %d.\n", rc); 3416 return (rc); 3417 } 3418 sc->tids.ntids = val[0]; 3419 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 3420 sc->tids.stid_base = val[1]; 3421 sc->tids.nstids = val[2] - val[1] + 1; 3422 sc->vres.ddp.start = val[3]; 3423 sc->vres.ddp.size = val[4] - val[3] + 1; 3424 sc->params.ofldq_wr_cred = val[5]; 3425 sc->params.offload = 1; 3426 } 3427 if (sc->rdmacaps) { 3428 param[0] = FW_PARAM_PFVF(STAG_START); 3429 param[1] = FW_PARAM_PFVF(STAG_END); 3430 param[2] = FW_PARAM_PFVF(RQ_START); 3431 param[3] = FW_PARAM_PFVF(RQ_END); 3432 param[4] = FW_PARAM_PFVF(PBL_START); 3433 param[5] = FW_PARAM_PFVF(PBL_END); 3434 rc = 
-t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3435 if (rc != 0) { 3436 device_printf(sc->dev, 3437 "failed to query RDMA parameters(1): %d.\n", rc); 3438 return (rc); 3439 } 3440 sc->vres.stag.start = val[0]; 3441 sc->vres.stag.size = val[1] - val[0] + 1; 3442 sc->vres.rq.start = val[2]; 3443 sc->vres.rq.size = val[3] - val[2] + 1; 3444 sc->vres.pbl.start = val[4]; 3445 sc->vres.pbl.size = val[5] - val[4] + 1; 3446 3447 param[0] = FW_PARAM_PFVF(SQRQ_START); 3448 param[1] = FW_PARAM_PFVF(SQRQ_END); 3449 param[2] = FW_PARAM_PFVF(CQ_START); 3450 param[3] = FW_PARAM_PFVF(CQ_END); 3451 param[4] = FW_PARAM_PFVF(OCQ_START); 3452 param[5] = FW_PARAM_PFVF(OCQ_END); 3453 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3454 if (rc != 0) { 3455 device_printf(sc->dev, 3456 "failed to query RDMA parameters(2): %d.\n", rc); 3457 return (rc); 3458 } 3459 sc->vres.qp.start = val[0]; 3460 sc->vres.qp.size = val[1] - val[0] + 1; 3461 sc->vres.cq.start = val[2]; 3462 sc->vres.cq.size = val[3] - val[2] + 1; 3463 sc->vres.ocq.start = val[4]; 3464 sc->vres.ocq.size = val[5] - val[4] + 1; 3465 } 3466 if (sc->iscsicaps) { 3467 param[0] = FW_PARAM_PFVF(ISCSI_START); 3468 param[1] = FW_PARAM_PFVF(ISCSI_END); 3469 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3470 if (rc != 0) { 3471 device_printf(sc->dev, 3472 "failed to query iSCSI parameters: %d.\n", rc); 3473 return (rc); 3474 } 3475 sc->vres.iscsi.start = val[0]; 3476 sc->vres.iscsi.size = val[1] - val[0] + 1; 3477 } 3478 3479 t4_init_sge_params(sc); 3480 3481 /* 3482 * We've got the params we wanted to query via the firmware. Now grab 3483 * some others directly from the chip. 3484 */ 3485 rc = t4_read_chip_settings(sc); 3486 3487 return (rc); 3488} 3489 3490static int 3491set_params__post_init(struct adapter *sc) 3492{ 3493 uint32_t param, val; 3494 3495 /* ask for encapsulated CPLs */ 3496 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); 3497 val = 1; 3498 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 3499 3500 return (0); 3501} 3502 3503#undef FW_PARAM_PFVF 3504#undef FW_PARAM_DEV 3505 3506static void 3507t4_set_desc(struct adapter *sc) 3508{ 3509 char buf[128]; 3510 struct adapter_params *p = &sc->params; 3511 3512 snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id); 3513 3514 device_set_desc_copy(sc->dev, buf); 3515} 3516 3517static void 3518build_medialist(struct port_info *pi, struct ifmedia *media) 3519{ 3520 int m; 3521 3522 PORT_LOCK(pi); 3523 3524 ifmedia_removeall(media); 3525 3526 m = IFM_ETHER | IFM_FDX; 3527 3528 switch (pi->port_type) { 3529 case FW_PORT_TYPE_BT_XFI: 3530 case FW_PORT_TYPE_BT_XAUI: 3531 ifmedia_add(media, m | IFM_10G_T, 0, NULL); 3532 /* fall through */ 3533 3534 case FW_PORT_TYPE_BT_SGMII: 3535 ifmedia_add(media, m | IFM_1000_T, 0, NULL); 3536 ifmedia_add(media, m | IFM_100_TX, 0, NULL); 3537 ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL); 3538 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 3539 break; 3540 3541 case FW_PORT_TYPE_CX4: 3542 ifmedia_add(media, m | IFM_10G_CX4, 0, NULL); 3543 ifmedia_set(media, m | IFM_10G_CX4); 3544 break; 3545 3546 case FW_PORT_TYPE_QSFP_10G: 3547 case FW_PORT_TYPE_SFP: 3548 case FW_PORT_TYPE_FIBER_XFI: 3549 case FW_PORT_TYPE_FIBER_XAUI: 3550 switch (pi->mod_type) { 3551 3552 case FW_PORT_MOD_TYPE_LR: 3553 ifmedia_add(media, m | IFM_10G_LR, 0, NULL); 3554 ifmedia_set(media, m | IFM_10G_LR); 3555 break; 3556 3557 case FW_PORT_MOD_TYPE_SR: 3558 ifmedia_add(media, m | IFM_10G_SR, 0, NULL); 3559 ifmedia_set(media, m | IFM_10G_SR); 3560 break; 3561 3562 
case FW_PORT_MOD_TYPE_LRM: 3563 ifmedia_add(media, m | IFM_10G_LRM, 0, NULL); 3564 ifmedia_set(media, m | IFM_10G_LRM); 3565 break; 3566 3567 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3568 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3569 ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL); 3570 ifmedia_set(media, m | IFM_10G_TWINAX); 3571 break; 3572 3573 case FW_PORT_MOD_TYPE_NONE: 3574 m &= ~IFM_FDX; 3575 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3576 ifmedia_set(media, m | IFM_NONE); 3577 break; 3578 3579 case FW_PORT_MOD_TYPE_NA: 3580 case FW_PORT_MOD_TYPE_ER: 3581 default: 3582 device_printf(pi->dev, 3583 "unknown port_type (%d), mod_type (%d)\n", 3584 pi->port_type, pi->mod_type); 3585 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3586 ifmedia_set(media, m | IFM_UNKNOWN); 3587 break; 3588 } 3589 break; 3590 3591 case FW_PORT_TYPE_CR_QSFP: 3592 case FW_PORT_TYPE_CR_SFP28: 3593 case FW_PORT_TYPE_SFP28: 3594 case FW_PORT_TYPE_KR_SFP28: 3595 switch (pi->mod_type) { 3596 3597 case FW_PORT_MOD_TYPE_SR: 3598 ifmedia_add(media, m | IFM_25G_SR, 0, NULL); 3599 ifmedia_set(media, m | IFM_25G_SR); 3600 break; 3601 3602 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3603 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3604 ifmedia_add(media, m | IFM_25G_CR, 0, NULL); 3605 ifmedia_set(media, m | IFM_25G_CR); 3606 break; 3607 3608 case FW_PORT_MOD_TYPE_NONE: 3609 m &= ~IFM_FDX; 3610 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3611 ifmedia_set(media, m | IFM_NONE); 3612 break; 3613 3614 default: 3615 device_printf(pi->dev, 3616 "unknown port_type (%d), mod_type (%d)\n", 3617 pi->port_type, pi->mod_type); 3618 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3619 ifmedia_set(media, m | IFM_UNKNOWN); 3620 break; 3621 } 3622 break; 3623 3624 case FW_PORT_TYPE_QSFP: 3625 switch (pi->mod_type) { 3626 3627 case FW_PORT_MOD_TYPE_LR: 3628 ifmedia_add(media, m | IFM_40G_LR4, 0, NULL); 3629 ifmedia_set(media, m | IFM_40G_LR4); 3630 break; 3631 3632 case FW_PORT_MOD_TYPE_SR: 3633 ifmedia_add(media, m | IFM_40G_SR4, 0, NULL); 3634 ifmedia_set(media, m | IFM_40G_SR4); 3635 break; 3636 3637 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3638 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3639 ifmedia_add(media, m | IFM_40G_CR4, 0, NULL); 3640 ifmedia_set(media, m | IFM_40G_CR4); 3641 break; 3642 3643 case FW_PORT_MOD_TYPE_NONE: 3644 m &= ~IFM_FDX; 3645 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3646 ifmedia_set(media, m | IFM_NONE); 3647 break; 3648 3649 default: 3650 device_printf(pi->dev, 3651 "unknown port_type (%d), mod_type (%d)\n", 3652 pi->port_type, pi->mod_type); 3653 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3654 ifmedia_set(media, m | IFM_UNKNOWN); 3655 break; 3656 } 3657 break; 3658 3659 case FW_PORT_TYPE_KR4_100G: 3660 case FW_PORT_TYPE_CR4_QSFP: 3661 switch (pi->mod_type) { 3662 3663 case FW_PORT_MOD_TYPE_LR: 3664 ifmedia_add(media, m | IFM_100G_LR4, 0, NULL); 3665 ifmedia_set(media, m | IFM_100G_LR4); 3666 break; 3667 3668 case FW_PORT_MOD_TYPE_SR: 3669 ifmedia_add(media, m | IFM_100G_SR4, 0, NULL); 3670 ifmedia_set(media, m | IFM_100G_SR4); 3671 break; 3672 3673 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3674 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3675 ifmedia_add(media, m | IFM_100G_CR4, 0, NULL); 3676 ifmedia_set(media, m | IFM_100G_CR4); 3677 break; 3678 3679 case FW_PORT_MOD_TYPE_NONE: 3680 m &= ~IFM_FDX; 3681 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3682 ifmedia_set(media, m | IFM_NONE); 3683 break; 3684 3685 default: 3686 device_printf(pi->dev, 3687 "unknown port_type (%d), mod_type (%d)\n", 3688 pi->port_type, pi->mod_type); 3689 
ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3690 ifmedia_set(media, m | IFM_UNKNOWN); 3691 break; 3692 } 3693 break; 3694 3695 default: 3696 device_printf(pi->dev, 3697 "unknown port_type (%d), mod_type (%d)\n", pi->port_type, 3698 pi->mod_type); 3699 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3700 ifmedia_set(media, m | IFM_UNKNOWN); 3701 break; 3702 } 3703 3704 PORT_UNLOCK(pi); 3705} 3706 3707#define FW_MAC_EXACT_CHUNK 7 3708 3709/* 3710 * Program the port's XGMAC based on parameters in ifnet. The caller also 3711 * indicates which parameters should be programmed (the rest are left alone). 3712 */ 3713int 3714update_mac_settings(struct ifnet *ifp, int flags) 3715{ 3716 int rc = 0; 3717 struct vi_info *vi = ifp->if_softc; 3718 struct port_info *pi = vi->pi; 3719 struct adapter *sc = pi->adapter; 3720 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; 3721 3722 ASSERT_SYNCHRONIZED_OP(sc); 3723 KASSERT(flags, ("%s: not told what to update.", __func__)); 3724 3725 if (flags & XGMAC_MTU) 3726 mtu = ifp->if_mtu; 3727 3728 if (flags & XGMAC_PROMISC) 3729 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0; 3730 3731 if (flags & XGMAC_ALLMULTI) 3732 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0; 3733 3734 if (flags & XGMAC_VLANEX) 3735 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0; 3736 3737 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) { 3738 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc, 3739 allmulti, 1, vlanex, false); 3740 if (rc) { 3741 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, 3742 rc); 3743 return (rc); 3744 } 3745 } 3746 3747 if (flags & XGMAC_UCADDR) { 3748 uint8_t ucaddr[ETHER_ADDR_LEN]; 3749 3750 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); 3751 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt, 3752 ucaddr, true, true); 3753 if (rc < 0) { 3754 rc = -rc; 3755 if_printf(ifp, "change_mac failed: %d\n", rc); 3756 return (rc); 3757 } else { 3758 vi->xact_addr_filt = rc; 3759 rc = 0; 3760 } 3761 } 3762 3763 if (flags & XGMAC_MCADDRS) { 3764 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; 3765 int del = 1; 3766 uint64_t hash = 0; 3767 struct ifmultiaddr *ifma; 3768 int i = 0, j; 3769 3770 if_maddr_rlock(ifp); 3771 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3772 if (ifma->ifma_addr->sa_family != AF_LINK) 3773 continue; 3774 mcaddr[i] = 3775 LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 3776 MPASS(ETHER_IS_MULTICAST(mcaddr[i])); 3777 i++; 3778 3779 if (i == FW_MAC_EXACT_CHUNK) { 3780 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, 3781 del, i, mcaddr, NULL, &hash, 0); 3782 if (rc < 0) { 3783 rc = -rc; 3784 for (j = 0; j < i; j++) { 3785 if_printf(ifp, 3786 "failed to add mc address" 3787 " %02x:%02x:%02x:" 3788 "%02x:%02x:%02x rc=%d\n", 3789 mcaddr[j][0], mcaddr[j][1], 3790 mcaddr[j][2], mcaddr[j][3], 3791 mcaddr[j][4], mcaddr[j][5], 3792 rc); 3793 } 3794 goto mcfail; 3795 } 3796 del = 0; 3797 i = 0; 3798 } 3799 } 3800 if (i > 0) { 3801 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i, 3802 mcaddr, NULL, &hash, 0); 3803 if (rc < 0) { 3804 rc = -rc; 3805 for (j = 0; j < i; j++) { 3806 if_printf(ifp, 3807 "failed to add mc address" 3808 " %02x:%02x:%02x:" 3809 "%02x:%02x:%02x rc=%d\n", 3810 mcaddr[j][0], mcaddr[j][1], 3811 mcaddr[j][2], mcaddr[j][3], 3812 mcaddr[j][4], mcaddr[j][5], 3813 rc); 3814 } 3815 goto mcfail; 3816 } 3817 } 3818 3819 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0); 3820 if (rc != 0) 3821 if_printf(ifp, "failed to set mc address hash: %d", rc); 3822mcfail: 3823 
if_maddr_runlock(ifp); 3824 } 3825 3826 return (rc); 3827} 3828 3829/* 3830 * {begin|end}_synchronized_op must be called from the same thread. 3831 */ 3832int 3833begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags, 3834 char *wmesg) 3835{ 3836 int rc, pri; 3837 3838#ifdef WITNESS 3839 /* the caller thinks it's ok to sleep, but is it really? */ 3840 if (flags & SLEEP_OK) 3841 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 3842 "begin_synchronized_op"); 3843#endif 3844 3845 if (flags & INTR_OK) 3846 pri = PCATCH; 3847 else 3848 pri = 0; 3849 3850 ADAPTER_LOCK(sc); 3851 for (;;) { 3852 3853 if (vi && IS_DOOMED(vi)) { 3854 rc = ENXIO; 3855 goto done; 3856 } 3857 3858 if (!IS_BUSY(sc)) { 3859 rc = 0; 3860 break; 3861 } 3862 3863 if (!(flags & SLEEP_OK)) { 3864 rc = EBUSY; 3865 goto done; 3866 } 3867 3868 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) { 3869 rc = EINTR; 3870 goto done; 3871 } 3872 } 3873 3874 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 3875 SET_BUSY(sc); 3876#ifdef INVARIANTS 3877 sc->last_op = wmesg; 3878 sc->last_op_thr = curthread; 3879 sc->last_op_flags = flags; 3880#endif 3881 3882done: 3883 if (!(flags & HOLD_LOCK) || rc) 3884 ADAPTER_UNLOCK(sc); 3885 3886 return (rc); 3887} 3888 3889/* 3890 * Tell if_ioctl and if_init that the VI is going away. This is 3891 * a special variant of begin_synchronized_op and must be paired with a 3892 * call to end_synchronized_op. 3893 */ 3894void 3895doom_vi(struct adapter *sc, struct vi_info *vi) 3896{ 3897 3898 ADAPTER_LOCK(sc); 3899 SET_DOOMED(vi); 3900 wakeup(&sc->flags); 3901 while (IS_BUSY(sc)) 3902 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); 3903 SET_BUSY(sc); 3904#ifdef INVARIANTS 3905 sc->last_op = "t4detach"; 3906 sc->last_op_thr = curthread; 3907 sc->last_op_flags = 0; 3908#endif 3909 ADAPTER_UNLOCK(sc); 3910} 3911 3912/* 3913 * {begin|end}_synchronized_op must be called from the same thread. 3914 */ 3915void 3916end_synchronized_op(struct adapter *sc, int flags) 3917{ 3918 3919 if (flags & LOCK_HELD) 3920 ADAPTER_LOCK_ASSERT_OWNED(sc); 3921 else 3922 ADAPTER_LOCK(sc); 3923 3924 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 3925 CLR_BUSY(sc); 3926 wakeup(&sc->flags); 3927 ADAPTER_UNLOCK(sc); 3928} 3929 3930static int 3931cxgbe_init_synchronized(struct vi_info *vi) 3932{ 3933 struct port_info *pi = vi->pi; 3934 struct adapter *sc = pi->adapter; 3935 struct ifnet *ifp = vi->ifp; 3936 int rc = 0, i; 3937 struct sge_txq *txq; 3938 3939 ASSERT_SYNCHRONIZED_OP(sc); 3940 3941 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3942 return (0); /* already running */ 3943 3944 if (!(sc->flags & FULL_INIT_DONE) && 3945 ((rc = adapter_full_init(sc)) != 0)) 3946 return (rc); /* error message displayed already */ 3947 3948 if (!(vi->flags & VI_INIT_DONE) && 3949 ((rc = vi_full_init(vi)) != 0)) 3950 return (rc); /* error message displayed already */ 3951 3952 rc = update_mac_settings(ifp, XGMAC_ALL); 3953 if (rc) 3954 goto done; /* error message displayed already */ 3955 3956 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); 3957 if (rc != 0) { 3958 if_printf(ifp, "enable_vi failed: %d\n", rc); 3959 goto done; 3960 } 3961 3962 /* 3963 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized 3964 * if this changes. 3965 */ 3966 3967 for_each_txq(vi, i, txq) { 3968 TXQ_LOCK(txq); 3969 txq->eq.flags |= EQ_ENABLED; 3970 TXQ_UNLOCK(txq); 3971 } 3972 3973 /* 3974 * The first iq of the first port to come up is used for tracing. 
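	 * Its absolute id is programmed into the MPS trace RSS control
	 * register and the port is flagged HAS_TRACEQ.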
3975 */ 3976 if (sc->traceq < 0 && IS_MAIN_VI(vi)) { 3977 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; 3978 t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL : 3979 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | 3980 V_QUEUENUMBER(sc->traceq)); 3981 pi->flags |= HAS_TRACEQ; 3982 } 3983 3984 /* all ok */ 3985 PORT_LOCK(pi); 3986 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3987 pi->up_vis++; 3988 3989 if (pi->nvi > 1 || sc->flags & IS_VF) 3990 callout_reset(&vi->tick, hz, vi_tick, vi); 3991 else 3992 callout_reset(&pi->tick, hz, cxgbe_tick, pi); 3993 PORT_UNLOCK(pi); 3994done: 3995 if (rc != 0) 3996 cxgbe_uninit_synchronized(vi); 3997 3998 return (rc); 3999} 4000 4001/* 4002 * Idempotent. 4003 */ 4004static int 4005cxgbe_uninit_synchronized(struct vi_info *vi) 4006{ 4007 struct port_info *pi = vi->pi; 4008 struct adapter *sc = pi->adapter; 4009 struct ifnet *ifp = vi->ifp; 4010 int rc, i; 4011 struct sge_txq *txq; 4012 4013 ASSERT_SYNCHRONIZED_OP(sc); 4014 4015 if (!(vi->flags & VI_INIT_DONE)) { 4016 KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING), 4017 ("uninited VI is running")); 4018 return (0); 4019 } 4020 4021 /* 4022 * Disable the VI so that all its data in either direction is discarded 4023 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz 4024 * tick) intact as the TP can deliver negative advice or data that it's 4025 * holding in its RAM (for an offloaded connection) even after the VI is 4026 * disabled. 4027 */ 4028 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false); 4029 if (rc) { 4030 if_printf(ifp, "disable_vi failed: %d\n", rc); 4031 return (rc); 4032 } 4033 4034 for_each_txq(vi, i, txq) { 4035 TXQ_LOCK(txq); 4036 txq->eq.flags &= ~EQ_ENABLED; 4037 TXQ_UNLOCK(txq); 4038 } 4039 4040 PORT_LOCK(pi); 4041 if (pi->nvi > 1 || sc->flags & IS_VF) 4042 callout_stop(&vi->tick); 4043 else 4044 callout_stop(&pi->tick); 4045 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 4046 PORT_UNLOCK(pi); 4047 return (0); 4048 } 4049 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 4050 pi->up_vis--; 4051 if (pi->up_vis > 0) { 4052 PORT_UNLOCK(pi); 4053 return (0); 4054 } 4055 PORT_UNLOCK(pi); 4056 4057 pi->link_cfg.link_ok = 0; 4058 pi->link_cfg.speed = 0; 4059 pi->linkdnrc = -1; 4060 t4_os_link_changed(sc, pi->port_id, 0, -1); 4061 4062 return (0); 4063} 4064 4065/* 4066 * It is ok for this function to fail midway and return right away. t4_detach 4067 * will walk the entire sc->irq list and clean up whatever is valid. 4068 */ 4069int 4070t4_setup_intr_handlers(struct adapter *sc) 4071{ 4072 int rc, rid, p, q, v; 4073 char s[8]; 4074 struct irq *irq; 4075 struct port_info *pi; 4076 struct vi_info *vi; 4077 struct sge *sge = &sc->sge; 4078 struct sge_rxq *rxq; 4079#ifdef TCP_OFFLOAD 4080 struct sge_ofld_rxq *ofld_rxq; 4081#endif 4082#ifdef DEV_NETMAP 4083 struct sge_nm_rxq *nm_rxq; 4084#endif 4085#ifdef RSS 4086 int nbuckets = rss_getnumbuckets(); 4087#endif 4088 4089 /* 4090 * Setup interrupts. 4091 */ 4092 irq = &sc->irq[0]; 4093 rid = sc->intr_type == INTR_INTX ? 0 : 1; 4094 if (sc->intr_count == 1) 4095 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); 4096 4097 /* Multiple interrupts. 
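 * Vector layout: an error interrupt first (PFs only), then the firmware event queue, then one vector per NIC/TOE/netmap rx queue.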
*/ 4098 if (sc->flags & IS_VF) 4099 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports, 4100 ("%s: too few intr.", __func__)); 4101 else 4102 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, 4103 ("%s: too few intr.", __func__)); 4104 4105 /* The first one is always error intr on PFs */ 4106 if (!(sc->flags & IS_VF)) { 4107 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); 4108 if (rc != 0) 4109 return (rc); 4110 irq++; 4111 rid++; 4112 } 4113 4114 /* The second one is always the firmware event queue (first on VFs) */ 4115 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt"); 4116 if (rc != 0) 4117 return (rc); 4118 irq++; 4119 rid++; 4120 4121 for_each_port(sc, p) { 4122 pi = sc->port[p]; 4123 for_each_vi(pi, v, vi) { 4124 vi->first_intr = rid - 1; 4125 4126 if (vi->nnmrxq > 0) { 4127 int n = max(vi->nrxq, vi->nnmrxq); 4128 4129 MPASS(vi->flags & INTR_RXQ); 4130 4131 rxq = &sge->rxq[vi->first_rxq]; 4132#ifdef DEV_NETMAP 4133 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq]; 4134#endif 4135 for (q = 0; q < n; q++) { 4136 snprintf(s, sizeof(s), "%x%c%x", p, 4137 'a' + v, q); 4138 if (q < vi->nrxq) 4139 irq->rxq = rxq++; 4140#ifdef DEV_NETMAP 4141 if (q < vi->nnmrxq) 4142 irq->nm_rxq = nm_rxq++; 4143#endif 4144 rc = t4_alloc_irq(sc, irq, rid, 4145 t4_vi_intr, irq, s); 4146 if (rc != 0) 4147 return (rc); 4148 irq++; 4149 rid++; 4150 vi->nintr++; 4151 } 4152 } else if (vi->flags & INTR_RXQ) { 4153 for_each_rxq(vi, q, rxq) { 4154 snprintf(s, sizeof(s), "%x%c%x", p, 4155 'a' + v, q); 4156 rc = t4_alloc_irq(sc, irq, rid, 4157 t4_intr, rxq, s); 4158 if (rc != 0) 4159 return (rc); 4160#ifdef RSS 4161 bus_bind_intr(sc->dev, irq->res, 4162 rss_getcpu(q % nbuckets)); 4163#endif 4164 irq++; 4165 rid++; 4166 vi->nintr++; 4167 } 4168 } 4169#ifdef TCP_OFFLOAD 4170 if (vi->flags & INTR_OFLD_RXQ) { 4171 for_each_ofld_rxq(vi, q, ofld_rxq) { 4172 snprintf(s, sizeof(s), "%x%c%x", p, 4173 'A' + v, q); 4174 rc = t4_alloc_irq(sc, irq, rid, 4175 t4_intr, ofld_rxq, s); 4176 if (rc != 0) 4177 return (rc); 4178 irq++; 4179 rid++; 4180 vi->nintr++; 4181 } 4182 } 4183#endif 4184 } 4185 } 4186 MPASS(irq == &sc->irq[sc->intr_count]); 4187 4188 return (0); 4189} 4190 4191int 4192adapter_full_init(struct adapter *sc) 4193{ 4194 int rc, i; 4195 4196 ASSERT_SYNCHRONIZED_OP(sc); 4197 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 4198 KASSERT((sc->flags & FULL_INIT_DONE) == 0, 4199 ("%s: FULL_INIT_DONE already", __func__)); 4200 4201 /* 4202 * queues that belong to the adapter (not any particular port). 
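 * (e.g. the firmware event queue and the control queues).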
4203 */ 4204 rc = t4_setup_adapter_queues(sc); 4205 if (rc != 0) 4206 goto done; 4207 4208 for (i = 0; i < nitems(sc->tq); i++) { 4209 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, 4210 taskqueue_thread_enqueue, &sc->tq[i]); 4211 if (sc->tq[i] == NULL) { 4212 device_printf(sc->dev, 4213 "failed to allocate task queue %d\n", i); 4214 rc = ENOMEM; 4215 goto done; 4216 } 4217 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", 4218 device_get_nameunit(sc->dev), i); 4219 } 4220 4221 if (!(sc->flags & IS_VF)) 4222 t4_intr_enable(sc); 4223 sc->flags |= FULL_INIT_DONE; 4224done: 4225 if (rc != 0) 4226 adapter_full_uninit(sc); 4227 4228 return (rc); 4229} 4230 4231int 4232adapter_full_uninit(struct adapter *sc) 4233{ 4234 int i; 4235 4236 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 4237 4238 t4_teardown_adapter_queues(sc); 4239 4240 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) { 4241 taskqueue_free(sc->tq[i]); 4242 sc->tq[i] = NULL; 4243 } 4244 4245 sc->flags &= ~FULL_INIT_DONE; 4246 4247 return (0); 4248} 4249 4250#ifdef RSS 4251#define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \ 4252 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \ 4253 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \ 4254 RSS_HASHTYPE_RSS_UDP_IPV6) 4255 4256/* Translates kernel hash types to hardware. */ 4257static int 4258hashconfig_to_hashen(int hashconfig) 4259{ 4260 int hashen = 0; 4261 4262 if (hashconfig & RSS_HASHTYPE_RSS_IPV4) 4263 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; 4264 if (hashconfig & RSS_HASHTYPE_RSS_IPV6) 4265 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; 4266 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) { 4267 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 4268 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 4269 } 4270 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) { 4271 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 4272 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 4273 } 4274 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4) 4275 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 4276 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6) 4277 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 4278 4279 return (hashen); 4280} 4281 4282/* Translates hardware hash types to kernel. */ 4283static int 4284hashen_to_hashconfig(int hashen) 4285{ 4286 int hashconfig = 0; 4287 4288 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) { 4289 /* 4290 * If UDP hashing was enabled it must have been enabled for 4291 * either IPv4 or IPv6 (inclusive or). Enabling UDP without 4292 * enabling any 4-tuple hash is nonsense configuration. 
4293 */ 4294 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 4295 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)); 4296 4297 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 4298 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4; 4299 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 4300 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6; 4301 } 4302 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 4303 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4; 4304 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 4305 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6; 4306 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) 4307 hashconfig |= RSS_HASHTYPE_RSS_IPV4; 4308 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) 4309 hashconfig |= RSS_HASHTYPE_RSS_IPV6; 4310 4311 return (hashconfig); 4312} 4313#endif 4314 4315int 4316vi_full_init(struct vi_info *vi) 4317{ 4318 struct adapter *sc = vi->pi->adapter; 4319 struct ifnet *ifp = vi->ifp; 4320 uint16_t *rss; 4321 struct sge_rxq *rxq; 4322 int rc, i, j, hashen; 4323#ifdef RSS 4324 int nbuckets = rss_getnumbuckets(); 4325 int hashconfig = rss_gethashconfig(); 4326 int extra; 4327 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 4328 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 4329#endif 4330 4331 ASSERT_SYNCHRONIZED_OP(sc); 4332 KASSERT((vi->flags & VI_INIT_DONE) == 0, 4333 ("%s: VI_INIT_DONE already", __func__)); 4334 4335 sysctl_ctx_init(&vi->ctx); 4336 vi->flags |= VI_SYSCTL_CTX; 4337 4338 /* 4339 * Allocate tx/rx/fl queues for this VI. 4340 */ 4341 rc = t4_setup_vi_queues(vi); 4342 if (rc != 0) 4343 goto done; /* error message displayed already */ 4344 4345 /* 4346 * Setup RSS for this VI. Save a copy of the RSS table for later use. 4347 */ 4348 if (vi->nrxq > vi->rss_size) { 4349 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); " 4350 "some queues will never receive traffic.\n", vi->nrxq, 4351 vi->rss_size); 4352 } else if (vi->rss_size % vi->nrxq) { 4353 if_printf(ifp, "nrxq (%d), hw RSS table size (%d); " 4354 "expect uneven traffic distribution.\n", vi->nrxq, 4355 vi->rss_size); 4356 } 4357#ifdef RSS 4358 MPASS(RSS_KEYSIZE == 40); 4359 if (vi->nrxq != nbuckets) { 4360 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d);" 4361 "performance will be impacted.\n", vi->nrxq, nbuckets); 4362 } 4363 4364 rss_getkey((void *)&raw_rss_key[0]); 4365 for (i = 0; i < nitems(rss_key); i++) { 4366 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]); 4367 } 4368 t4_write_rss_key(sc, &rss_key[0], -1); 4369#endif 4370 rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK); 4371 for (i = 0; i < vi->rss_size;) { 4372#ifdef RSS 4373 j = rss_get_indirection_to_bucket(i); 4374 j %= vi->nrxq; 4375 rxq = &sc->sge.rxq[vi->first_rxq + j]; 4376 rss[i++] = rxq->iq.abs_id; 4377#else 4378 for_each_rxq(vi, j, rxq) { 4379 rss[i++] = rxq->iq.abs_id; 4380 if (i == vi->rss_size) 4381 break; 4382 } 4383#endif 4384 } 4385 4386 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss, 4387 vi->rss_size); 4388 if (rc != 0) { 4389 if_printf(ifp, "rss_config failed: %d\n", rc); 4390 goto done; 4391 } 4392 4393#ifdef RSS 4394 hashen = hashconfig_to_hashen(hashconfig); 4395 4396 /* 4397 * We may have had to enable some hashes even though the global config 4398 * wants them disabled. This is a potential problem that must be 4399 * reported to the user. 4400 */ 4401 extra = hashen_to_hashconfig(hashen) ^ hashconfig; 4402 4403 /* 4404 * If we consider only the supported hash types, then the enabled hashes 4405 * are a superset of the requested hashes. 
In other words, there cannot 4406 * be any supported hash that was requested but not enabled, but there 4407 * can be hashes that were not requested but had to be enabled. 4408 */ 4409 extra &= SUPPORTED_RSS_HASHTYPES; 4410 MPASS((extra & hashconfig) == 0); 4411 4412 if (extra) { 4413 if_printf(ifp, 4414 "global RSS config (0x%x) cannot be accommodated.\n", 4415 hashconfig); 4416 } 4417 if (extra & RSS_HASHTYPE_RSS_IPV4) 4418 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n"); 4419 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4) 4420 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n"); 4421 if (extra & RSS_HASHTYPE_RSS_IPV6) 4422 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n"); 4423 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6) 4424 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n"); 4425 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4) 4426 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n"); 4427 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6) 4428 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n"); 4429#else 4430 hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | 4431 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | 4432 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 4433 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN; 4434#endif 4435 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0], 0, 0); 4436 if (rc != 0) { 4437 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc); 4438 goto done; 4439 } 4440 4441 vi->rss = rss; 4442 vi->flags |= VI_INIT_DONE; 4443done: 4444 if (rc != 0) 4445 vi_full_uninit(vi); 4446 4447 return (rc); 4448} 4449 4450/* 4451 * Idempotent. 4452 */ 4453int 4454vi_full_uninit(struct vi_info *vi) 4455{ 4456 struct port_info *pi = vi->pi; 4457 struct adapter *sc = pi->adapter; 4458 int i; 4459 struct sge_rxq *rxq; 4460 struct sge_txq *txq; 4461#ifdef TCP_OFFLOAD 4462 struct sge_ofld_rxq *ofld_rxq; 4463 struct sge_wrq *ofld_txq; 4464#endif 4465 4466 if (vi->flags & VI_INIT_DONE) { 4467 4468 /* Need to quiesce queues. */ 4469 4470 /* XXX: Only for the first VI? */ 4471 if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF)) 4472 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 4473 4474 for_each_txq(vi, i, txq) { 4475 quiesce_txq(sc, txq); 4476 } 4477 4478#ifdef TCP_OFFLOAD 4479 for_each_ofld_txq(vi, i, ofld_txq) { 4480 quiesce_wrq(sc, ofld_txq); 4481 } 4482#endif 4483 4484 for_each_rxq(vi, i, rxq) { 4485 quiesce_iq(sc, &rxq->iq); 4486 quiesce_fl(sc, &rxq->fl); 4487 } 4488 4489#ifdef TCP_OFFLOAD 4490 for_each_ofld_rxq(vi, i, ofld_rxq) { 4491 quiesce_iq(sc, &ofld_rxq->iq); 4492 quiesce_fl(sc, &ofld_rxq->fl); 4493 } 4494#endif 4495 free(vi->rss, M_CXGBE); 4496 free(vi->nm_rss, M_CXGBE); 4497 } 4498 4499 t4_teardown_vi_queues(vi); 4500 vi->flags &= ~VI_INIT_DONE; 4501 4502 return (0); 4503} 4504 4505static void 4506quiesce_txq(struct adapter *sc, struct sge_txq *txq) 4507{ 4508 struct sge_eq *eq = &txq->eq; 4509 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 4510 4511 (void) sc; /* unused */ 4512 4513#ifdef INVARIANTS 4514 TXQ_LOCK(txq); 4515 MPASS((eq->flags & EQ_ENABLED) == 0); 4516 TXQ_UNLOCK(txq); 4517#endif 4518 4519 /* Wait for the mp_ring to empty. */ 4520 while (!mp_ring_is_idle(txq->r)) { 4521 mp_ring_check_drainage(txq->r, 0); 4522 pause("rquiesce", 1); 4523 } 4524 4525 /* Then wait for the hardware to finish. */ 4526 while (spg->cidx != htobe16(eq->pidx)) 4527 pause("equiesce", 1); 4528 4529 /* Finally, wait for the driver to reclaim all descriptors. 
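 * (i.e. eq->cidx catches up to eq->pidx as completed tx descriptors are reclaimed).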
*/ 4530 while (eq->cidx != eq->pidx) 4531 pause("dquiesce", 1); 4532} 4533 4534static void 4535quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq) 4536{ 4537 4538 /* XXXTX */ 4539} 4540 4541static void 4542quiesce_iq(struct adapter *sc, struct sge_iq *iq) 4543{ 4544 (void) sc; /* unused */ 4545 4546 /* Synchronize with the interrupt handler */ 4547 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) 4548 pause("iqfree", 1); 4549} 4550 4551static void 4552quiesce_fl(struct adapter *sc, struct sge_fl *fl) 4553{ 4554 mtx_lock(&sc->sfl_lock); 4555 FL_LOCK(fl); 4556 fl->flags |= FL_DOOMED; 4557 FL_UNLOCK(fl); 4558 callout_stop(&sc->sfl_callout); 4559 mtx_unlock(&sc->sfl_lock); 4560 4561 KASSERT((fl->flags & FL_STARVING) == 0, 4562 ("%s: still starving", __func__)); 4563} 4564 4565static int 4566t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 4567 driver_intr_t *handler, void *arg, char *name) 4568{ 4569 int rc; 4570 4571 irq->rid = rid; 4572 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 4573 RF_SHAREABLE | RF_ACTIVE); 4574 if (irq->res == NULL) { 4575 device_printf(sc->dev, 4576 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 4577 return (ENOMEM); 4578 } 4579 4580 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 4581 NULL, handler, arg, &irq->tag); 4582 if (rc != 0) { 4583 device_printf(sc->dev, 4584 "failed to setup interrupt for rid %d, name %s: %d\n", 4585 rid, name, rc); 4586 } else if (name) 4587 bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name); 4588 4589 return (rc); 4590} 4591 4592static int 4593t4_free_irq(struct adapter *sc, struct irq *irq) 4594{ 4595 if (irq->tag) 4596 bus_teardown_intr(sc->dev, irq->res, irq->tag); 4597 if (irq->res) 4598 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 4599 4600 bzero(irq, sizeof(*irq)); 4601 4602 return (0); 4603} 4604 4605static void 4606get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 4607{ 4608 4609 regs->version = chip_id(sc) | chip_rev(sc) << 10; 4610 t4_get_regs(sc, buf, regs->len); 4611} 4612 4613#define A_PL_INDIR_CMD 0x1f8 4614 4615#define S_PL_AUTOINC 31 4616#define M_PL_AUTOINC 0x1U 4617#define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC) 4618#define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC) 4619 4620#define S_PL_VFID 20 4621#define M_PL_VFID 0xffU 4622#define V_PL_VFID(x) ((x) << S_PL_VFID) 4623#define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID) 4624 4625#define S_PL_ADDR 0 4626#define M_PL_ADDR 0xfffffU 4627#define V_PL_ADDR(x) ((x) << S_PL_ADDR) 4628#define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR) 4629 4630#define A_PL_INDIR_DATA 0x1fc 4631 4632static uint64_t 4633read_vf_stat(struct adapter *sc, unsigned int viid, int reg) 4634{ 4635 u32 stats[2]; 4636 4637 mtx_assert(&sc->reg_lock, MA_OWNED); 4638 if (sc->flags & IS_VF) { 4639 stats[0] = t4_read_reg(sc, VF_MPS_REG(reg)); 4640 stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4)); 4641 } else { 4642 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4643 V_PL_VFID(G_FW_VIID_VIN(viid)) | 4644 V_PL_ADDR(VF_MPS_REG(reg))); 4645 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA); 4646 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA); 4647 } 4648 return (((uint64_t)stats[1]) << 32 | stats[0]); 4649} 4650 4651static void 4652t4_get_vi_stats(struct adapter *sc, unsigned int viid, 4653 struct fw_vi_stats_vf *stats) 4654{ 4655 4656#define GET_STAT(name) \ 4657 read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L) 4658 4659 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES); 4660 
stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); 4661 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES); 4662 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); 4663 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES); 4664 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); 4665 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES); 4666 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES); 4667 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES); 4668 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES); 4669 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); 4670 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES); 4671 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); 4672 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES); 4673 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); 4674 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES); 4675 4676#undef GET_STAT 4677} 4678 4679static void 4680t4_clr_vi_stats(struct adapter *sc, unsigned int viid) 4681{ 4682 int reg; 4683 4684 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4685 V_PL_VFID(G_FW_VIID_VIN(viid)) | 4686 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L))); 4687 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L; 4688 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4) 4689 t4_write_reg(sc, A_PL_INDIR_DATA, 0); 4690} 4691 4692static void 4693vi_refresh_stats(struct adapter *sc, struct vi_info *vi) 4694{ 4695 struct timeval tv; 4696 const struct timeval interval = {0, 250000}; /* 250ms */ 4697 4698 if (!(vi->flags & VI_INIT_DONE)) 4699 return; 4700 4701 getmicrotime(&tv); 4702 timevalsub(&tv, &interval); 4703 if (timevalcmp(&tv, &vi->last_refreshed, <)) 4704 return; 4705 4706 mtx_lock(&sc->reg_lock); 4707 t4_get_vi_stats(sc, vi->viid, &vi->stats); 4708 getmicrotime(&vi->last_refreshed); 4709 mtx_unlock(&sc->reg_lock); 4710} 4711 4712static void 4713cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi) 4714{ 4715 int i; 4716 u_int v, tnl_cong_drops; 4717 struct timeval tv; 4718 const struct timeval interval = {0, 250000}; /* 250ms */ 4719 4720 getmicrotime(&tv); 4721 timevalsub(&tv, &interval); 4722 if (timevalcmp(&tv, &pi->last_refreshed, <)) 4723 return; 4724 4725 tnl_cong_drops = 0; 4726 t4_get_port_stats(sc, pi->tx_chan, &pi->stats); 4727 for (i = 0; i < sc->chip_params->nchan; i++) { 4728 if (pi->rx_chan_map & (1 << i)) { 4729 mtx_lock(&sc->reg_lock); 4730 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 4731 1, A_TP_MIB_TNL_CNG_DROP_0 + i); 4732 mtx_unlock(&sc->reg_lock); 4733 tnl_cong_drops += v; 4734 } 4735 } 4736 pi->tnl_cong_drops = tnl_cong_drops; 4737 getmicrotime(&pi->last_refreshed); 4738} 4739 4740static void 4741cxgbe_tick(void *arg) 4742{ 4743 struct port_info *pi = arg; 4744 struct adapter *sc = pi->adapter; 4745 4746 PORT_LOCK_ASSERT_OWNED(pi); 4747 cxgbe_refresh_stats(sc, pi); 4748 4749 callout_schedule(&pi->tick, hz); 4750} 4751 4752void 4753vi_tick(void *arg) 4754{ 4755 struct vi_info *vi = arg; 4756 struct adapter *sc = vi->pi->adapter; 4757 4758 vi_refresh_stats(sc, vi); 4759 4760 callout_schedule(&vi->tick, hz); 4761} 4762 4763static void 4764cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid) 4765{ 4766 struct ifnet *vlan; 4767 4768 if (arg != ifp || ifp->if_type != IFT_ETHER) 4769 return; 4770 4771 vlan = VLAN_DEVAT(ifp, vid); 4772 VLAN_SETCOOKIE(vlan, ifp); 4773} 4774 4775/* 4776 * Should match fw_caps_config_<foo> enums in t4fw_interface.h 4777 */ 4778static char *caps_decoder[] = { 4779 "\20\001IPMI\002NCSI", /* 0: NBM */ 4780 
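	/* Each string is a printf %b format: the leading \20 selects hex output and each octal \NNN byte gives the 1-based bit number for the name that follows; e.g. the value 3 decoded with the NBM string above prints "3<IPMI,NCSI>". */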
"\20\001PPP\002QFC\003DCBX", /* 1: link */ 4781 "\20\001INGRESS\002EGRESS", /* 2: switch */ 4782 "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */ 4783 "\006HASHFILTER\007ETHOFLD", 4784 "\20\001TOE", /* 4: TOE */ 4785 "\20\001RDDP\002RDMAC", /* 5: RDMA */ 4786 "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */ 4787 "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD" 4788 "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD" 4789 "\007T10DIF" 4790 "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD", 4791 "\20\001LOOKASIDE\002TLSKEYS", /* 7: Crypto */ 4792 "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */ 4793 "\004PO_INITIATOR\005PO_TARGET", 4794}; 4795 4796void 4797t4_sysctls(struct adapter *sc) 4798{ 4799 struct sysctl_ctx_list *ctx; 4800 struct sysctl_oid *oid; 4801 struct sysctl_oid_list *children, *c0; 4802 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"}; 4803 4804 ctx = device_get_sysctl_ctx(sc->dev); 4805 4806 /* 4807 * dev.t4nex.X. 4808 */ 4809 oid = device_get_sysctl_tree(sc->dev); 4810 c0 = children = SYSCTL_CHILDREN(oid); 4811 4812 sc->sc_do_rxcopy = 1; 4813 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW, 4814 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); 4815 4816 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL, 4817 sc->params.nports, "# of ports"); 4818 4819 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells", 4820 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells, 4821 sysctl_bitfield, "A", "available doorbells"); 4822 4823 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL, 4824 sc->params.vpd.cclk, "core clock frequency (in KHz)"); 4825 4826 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", 4827 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val, 4828 sizeof(sc->params.sge.timer_val), sysctl_int_array, "A", 4829 "interrupt holdoff timer values (us)"); 4830 4831 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", 4832 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val, 4833 sizeof(sc->params.sge.counter_val), sysctl_int_array, "A", 4834 "interrupt holdoff packet counter values"); 4835 4836 t4_sge_sysctls(sc, ctx, children); 4837 4838 sc->lro_timeout = 100; 4839 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW, 4840 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); 4841 4842 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW, 4843 &sc->debug_flags, 0, "flags to enable runtime debugging"); 4844 4845 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version", 4846 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version"); 4847 4848 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", 4849 CTLFLAG_RD, sc->fw_version, 0, "firmware version"); 4850 4851 if (sc->flags & IS_VF) 4852 return; 4853 4854 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, 4855 NULL, chip_rev(sc), "chip hardware revision"); 4856 4857 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn", 4858 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number"); 4859 4860 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn", 4861 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number"); 4862 4863 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec", 4864 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change"); 4865 4866 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na", 4867 CTLFLAG_RD, sc->params.vpd.na, 0, "network address"); 4868 4869 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD, 4870 sc->er_version, 0, "expansion ROM version"); 4871 4872 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, 
"bs_version", CTLFLAG_RD, 4873 sc->bs_version, 0, "bootstrap firmware version"); 4874 4875 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD, 4876 NULL, sc->params.scfg_vers, "serial config version"); 4877 4878 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD, 4879 NULL, sc->params.vpd_vers, "VPD version"); 4880 4881 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf", 4882 CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); 4883 4884 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL, 4885 sc->cfcsum, "config file checksum"); 4886 4887#define SYSCTL_CAP(name, n, text) \ 4888 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \ 4889 CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], sc->name, \ 4890 sysctl_bitfield, "A", "available " text " capabilities") 4891 4892 SYSCTL_CAP(nbmcaps, 0, "NBM"); 4893 SYSCTL_CAP(linkcaps, 1, "link"); 4894 SYSCTL_CAP(switchcaps, 2, "switch"); 4895 SYSCTL_CAP(niccaps, 3, "NIC"); 4896 SYSCTL_CAP(toecaps, 4, "TCP offload"); 4897 SYSCTL_CAP(rdmacaps, 5, "RDMA"); 4898 SYSCTL_CAP(iscsicaps, 6, "iSCSI"); 4899 SYSCTL_CAP(cryptocaps, 7, "crypto"); 4900 SYSCTL_CAP(fcoecaps, 8, "FCoE"); 4901#undef SYSCTL_CAP 4902 4903 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD, 4904 NULL, sc->tids.nftids, "number of filters"); 4905 4906 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | 4907 CTLFLAG_RD, sc, 0, sysctl_temperature, "I", 4908 "chip temperature (in Celsius)"); 4909 4910#ifdef SBUF_DRAIN 4911 /* 4912 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. 4913 */ 4914 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", 4915 CTLFLAG_RD | CTLFLAG_SKIP, NULL, 4916 "logs and miscellaneous information"); 4917 children = SYSCTL_CHILDREN(oid); 4918 4919 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", 4920 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4921 sysctl_cctrl, "A", "congestion control"); 4922 4923 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", 4924 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4925 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); 4926 4927 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", 4928 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, 4929 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); 4930 4931 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", 4932 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, 4933 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); 4934 4935 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", 4936 CTLTYPE_STRING | CTLFLAG_RD, sc, 3, 4937 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); 4938 4939 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", 4940 CTLTYPE_STRING | CTLFLAG_RD, sc, 4, 4941 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); 4942 4943 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", 4944 CTLTYPE_STRING | CTLFLAG_RD, sc, 5, 4945 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); 4946 4947 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", 4948 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4949 chip_id(sc) <= CHELSIO_T5 ? 
sysctl_cim_la : sysctl_cim_la_t6, 4950 "A", "CIM logic analyzer"); 4951 4952 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", 4953 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4954 sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); 4955 4956 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", 4957 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ, 4958 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); 4959 4960 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", 4961 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ, 4962 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); 4963 4964 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", 4965 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ, 4966 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); 4967 4968 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", 4969 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ, 4970 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); 4971 4972 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", 4973 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ, 4974 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); 4975 4976 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", 4977 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ, 4978 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)"); 4979 4980 if (chip_id(sc) > CHELSIO_T4) { 4981 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", 4982 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ, 4983 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); 4984 4985 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", 4986 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ, 4987 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)"); 4988 } 4989 4990 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", 4991 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4992 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); 4993 4994 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", 4995 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4996 sysctl_cim_qcfg, "A", "CIM queue configuration"); 4997 4998 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", 4999 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5000 sysctl_cpl_stats, "A", "CPL statistics"); 5001 5002 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", 5003 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5004 sysctl_ddp_stats, "A", "non-TCP DDP statistics"); 5005 5006 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", 5007 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5008 sysctl_devlog, "A", "firmware's device log"); 5009 5010 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", 5011 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5012 sysctl_fcoe_stats, "A", "FCoE statistics"); 5013 5014 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", 5015 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5016 sysctl_hw_sched, "A", "hardware scheduler "); 5017 5018 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", 5019 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5020 sysctl_l2t, "A", "hardware L2 table"); 5021 5022 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", 5023 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5024 sysctl_lb_stats, "A", "loopback statistics"); 5025 5026 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", 5027 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5028 sysctl_meminfo, "A", "memory regions"); 5029 5030 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", 5031 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5032 chip_id(sc) <= CHELSIO_T5 ? 
sysctl_mps_tcam : sysctl_mps_tcam_t6, 5033 "A", "MPS TCAM entries"); 5034 5035 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", 5036 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5037 sysctl_path_mtus, "A", "path MTUs"); 5038 5039 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", 5040 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5041 sysctl_pm_stats, "A", "PM statistics"); 5042 5043 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", 5044 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5045 sysctl_rdma_stats, "A", "RDMA statistics"); 5046 5047 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", 5048 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5049 sysctl_tcp_stats, "A", "TCP statistics"); 5050 5051 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", 5052 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5053 sysctl_tids, "A", "TID information"); 5054 5055 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", 5056 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5057 sysctl_tp_err_stats, "A", "TP error statistics"); 5058 5059 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask", 5060 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I", 5061 "TP logic analyzer event capture mask"); 5062 5063 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", 5064 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5065 sysctl_tp_la, "A", "TP logic analyzer"); 5066 5067 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", 5068 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5069 sysctl_tx_rate, "A", "Tx rate"); 5070 5071 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", 5072 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5073 sysctl_ulprx_la, "A", "ULPRX logic analyzer"); 5074 5075 if (chip_id(sc) >= CHELSIO_T5) { 5076 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", 5077 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5078 sysctl_wcwr_stats, "A", "write combined work requests"); 5079 } 5080#endif 5081 5082#ifdef TCP_OFFLOAD 5083 if (is_offload(sc)) { 5084 /* 5085 * dev.t4nex.X.toe. 
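 * TOE tunables, e.g. dev.t4nex.0.toe.ddp; created only for offload-capable adapters.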
5086 */ 5087 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD, 5088 NULL, "TOE parameters"); 5089 children = SYSCTL_CHILDREN(oid); 5090 5091 sc->tt.sndbuf = 256 * 1024; 5092 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, 5093 &sc->tt.sndbuf, 0, "max hardware send buffer size"); 5094 5095 sc->tt.ddp = 0; 5096 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW, 5097 &sc->tt.ddp, 0, "DDP allowed"); 5098 5099 sc->tt.rx_coalesce = 1; 5100 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", 5101 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); 5102 5103 sc->tt.tx_align = 1; 5104 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", 5105 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); 5106 5107 sc->tt.tx_zcopy = 0; 5108 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy", 5109 CTLFLAG_RW, &sc->tt.tx_zcopy, 0, 5110 "Enable zero-copy aio_write(2)"); 5111 5112 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick", 5113 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A", 5114 "TP timer tick (us)"); 5115 5116 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick", 5117 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A", 5118 "TCP timestamp tick (us)"); 5119 5120 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick", 5121 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A", 5122 "DACK tick (us)"); 5123 5124 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer", 5125 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer, 5126 "IU", "DACK timer (us)"); 5127 5128 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min", 5129 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN, 5130 sysctl_tp_timer, "LU", "Retransmit min (us)"); 5131 5132 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max", 5133 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX, 5134 sysctl_tp_timer, "LU", "Retransmit max (us)"); 5135 5136 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min", 5137 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN, 5138 sysctl_tp_timer, "LU", "Persist timer min (us)"); 5139 5140 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max", 5141 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX, 5142 sysctl_tp_timer, "LU", "Persist timer max (us)"); 5143 5144 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle", 5145 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE, 5146 sysctl_tp_timer, "LU", "Keepalive idle timer (us)"); 5147 5148 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_intvl", 5149 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL, 5150 sysctl_tp_timer, "LU", "Keepalive interval (us)"); 5151 5152 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt", 5153 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT, 5154 sysctl_tp_timer, "LU", "Initial SRTT (us)"); 5155 5156 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer", 5157 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER, 5158 sysctl_tp_timer, "LU", "FINWAIT2 timer (us)"); 5159 } 5160#endif 5161} 5162 5163void 5164vi_sysctls(struct vi_info *vi) 5165{ 5166 struct sysctl_ctx_list *ctx; 5167 struct sysctl_oid *oid; 5168 struct sysctl_oid_list *children; 5169 5170 ctx = device_get_sysctl_ctx(vi->dev); 5171 5172 /* 5173 * dev.v?(cxgbe|cxl).X. 
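 * The main VI is the cxgbe/cxl device itself; extra VIs attach as vcxgbe/vcxl, hence the optional leading 'v'.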
5174 */ 5175 oid = device_get_sysctl_tree(vi->dev); 5176 children = SYSCTL_CHILDREN(oid); 5177 5178 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL, 5179 vi->viid, "VI identifier"); 5180 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, 5181 &vi->nrxq, 0, "# of rx queues"); 5182 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, 5183 &vi->ntxq, 0, "# of tx queues"); 5184 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, 5185 &vi->first_rxq, 0, "index of first rx queue"); 5186 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, 5187 &vi->first_txq, 0, "index of first tx queue"); 5188 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL, 5189 vi->rss_size, "size of RSS indirection table"); 5190 5191 if (IS_MAIN_VI(vi)) { 5192 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", 5193 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU", 5194 "Reserve queue 0 for non-flowid packets"); 5195 } 5196 5197#ifdef TCP_OFFLOAD 5198 if (vi->nofldrxq != 0) { 5199 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD, 5200 &vi->nofldrxq, 0, 5201 "# of rx queues for offloaded TCP connections"); 5202 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, 5203 &vi->nofldtxq, 0, 5204 "# of tx queues for offloaded TCP connections"); 5205 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq", 5206 CTLFLAG_RD, &vi->first_ofld_rxq, 0, 5207 "index of first TOE rx queue"); 5208 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", 5209 CTLFLAG_RD, &vi->first_ofld_txq, 0, 5210 "index of first TOE tx queue"); 5211 } 5212#endif 5213#ifdef DEV_NETMAP 5214 if (vi->nnmrxq != 0) { 5215 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD, 5216 &vi->nnmrxq, 0, "# of netmap rx queues"); 5217 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD, 5218 &vi->nnmtxq, 0, "# of netmap tx queues"); 5219 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq", 5220 CTLFLAG_RD, &vi->first_nm_rxq, 0, 5221 "index of first netmap rx queue"); 5222 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq", 5223 CTLFLAG_RD, &vi->first_nm_txq, 0, 5224 "index of first netmap tx queue"); 5225 } 5226#endif 5227 5228 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", 5229 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I", 5230 "holdoff timer index"); 5231 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", 5232 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I", 5233 "holdoff packet counter index"); 5234 5235 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", 5236 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I", 5237 "rx queue size"); 5238 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", 5239 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I", 5240 "tx queue size"); 5241} 5242 5243static void 5244cxgbe_sysctls(struct port_info *pi) 5245{ 5246 struct sysctl_ctx_list *ctx; 5247 struct sysctl_oid *oid; 5248 struct sysctl_oid_list *children, *children2; 5249 struct adapter *sc = pi->adapter; 5250 int i; 5251 char name[16]; 5252 5253 ctx = device_get_sysctl_ctx(pi->dev); 5254 5255 /* 5256 * dev.cxgbe.X. 
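 * Per-port knobs: link down reason, BT PHY temperature/firmware, pause settings (e.g. dev.cxgbe.0.pause_settings), and max speed.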
5257 */ 5258 oid = device_get_sysctl_tree(pi->dev); 5259 children = SYSCTL_CHILDREN(oid); 5260 5261 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING | 5262 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down"); 5263 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { 5264 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", 5265 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I", 5266 "PHY temperature (in Celsius)"); 5267 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version", 5268 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I", 5269 "PHY firmware version"); 5270 } 5271 5272 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings", 5273 CTLTYPE_STRING | CTLFLAG_RW, pi, PAUSE_TX, sysctl_pause_settings, 5274 "A", "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)"); 5275 5276 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL, 5277 port_top_speed(pi), "max speed (in Gbps)"); 5278 5279 if (sc->flags & IS_VF) 5280 return; 5281 5282 /* 5283 * dev.(cxgbe|cxl).X.tc. 5284 */ 5285 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL, 5286 "Tx scheduler traffic classes"); 5287 for (i = 0; i < sc->chip_params->nsched_cls; i++) { 5288 struct tx_sched_class *tc = &pi->tc[i]; 5289 5290 snprintf(name, sizeof(name), "%d", i); 5291 children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx, 5292 SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL, 5293 "traffic class")); 5294 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "flags", CTLFLAG_RD, 5295 &tc->flags, 0, "flags"); 5296 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount", 5297 CTLFLAG_RD, &tc->refcount, 0, "references to this class"); 5298#ifdef SBUF_DRAIN 5299 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params", 5300 CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i, 5301 sysctl_tc_params, "A", "traffic class parameters"); 5302#endif 5303 } 5304 5305 /* 5306 * dev.cxgbe.X.stats. 
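 * Port MAC statistics, read straight from the MPS port-statistics registers via sysctl_handle_t4_reg64().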
5307 */ 5308 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 5309 NULL, "port statistics"); 5310 children = SYSCTL_CHILDREN(oid); 5311 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD, 5312 &pi->tx_parse_error, 0, 5313 "# of tx packets with invalid length or # of segments"); 5314 5315#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \ 5316 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \ 5317 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \ 5318 sysctl_handle_t4_reg64, "QU", desc) 5319 5320 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames", 5321 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L)); 5322 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames", 5323 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L)); 5324 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames", 5325 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L)); 5326 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames", 5327 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L)); 5328 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames", 5329 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L)); 5330 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames", 5331 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L)); 5332 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64", 5333 "# of tx frames in this range", 5334 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L)); 5335 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127", 5336 "# of tx frames in this range", 5337 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L)); 5338 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255", 5339 "# of tx frames in this range", 5340 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L)); 5341 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511", 5342 "# of tx frames in this range", 5343 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L)); 5344 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023", 5345 "# of tx frames in this range", 5346 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L)); 5347 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518", 5348 "# of tx frames in this range", 5349 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L)); 5350 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max", 5351 "# of tx frames in this range", 5352 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L)); 5353 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames", 5354 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L)); 5355 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted", 5356 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L)); 5357 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted", 5358 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L)); 5359 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted", 5360 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L)); 5361 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted", 5362 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L)); 5363 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted", 5364 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L)); 5365 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted", 5366 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L)); 5367 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted", 5368 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L)); 5369 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames 
transmitted", 5370 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L)); 5371 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted", 5372 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L)); 5373 5374 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames", 5375 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L)); 5376 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames", 5377 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L)); 5378 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames", 5379 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L)); 5380 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames", 5381 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L)); 5382 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames", 5383 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L)); 5384 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU", 5385 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L)); 5386 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames", 5387 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L)); 5388 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err", 5389 "# of frames received with bad FCS", 5390 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L)); 5391 SYSCTL_ADD_T4_REG64(pi, "rx_len_err", 5392 "# of frames received with length error", 5393 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L)); 5394 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors", 5395 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L)); 5396 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received", 5397 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L)); 5398 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64", 5399 "# of rx frames in this range", 5400 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L)); 5401 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127", 5402 "# of rx frames in this range", 5403 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L)); 5404 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255", 5405 "# of rx frames in this range", 5406 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L)); 5407 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511", 5408 "# of rx frames in this range", 5409 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L)); 5410 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023", 5411 "# of rx frames in this range", 5412 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L)); 5413 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518", 5414 "# of rx frames in this range", 5415 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L)); 5416 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max", 5417 "# of rx frames in this range", 5418 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L)); 5419 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received", 5420 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L)); 5421 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received", 5422 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L)); 5423 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received", 5424 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L)); 5425 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received", 5426 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L)); 5427 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received", 5428 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L)); 5429 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received", 5430 
PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L)); 5431 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received", 5432 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L)); 5433 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received", 5434 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L)); 5435 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received", 5436 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L)); 5437 5438#undef SYSCTL_ADD_T4_REG64 5439 5440#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \ 5441 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ 5442 &pi->stats.name, desc) 5443 5444 /* We get these from port_stats and they may be stale by up to 1s */ 5445 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0, 5446 "# drops due to buffer-group 0 overflows"); 5447 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1, 5448 "# drops due to buffer-group 1 overflows"); 5449 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2, 5450 "# drops due to buffer-group 2 overflows"); 5451 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3, 5452 "# drops due to buffer-group 3 overflows"); 5453 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0, 5454 "# of buffer-group 0 truncated packets"); 5455 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1, 5456 "# of buffer-group 1 truncated packets"); 5457 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2, 5458 "# of buffer-group 2 truncated packets"); 5459 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3, 5460 "# of buffer-group 3 truncated packets"); 5461 5462#undef SYSCTL_ADD_T4_PORTSTAT 5463} 5464 5465static int 5466sysctl_int_array(SYSCTL_HANDLER_ARGS) 5467{ 5468 int rc, *i, space = 0; 5469 struct sbuf sb; 5470 5471 sbuf_new_for_sysctl(&sb, NULL, 64, req); 5472 for (i = arg1; arg2; arg2 -= sizeof(int), i++) { 5473 if (space) 5474 sbuf_printf(&sb, " "); 5475 sbuf_printf(&sb, "%d", *i); 5476 space = 1; 5477 } 5478 rc = sbuf_finish(&sb); 5479 sbuf_delete(&sb); 5480 return (rc); 5481} 5482 5483static int 5484sysctl_bitfield(SYSCTL_HANDLER_ARGS) 5485{ 5486 int rc; 5487 struct sbuf *sb; 5488 5489 rc = sysctl_wire_old_buffer(req, 0); 5490 if (rc != 0) 5491 return(rc); 5492 5493 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5494 if (sb == NULL) 5495 return (ENOMEM); 5496 5497 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1); 5498 rc = sbuf_finish(sb); 5499 sbuf_delete(sb); 5500 5501 return (rc); 5502} 5503 5504static int 5505sysctl_btphy(SYSCTL_HANDLER_ARGS) 5506{ 5507 struct port_info *pi = arg1; 5508 int op = arg2; 5509 struct adapter *sc = pi->adapter; 5510 u_int v; 5511 int rc; 5512 5513 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt"); 5514 if (rc) 5515 return (rc); 5516 /* XXX: magic numbers */ 5517 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 
0x20 : 0xc820, 5518 &v); 5519 end_synchronized_op(sc, 0); 5520 if (rc) 5521 return (rc); 5522 if (op == 0) 5523 v /= 256; 5524 5525 rc = sysctl_handle_int(oidp, &v, 0, req); 5526 return (rc); 5527} 5528 5529static int 5530sysctl_noflowq(SYSCTL_HANDLER_ARGS) 5531{ 5532 struct vi_info *vi = arg1; 5533 int rc, val; 5534 5535 val = vi->rsrv_noflowq; 5536 rc = sysctl_handle_int(oidp, &val, 0, req); 5537 if (rc != 0 || req->newptr == NULL) 5538 return (rc); 5539 5540 if ((val >= 1) && (vi->ntxq > 1)) 5541 vi->rsrv_noflowq = 1; 5542 else 5543 vi->rsrv_noflowq = 0; 5544 5545 return (rc); 5546} 5547 5548static int 5549sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) 5550{ 5551 struct vi_info *vi = arg1; 5552 struct adapter *sc = vi->pi->adapter; 5553 int idx, rc, i; 5554 struct sge_rxq *rxq; 5555#ifdef TCP_OFFLOAD 5556 struct sge_ofld_rxq *ofld_rxq; 5557#endif 5558 uint8_t v; 5559 5560 idx = vi->tmr_idx; 5561 5562 rc = sysctl_handle_int(oidp, &idx, 0, req); 5563 if (rc != 0 || req->newptr == NULL) 5564 return (rc); 5565 5566 if (idx < 0 || idx >= SGE_NTIMERS) 5567 return (EINVAL); 5568 5569 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5570 "t4tmr"); 5571 if (rc) 5572 return (rc); 5573 5574 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1); 5575 for_each_rxq(vi, i, rxq) { 5576#ifdef atomic_store_rel_8 5577 atomic_store_rel_8(&rxq->iq.intr_params, v); 5578#else 5579 rxq->iq.intr_params = v; 5580#endif 5581 } 5582#ifdef TCP_OFFLOAD 5583 for_each_ofld_rxq(vi, i, ofld_rxq) { 5584#ifdef atomic_store_rel_8 5585 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); 5586#else 5587 ofld_rxq->iq.intr_params = v; 5588#endif 5589 } 5590#endif 5591 vi->tmr_idx = idx; 5592 5593 end_synchronized_op(sc, LOCK_HELD); 5594 return (0); 5595} 5596 5597static int 5598sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) 5599{ 5600 struct vi_info *vi = arg1; 5601 struct adapter *sc = vi->pi->adapter; 5602 int idx, rc; 5603 5604 idx = vi->pktc_idx; 5605 5606 rc = sysctl_handle_int(oidp, &idx, 0, req); 5607 if (rc != 0 || req->newptr == NULL) 5608 return (rc); 5609 5610 if (idx < -1 || idx >= SGE_NCOUNTERS) 5611 return (EINVAL); 5612 5613 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5614 "t4pktc"); 5615 if (rc) 5616 return (rc); 5617 5618 if (vi->flags & VI_INIT_DONE) 5619 rc = EBUSY; /* cannot be changed once the queues are created */ 5620 else 5621 vi->pktc_idx = idx; 5622 5623 end_synchronized_op(sc, LOCK_HELD); 5624 return (rc); 5625} 5626 5627static int 5628sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) 5629{ 5630 struct vi_info *vi = arg1; 5631 struct adapter *sc = vi->pi->adapter; 5632 int qsize, rc; 5633 5634 qsize = vi->qsize_rxq; 5635 5636 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5637 if (rc != 0 || req->newptr == NULL) 5638 return (rc); 5639 5640 if (qsize < 128 || (qsize & 7)) 5641 return (EINVAL); 5642 5643 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5644 "t4rxqs"); 5645 if (rc) 5646 return (rc); 5647 5648 if (vi->flags & VI_INIT_DONE) 5649 rc = EBUSY; /* cannot be changed once the queues are created */ 5650 else 5651 vi->qsize_rxq = qsize; 5652 5653 end_synchronized_op(sc, LOCK_HELD); 5654 return (rc); 5655} 5656 5657static int 5658sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) 5659{ 5660 struct vi_info *vi = arg1; 5661 struct adapter *sc = vi->pi->adapter; 5662 int qsize, rc; 5663 5664 qsize = vi->qsize_txq; 5665 5666 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5667 if (rc != 0 || req->newptr == NULL) 5668 return (rc); 5669 5670 if (qsize < 128 || qsize > 
65536) 5671 return (EINVAL); 5672 5673 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5674 "t4txqs"); 5675 if (rc) 5676 return (rc); 5677 5678 if (vi->flags & VI_INIT_DONE) 5679 rc = EBUSY; /* cannot be changed once the queues are created */ 5680 else 5681 vi->qsize_txq = qsize; 5682 5683 end_synchronized_op(sc, LOCK_HELD); 5684 return (rc); 5685} 5686 5687static int 5688sysctl_pause_settings(SYSCTL_HANDLER_ARGS) 5689{ 5690 struct port_info *pi = arg1; 5691 struct adapter *sc = pi->adapter; 5692 struct link_config *lc = &pi->link_cfg; 5693 int rc; 5694 5695 if (req->newptr == NULL) { 5696 struct sbuf *sb; 5697 static char *bits = "\20\1PAUSE_RX\2PAUSE_TX"; 5698 5699 rc = sysctl_wire_old_buffer(req, 0); 5700 if (rc != 0) 5701 return(rc); 5702 5703 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5704 if (sb == NULL) 5705 return (ENOMEM); 5706 5707 sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits); 5708 rc = sbuf_finish(sb); 5709 sbuf_delete(sb); 5710 } else { 5711 char s[2]; 5712 int n; 5713 5714 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX)); 5715 s[1] = 0; 5716 5717 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 5718 if (rc != 0) 5719 return(rc); 5720 5721 if (s[1] != 0) 5722 return (EINVAL); 5723 if (s[0] < '0' || s[0] > '9') 5724 return (EINVAL); /* not a number */ 5725 n = s[0] - '0'; 5726 if (n & ~(PAUSE_TX | PAUSE_RX)) 5727 return (EINVAL); /* some other bit is set too */ 5728 5729 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 5730 "t4PAUSE"); 5731 if (rc) 5732 return (rc); 5733 if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) { 5734 int link_ok = lc->link_ok; 5735 5736 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX); 5737 lc->requested_fc |= n; 5738 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 5739 lc->link_ok = link_ok; /* restore */ 5740 } 5741 end_synchronized_op(sc, 0); 5742 } 5743 5744 return (rc); 5745} 5746 5747static int 5748sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) 5749{ 5750 struct adapter *sc = arg1; 5751 int reg = arg2; 5752 uint64_t val; 5753 5754 val = t4_read_reg64(sc, reg); 5755 5756 return (sysctl_handle_64(oidp, &val, 0, req)); 5757} 5758 5759static int 5760sysctl_temperature(SYSCTL_HANDLER_ARGS) 5761{ 5762 struct adapter *sc = arg1; 5763 int rc, t; 5764 uint32_t param, val; 5765 5766 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp"); 5767 if (rc) 5768 return (rc); 5769 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 5770 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | 5771 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP); 5772 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 5773 end_synchronized_op(sc, 0); 5774 if (rc) 5775 return (rc); 5776 5777 /* unknown is returned as 0 but we display -1 in that case */ 5778 t = val == 0 ? 
static int
sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	int reg = arg2;
	uint64_t val;

	val = t4_read_reg64(sc, reg);

	return (sysctl_handle_64(oidp, &val, 0, req));
}

static int
sysctl_temperature(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	int rc, t;
	uint32_t param, val;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
	if (rc)
		return (rc);
	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);

	/* unknown is returned as 0 but we display -1 in that case */
	t = val == 0 ? -1 : val;

	rc = sysctl_handle_int(oidp, &t, 0, req);
	return (rc);
}

#ifdef SBUF_DRAIN
static int
sysctl_cctrl(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t incr[NMTUS][NCCTRL_WIN];
	static const char *dec_fac[] = {
		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
		"0.9375"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_read_cong_tbl(sc, incr);

	for (i = 0; i < NCCTRL_WIN; ++i) {
		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
		    incr[5][i], incr[6][i], incr[7][i]);
		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
};
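/*
 * For sysctl_cim_ibq_obq below, arg2 is the index into qname[] above:
 * 0..CIM_NUM_IBQ-1 select an inbound queue, and the remainder select an
 * outbound queue after CIM_NUM_IBQ is subtracted off.  The number of OBQs is
 * chip-dependent (sc->chip_params->cim_num_obq), which is why the array is
 * sized for the larger T5 count.
 */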
static int
sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n, qid = arg2;
	uint32_t *buf, *p;
	char *qtype;
	u_int cim_num_obq = sc->chip_params->cim_num_obq;

	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
	    ("%s: bad qid %d\n", __func__, qid));

	if (qid < CIM_NUM_IBQ) {
		/* inbound queue */
		qtype = "IBQ";
		n = 4 * CIM_IBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_ibq(sc, qid, buf, n);
	} else {
		/* outbound queue */
		qtype = "OBQ";
		qid -= CIM_NUM_IBQ;
		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_obq(sc, qid, buf, n);
	}

	if (rc < 0) {
		rc = -rc;
		goto done;
	}
	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}

	sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
	for (i = 0, p = buf; i < n; i += 16, p += 4)
		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
		    p[2], p[3]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}

static int
sysctl_cim_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int cfg;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc;

	MPASS(chip_id(sc) <= CHELSIO_T5);

	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (rc != 0)
		return (rc);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	rc = -t4_cim_read_la(sc, buf, NULL);
	if (rc != 0)
		goto done;

	sbuf_printf(sb, "Status   Data     PC%s",
	    cfg & F_UPDBGLACAPTPCONLY ? "" :
	    "     LS0Stat  LS0Addr             LS0Data");

	for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) {
		if (cfg & F_UPDBGLACAPTPCONLY) {
			sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
			    p[6], p[7]);
			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
			    (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
			    p[4] & 0xff, p[5] >> 8);
			sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
			    p[1] & 0xf, p[2] >> 4);
		} else {
			sbuf_printf(sb,
			    "\n  %02x   %x%07x %x%07x %08x %08x "
			    "%08x%08x%08x%08x",
			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
			    p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
			    p[6], p[7]);
		}
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}

static int
sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int cfg;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc;

	MPASS(chip_id(sc) > CHELSIO_T5);

	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (rc != 0)
		return (rc);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	rc = -t4_cim_read_la(sc, buf, NULL);
	if (rc != 0)
		goto done;

	sbuf_printf(sb, "Status   Inst    Data      PC%s",
	    cfg & F_UPDBGLACAPTPCONLY ? "" :
	    "     LS0Stat  LS0Addr  LS0Data  LS1Stat  LS1Addr  LS1Data");

	for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) {
		if (cfg & F_UPDBGLACAPTPCONLY) {
			sbuf_printf(sb, "\n  %02x   %08x %08x %08x",
			    p[3] & 0xff, p[2], p[1], p[0]);
			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x %02x%06x",
			    (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
			    p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
			sbuf_printf(sb, "\n  %02x   %04x%04x %04x%04x %04x%04x",
			    (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
			    p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
			    p[6] >> 16);
		} else {
			sbuf_printf(sb, "\n  %02x   %04x%04x %04x%04x %04x%04x "
			    "%08x %08x %08x %08x %08x %08x",
			    (p[9] >> 16) & 0xff,
			    p[9] & 0xffff, p[8] >> 16,
			    p[8] & 0xffff, p[7] >> 16,
			    p[7] & 0xffff, p[6] >> 16,
			    p[2], p[1], p[0], p[5], p[4], p[3]);
		}
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}
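/*
 * The MA logic analyzer dump below reads two captures into one buffer:
 * CIM_MALA_SIZE entries of 5 words each for the first table, followed by
 * another CIM_MALA_SIZE * 5 words that carry the Cnt/ID/Tag/UE/Data/RDY/VLD
 * fields printed in the second table.  The bit-slicing in the sbuf_printf
 * calls reassembles fields that straddle 32-bit word boundaries.
 */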
%08x%08x %u %u", 6040 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, 6041 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, 6042 (p[1] >> 2) | ((p[2] & 3) << 30), 6043 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, 6044 p[0] & 1); 6045 } 6046 6047 rc = sbuf_finish(sb); 6048 sbuf_delete(sb); 6049 free(buf, M_CXGBE); 6050 return (rc); 6051} 6052 6053static int 6054sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS) 6055{ 6056 struct adapter *sc = arg1; 6057 u_int i; 6058 struct sbuf *sb; 6059 uint32_t *buf, *p; 6060 int rc; 6061 6062 rc = sysctl_wire_old_buffer(req, 0); 6063 if (rc != 0) 6064 return (rc); 6065 6066 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6067 if (sb == NULL) 6068 return (ENOMEM); 6069 6070 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE, 6071 M_ZERO | M_WAITOK); 6072 6073 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL); 6074 p = buf; 6075 6076 sbuf_printf(sb, "Cntl ID DataBE Addr Data"); 6077 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 6078 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x", 6079 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff, 6080 p[4], p[3], p[2], p[1], p[0]); 6081 } 6082 6083 sbuf_printf(sb, "\n\nCntl ID Data"); 6084 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 6085 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x", 6086 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]); 6087 } 6088 6089 rc = sbuf_finish(sb); 6090 sbuf_delete(sb); 6091 free(buf, M_CXGBE); 6092 return (rc); 6093} 6094 6095static int 6096sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS) 6097{ 6098 struct adapter *sc = arg1; 6099 struct sbuf *sb; 6100 int rc, i; 6101 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 6102 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 6103 uint16_t thres[CIM_NUM_IBQ]; 6104 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr; 6105 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat; 6106 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq; 6107 6108 cim_num_obq = sc->chip_params->cim_num_obq; 6109 if (is_t4(sc)) { 6110 ibq_rdaddr = A_UP_IBQ_0_RDADDR; 6111 obq_rdaddr = A_UP_OBQ_0_REALADDR; 6112 } else { 6113 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR; 6114 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR; 6115 } 6116 nq = CIM_NUM_IBQ + cim_num_obq; 6117 6118 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); 6119 if (rc == 0) 6120 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr); 6121 if (rc != 0) 6122 return (rc); 6123 6124 t4_read_cimq_cfg(sc, base, size, thres); 6125 6126 rc = sysctl_wire_old_buffer(req, 0); 6127 if (rc != 0) 6128 return (rc); 6129 6130 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 6131 if (sb == NULL) 6132 return (ENOMEM); 6133 6134 sbuf_printf(sb, 6135 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail"); 6136 6137 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) 6138 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", 6139 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]), 6140 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 6141 G_QUEREMFLITS(p[2]) * 16); 6142 for ( ; i < nq; i++, p += 4, wr += 2) 6143 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], 6144 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff, 6145 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 6146 G_QUEREMFLITS(p[2]) * 16); 6147 6148 rc = sbuf_finish(sb); 6149 sbuf_delete(sb); 6150 6151 return (rc); 6152} 6153 6154static int 6155sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) 6156{ 6157 struct adapter *sc = arg1; 6158 struct sbuf *sb; 6159 int rc; 6160 struct tp_cpl_stats stats; 6161 6162 rc = sysctl_wire_old_buffer(req, 0); 6163 if 
static int
sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_cpl_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	mtx_lock(&sc->reg_lock);
	t4_tp_get_cpl_stats(sc, &stats);
	mtx_unlock(&sc->reg_lock);

	if (sc->chip_params->nchan > 2) {
		sbuf_printf(sb, "                 channel 0  channel 1"
		    "  channel 2  channel 3");
		sbuf_printf(sb, "\nCPL requests:   %10u %10u %10u %10u",
		    stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
		sbuf_printf(sb, "\nCPL responses:  %10u %10u %10u %10u",
		    stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
	} else {
		sbuf_printf(sb, "                 channel 0  channel 1");
		sbuf_printf(sb, "\nCPL requests:   %10u %10u",
		    stats.req[0], stats.req[1]);
		sbuf_printf(sb, "\nCPL responses:  %10u %10u",
		    stats.rsp[0], stats.rsp[1]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_usm_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_get_usm_stats(sc, &stats);

	sbuf_printf(sb, "Frames: %u\n", stats.frames);
	sbuf_printf(sb, "Octets: %ju\n", stats.octets);
	sbuf_printf(sb, "Drops:  %u", stats.drops);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static const char * const devlog_level_strings[] = {
	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
};

static const char * const devlog_facility_strings[] = {
	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
	[FW_DEVLOG_FACILITY_CF]		= "CF",
	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
	[FW_DEVLOG_FACILITY_RES]	= "RES",
	[FW_DEVLOG_FACILITY_HW]		= "HW",
	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
	[FW_DEVLOG_FACILITY_VI]		= "VI",
	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
	[FW_DEVLOG_FACILITY_TM]		= "TM",
	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
	[FW_DEVLOG_FACILITY_RI]		= "RI",
	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE",
	[FW_DEVLOG_FACILITY_CHNET]	= "CHNET",
};
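/*
 * The firmware device log is a fixed-size circular buffer in adapter memory.
 * The handler below copies the whole buffer out, finds the oldest valid
 * entry (the one with the smallest timestamp), and then prints the entries
 * in order, wrapping around at nentries until it is back at the start.
 */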
static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e *buf, *e;
	int i, j, rc, nentries, first = 0;
	struct sbuf *sb;
	uint64_t ftstamp = UINT64_MAX;

	if (dparams->addr == 0)
		return (ENXIO);

	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);

	rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size);
	if (rc != 0)
		goto done;

	nentries = dparams->size / sizeof(struct fw_devlog_e);
	for (i = 0; i < nentries; i++) {
		e = &buf[i];

		if (e->timestamp == 0)
			break;	/* end */

		e->timestamp = be64toh(e->timestamp);
		e->seqno = be32toh(e->seqno);
		for (j = 0; j < 8; j++)
			e->params[j] = be32toh(e->params[j]);

		if (e->timestamp < ftstamp) {
			ftstamp = e->timestamp;
			first = i;
		}
	}

	if (buf[first].timestamp == 0)
		goto done;	/* nothing in the log */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}
	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
	    "Seq#", "Tstamp", "Level", "Facility", "Message");

	i = first;
	do {
		e = &buf[i];
		if (e->timestamp == 0)
			break;	/* end */

		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
		    e->seqno, e->timestamp,
		    (e->level < nitems(devlog_level_strings) ?
			devlog_level_strings[e->level] : "UNKNOWN"),
		    (e->facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e->facility] : "UNKNOWN"));
		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
		    e->params[2], e->params[3], e->params[4],
		    e->params[5], e->params[6], e->params[7]);

		if (++i == nentries)
			i = 0;
	} while (i != first);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}

static int
sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_fcoe_stats stats[MAX_NCHAN];
	int i, nchan = sc->chip_params->nchan;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	for (i = 0; i < nchan; i++)
		t4_get_fcoe_stats(sc, i, &stats[i]);

	if (nchan > 2) {
		sbuf_printf(sb, "                   channel 0        channel 1"
		    "        channel 2        channel 3");
		sbuf_printf(sb, "\noctetsDDP:  %16ju %16ju %16ju %16ju",
		    stats[0].octets_ddp, stats[1].octets_ddp,
		    stats[2].octets_ddp, stats[3].octets_ddp);
		sbuf_printf(sb, "\nframesDDP:  %16u %16u %16u %16u",
		    stats[0].frames_ddp, stats[1].frames_ddp,
		    stats[2].frames_ddp, stats[3].frames_ddp);
		sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u",
		    stats[0].frames_drop, stats[1].frames_drop,
		    stats[2].frames_drop, stats[3].frames_drop);
	} else {
		sbuf_printf(sb, "                   channel 0        channel 1");
		sbuf_printf(sb, "\noctetsDDP:  %16ju %16ju",
		    stats[0].octets_ddp, stats[1].octets_ddp);
		sbuf_printf(sb, "\nframesDDP:  %16u %16u",
		    stats[0].frames_ddp, stats[1].frames_ddp);
		sbuf_printf(sb, "\nframesDrop: %16u %16u",
		    stats[0].frames_drop, stats[1].frames_drop);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) " 6411 "Class IPG (0.1 ns) Flow IPG (us)"); 6412 6413 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { 6414 t4_get_tx_sched(sc, i, &kbps, &ipg); 6415 sbuf_printf(sb, "\n %u %-5s %u ", i, 6416 (mode & (1 << i)) ? "flow" : "class", map & 3); 6417 if (kbps) 6418 sbuf_printf(sb, "%9u ", kbps); 6419 else 6420 sbuf_printf(sb, " disabled "); 6421 6422 if (ipg) 6423 sbuf_printf(sb, "%13u ", ipg); 6424 else 6425 sbuf_printf(sb, " disabled "); 6426 6427 if (pace_tab[i]) 6428 sbuf_printf(sb, "%10u", pace_tab[i]); 6429 else 6430 sbuf_printf(sb, " disabled"); 6431 } 6432 6433 rc = sbuf_finish(sb); 6434 sbuf_delete(sb); 6435 6436 return (rc); 6437} 6438 6439static int 6440sysctl_lb_stats(SYSCTL_HANDLER_ARGS) 6441{ 6442 struct adapter *sc = arg1; 6443 struct sbuf *sb; 6444 int rc, i, j; 6445 uint64_t *p0, *p1; 6446 struct lb_port_stats s[2]; 6447 static const char *stat_name[] = { 6448 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", 6449 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", 6450 "Frames128To255:", "Frames256To511:", "Frames512To1023:", 6451 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", 6452 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", 6453 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", 6454 "BG2FramesTrunc:", "BG3FramesTrunc:" 6455 }; 6456 6457 rc = sysctl_wire_old_buffer(req, 0); 6458 if (rc != 0) 6459 return (rc); 6460 6461 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6462 if (sb == NULL) 6463 return (ENOMEM); 6464 6465 memset(s, 0, sizeof(s)); 6466 6467 for (i = 0; i < sc->chip_params->nchan; i += 2) { 6468 t4_get_lb_stats(sc, i, &s[0]); 6469 t4_get_lb_stats(sc, i + 1, &s[1]); 6470 6471 p0 = &s[0].octets; 6472 p1 = &s[1].octets; 6473 sbuf_printf(sb, "%s Loopback %u" 6474 " Loopback %u", i == 0 ? 
"" : "\n", i, i + 1); 6475 6476 for (j = 0; j < nitems(stat_name); j++) 6477 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], 6478 *p0++, *p1++); 6479 } 6480 6481 rc = sbuf_finish(sb); 6482 sbuf_delete(sb); 6483 6484 return (rc); 6485} 6486 6487static int 6488sysctl_linkdnrc(SYSCTL_HANDLER_ARGS) 6489{ 6490 int rc = 0; 6491 struct port_info *pi = arg1; 6492 struct sbuf *sb; 6493 6494 rc = sysctl_wire_old_buffer(req, 0); 6495 if (rc != 0) 6496 return(rc); 6497 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req); 6498 if (sb == NULL) 6499 return (ENOMEM); 6500 6501 if (pi->linkdnrc < 0) 6502 sbuf_printf(sb, "n/a"); 6503 else 6504 sbuf_printf(sb, "%s", t4_link_down_rc_str(pi->linkdnrc)); 6505 6506 rc = sbuf_finish(sb); 6507 sbuf_delete(sb); 6508 6509 return (rc); 6510} 6511 6512struct mem_desc { 6513 unsigned int base; 6514 unsigned int limit; 6515 unsigned int idx; 6516}; 6517 6518static int 6519mem_desc_cmp(const void *a, const void *b) 6520{ 6521 return ((const struct mem_desc *)a)->base - 6522 ((const struct mem_desc *)b)->base; 6523} 6524 6525static void 6526mem_region_show(struct sbuf *sb, const char *name, unsigned int from, 6527 unsigned int to) 6528{ 6529 unsigned int size; 6530 6531 if (from == to) 6532 return; 6533 6534 size = to - from + 1; 6535 if (size == 0) 6536 return; 6537 6538 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ 6539 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); 6540} 6541 6542static int 6543sysctl_meminfo(SYSCTL_HANDLER_ARGS) 6544{ 6545 struct adapter *sc = arg1; 6546 struct sbuf *sb; 6547 int rc, i, n; 6548 uint32_t lo, hi, used, alloc; 6549 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"}; 6550 static const char *region[] = { 6551 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", 6552 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", 6553 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:", 6554 "TDDP region:", "TPT region:", "STAG region:", "RQ region:", 6555 "RQUDP region:", "PBL region:", "TXPBL region:", 6556 "DBVFIFO region:", "ULPRX state:", "ULPTX state:", 6557 "On-chip queues:" 6558 }; 6559 struct mem_desc avail[4]; 6560 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */ 6561 struct mem_desc *md = mem; 6562 6563 rc = sysctl_wire_old_buffer(req, 0); 6564 if (rc != 0) 6565 return (rc); 6566 6567 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6568 if (sb == NULL) 6569 return (ENOMEM); 6570 6571 for (i = 0; i < nitems(mem); i++) { 6572 mem[i].limit = 0; 6573 mem[i].idx = i; 6574 } 6575 6576 /* Find and sort the populated memory ranges */ 6577 i = 0; 6578 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 6579 if (lo & F_EDRAM0_ENABLE) { 6580 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR); 6581 avail[i].base = G_EDRAM0_BASE(hi) << 20; 6582 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20); 6583 avail[i].idx = 0; 6584 i++; 6585 } 6586 if (lo & F_EDRAM1_ENABLE) { 6587 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR); 6588 avail[i].base = G_EDRAM1_BASE(hi) << 20; 6589 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20); 6590 avail[i].idx = 1; 6591 i++; 6592 } 6593 if (lo & F_EXT_MEM_ENABLE) { 6594 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 6595 avail[i].base = G_EXT_MEM_BASE(hi) << 20; 6596 avail[i].limit = avail[i].base + 6597 (G_EXT_MEM_SIZE(hi) << 20); 6598 avail[i].idx = is_t5(sc) ? 
static int
sysctl_meminfo(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n;
	uint32_t lo, hi, used, alloc;
	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
	static const char *region[] = {
		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
		"RQUDP region:", "PBL region:", "TXPBL region:",
		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
		"On-chip queues:"
	};
	struct mem_desc avail[4];
	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
	struct mem_desc *md = mem;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	for (i = 0; i < nitems(mem); i++) {
		mem[i].limit = 0;
		mem[i].idx = i;
	}

	/* Find and sort the populated memory ranges */
	i = 0;
	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	if (lo & F_EDRAM0_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		avail[i].base = G_EDRAM0_BASE(hi) << 20;
		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
		avail[i].idx = 0;
		i++;
	}
	if (lo & F_EDRAM1_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		avail[i].base = G_EDRAM1_BASE(hi) << 20;
		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
		avail[i].idx = 1;
		i++;
	}
	if (lo & F_EXT_MEM_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
		avail[i].limit = avail[i].base +
		    (G_EXT_MEM_SIZE(hi) << 20);
		avail[i].idx = is_t5(sc) ? 3 : 2;	/* Call it MC0 for T5 */
		i++;
	}
	if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
		avail[i].limit = avail[i].base +
		    (G_EXT_MEM1_SIZE(hi) << 20);
		avail[i].idx = 4;
		i++;
	}
	if (!i) {				/* no memory available */
		sbuf_delete(sb);
		return (0);
	}
	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);

	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);

	/* the next few have explicit upper bounds */
	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
	md->limit = md->base - 1 +
	    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
	    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
	md++;

	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
	md->limit = md->base - 1 +
	    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
	    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
	md++;

	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
		if (chip_id(sc) <= CHELSIO_T5)
			md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
		else
			md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR);
		md->limit = 0;
	} else {
		md->base = 0;
		md->idx = nitems(region);	/* hide it */
	}
	md++;

#define ulp_region(reg) \
	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)

	ulp_region(RX_ISCSI);
	ulp_region(RX_TDDP);
	ulp_region(TX_TPT);
	ulp_region(RX_STAG);
	ulp_region(RX_RQ);
	ulp_region(RX_RQUDP);
	ulp_region(RX_PBL);
	ulp_region(TX_PBL);
#undef ulp_region

	md->base = 0;
	md->idx = nitems(region);
	if (!is_t4(sc)) {
		uint32_t size = 0;
		uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2);
		uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE);

		if (is_t5(sc)) {
			if (sge_ctrl & F_VFIFO_ENABLE)
				size = G_DBVFIFO_SIZE(fifo_size);
		} else
			size = G_T6_DBVFIFO_SIZE(fifo_size);

		if (size) {
			md->base = G_BASEADDR(t4_read_reg(sc,
			    A_SGE_DBVFIFO_BADDR));
			md->limit = md->base + (size << 2) - 1;
		}
	}
	md++;

	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
	md->limit = 0;
	md++;
	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
	md->limit = 0;
	md++;

	md->base = sc->vres.ocq.start;
	if (sc->vres.ocq.size)
		md->limit = md->base + sc->vres.ocq.size - 1;
	else
		md->idx = nitems(region);	/* hide it */
	md++;

	/* add any address-space holes, there can be up to 3 */
	for (n = 0; n < i - 1; n++)
		if (avail[n].limit < avail[n + 1].base)
			(md++)->base = avail[n].limit;
	if (avail[n].limit)
		(md++)->base = avail[n].limit;

	n = md - mem;
	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);

	for (lo = 0; lo < i; lo++)
		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
		    avail[lo].limit - 1);

	sbuf_printf(sb, "\n");
	for (i = 0; i < n; i++) {
		if (mem[i].idx >= nitems(region))
			continue;	/* skip holes */
		if (!mem[i].limit)
			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
		mem_region_show(sb, region[mem[i].idx], mem[i].base,
		    mem[i].limit);
	}

	sbuf_printf(sb, "\n");
	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
	mem_region_show(sb, "uP RAM:", lo, hi);

	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
	mem_region_show(sb, "uP Extmem2:", lo, hi);

	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
	    G_PMRXMAXPAGE(lo),
	    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
	    (lo & F_PMRXNUMCHN) ? 2 : 1);

	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
	    G_PMTXMAXPAGE(lo),
	    hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
	    hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
	sbuf_printf(sb, "%u p-structs\n",
	    t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));

	for (i = 0; i < 4; i++) {
		if (chip_id(sc) > CHELSIO_T5)
			lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
		else
			lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
		if (is_t5(sc)) {
			used = G_T5_USED(lo);
			alloc = G_T5_ALLOC(lo);
		} else {
			used = G_USED(lo);
			alloc = G_ALLOC(lo);
		}
		/* For T6 these are MAC buffer groups */
		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
		    i, used, alloc);
	}
	for (i = 0; i < sc->chip_params->nchan; i++) {
		if (chip_id(sc) > CHELSIO_T5)
			lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
		else
			lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
		if (is_t5(sc)) {
			used = G_T5_USED(lo);
			alloc = G_T5_ALLOC(lo);
		} else {
			used = G_USED(lo);
			alloc = G_ALLOC(lo);
		}
		/* For T6 these are MAC buffer groups */
		sbuf_printf(sb,
		    "\nLoopback %d using %u pages out of %u allocated",
		    i, used, alloc);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static inline void
tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
{
	*mask = x | y;
	y = htobe64(y);
	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
}
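/*
 * Each MPS TCAM entry is read back as an (x, y) bit-pair encoding.  An entry
 * with tcamx & tcamy != 0 is invalid and is skipped by the dump loops below.
 * For valid entries the helper above reports the compare value from y (the
 * low 48 bits, taken byte-reversed so addr[] ends up in network order) and
 * prints x | y as the mask.  For example, with y = 0x0000aabbccddeeff and
 * x = 0, addr[] becomes aa:bb:cc:dd:ee:ff and the mask is 0x0000aabbccddeeff.
 */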
static int
sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;

	MPASS(chip_id(sc) <= CHELSIO_T5);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb,
	    "Idx  Ethernet address     Mask     Vld Ports PF"
	    "  VF              Replication             P0 P1 P2 P3  ML");
	for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
		uint64_t tcamx, tcamy, mask;
		uint32_t cls_lo, cls_hi;
		uint8_t addr[ETHER_ADDR_LEN];

		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
		if (tcamx & tcamy)
			continue;
		tcamxy2valmask(tcamx, tcamy, addr, &mask);
		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
		    " %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
		    addr[3], addr[4], addr[5], (uintmax_t)mask,
		    (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
		    G_PORTMAP(cls_hi), G_PF(cls_lo),
		    (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);

		if (cls_lo & F_REPLICATE) {
			struct fw_ldst_cmd ldst_cmd;

			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
			ldst_cmd.op_to_addrspace =
			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
				F_FW_CMD_REQUEST | F_FW_CMD_READ |
				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
			ldst_cmd.u.mps.rplc.fid_idx =
			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
				V_FW_LDST_CMD_IDX(i));

			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
			    "t4mps");
			if (rc)
				break;
			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
			    sizeof(ldst_cmd), &ldst_cmd);
			end_synchronized_op(sc, 0);

			if (rc != 0) {
				sbuf_printf(sb, "%36d", rc);
				rc = 0;
			} else {
				sbuf_printf(sb, " %08x %08x %08x %08x",
				    be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
				    be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
				    be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
				    be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
			}
		} else
			sbuf_printf(sb, "%36s", "");

		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
	}

	if (rc)
		(void) sbuf_finish(sb);
	else
		rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
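/*
 * Unlike the T4/T5 handler above, which reads the TCAM directly through
 * MPS_CLS_TCAM_{X,Y}_L, the T6 variant below goes through the indirect
 * A_MPS_CLS_TCAM_DATA2_CTL window.  Each entry is read twice: once with
 * CTLXYBITSEL=0 for the Y half and once with CTLXYBITSEL=1 for the X half,
 * and entries past index 255 select the second TCAM via CTLTCAMSEL.
 */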
static int
sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;

	MPASS(chip_id(sc) > CHELSIO_T5);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "Idx  Ethernet address     Mask       VNI   Mask"
	    "   IVLAN Vld DIP_Hit   Lookup  Port Vld Ports PF  VF"
	    "                           Replication"
	    "                                    P0 P1 P2 P3  ML\n");

	for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
		uint8_t dip_hit, vlan_vld, lookup_type, port_num;
		uint16_t ivlan;
		uint64_t tcamx, tcamy, val, mask;
		uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
		uint8_t addr[ETHER_ADDR_LEN];

		ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0);
		if (i < 256)
			ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
		else
			ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);
		t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
		val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
		tcamy = G_DMACH(val) << 32;
		tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
		data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
		lookup_type = G_DATALKPTYPE(data2);
		port_num = G_DATAPORTNUM(data2);
		if (lookup_type && lookup_type != M_DATALKPTYPE) {
			/* Inner header VNI */
			vniy = ((data2 & F_DATAVIDH2) << 23) |
			    (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
			dip_hit = data2 & F_DATADIPHIT;
			vlan_vld = 0;
		} else {
			vniy = 0;
			dip_hit = 0;
			vlan_vld = data2 & F_DATAVIDH2;
			ivlan = G_VIDL(val);
		}

		ctl |= V_CTLXYBITSEL(1);
		t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
		val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
		tcamx = G_DMACH(val) << 32;
		tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
		data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
		if (lookup_type && lookup_type != M_DATALKPTYPE) {
			/* Inner header VNI mask */
			vnix = ((data2 & F_DATAVIDH2) << 23) |
			    (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
		} else
			vnix = 0;

		if (tcamx & tcamy)
			continue;
		tcamxy2valmask(tcamx, tcamy, addr, &mask);

		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));

		if (lookup_type && lookup_type != M_DATALKPTYPE) {
			sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
			    "%012jx %06x %06x    -    -   %3c"
			    "      'I'  %4x   %3c   %#x%4u%4d", i, addr[0],
			    addr[1], addr[2], addr[3], addr[4], addr[5],
			    (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
			    port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
			    G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
			    cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
		} else {
			sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
			    "%012jx    -       -   ", i, addr[0], addr[1],
			    addr[2], addr[3], addr[4], addr[5],
			    (uintmax_t)mask);

			if (vlan_vld)
				sbuf_printf(sb, "%4u   Y     ", ivlan);
			else
				sbuf_printf(sb, "  -    N     ");

			sbuf_printf(sb, "-      %3c  %4x   %3c   %#x%4u%4d",
			    lookup_type ? 'I' : 'O', port_num,
			    cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
			    G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
			    cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
		}

		if (cls_lo & F_T6_REPLICATE) {
			struct fw_ldst_cmd ldst_cmd;

			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
			ldst_cmd.op_to_addrspace =
			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
				F_FW_CMD_REQUEST | F_FW_CMD_READ |
				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
			ldst_cmd.u.mps.rplc.fid_idx =
			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
				V_FW_LDST_CMD_IDX(i));

			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
			    "t6mps");
			if (rc)
				break;
			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
			    sizeof(ldst_cmd), &ldst_cmd);
			end_synchronized_op(sc, 0);

			if (rc != 0) {
				sbuf_printf(sb, "%72d", rc);
				rc = 0;
			} else {
				sbuf_printf(sb, " %08x %08x %08x %08x"
				    " %08x %08x %08x %08x",
				    be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
				    be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
				    be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
				    be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
				    be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
				    be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
				    be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
				    be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
			}
		} else
			sbuf_printf(sb, "%72s", "");

		sbuf_printf(sb, "%4u%3u%3u%3u %#x",
		    G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
		    G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
		    (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
	}

	if (rc)
		(void) sbuf_finish(sb);
	else
		rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
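/*
 * The handler below dumps the adapter's 16-entry (NMTUS) MTU table, the set
 * of path MTUs from which the hardware picks an MSS for offloaded
 * connections.
 */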
static int
sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	uint16_t mtus[NMTUS];

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_read_mtu_tbl(sc, mtus, NULL);

	sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
	    mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
	    mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
	    mtus[14], mtus[15]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
	uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
	static const char *tx_stats[MAX_PM_NSTATS] = {
		"Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
		"Tx FIFO wait", NULL, "Tx latency"
	};
	static const char *rx_stats[MAX_PM_NSTATS] = {
		"Read:", "Write bypass:", "Write mem:", "Flush:",
		"Rx FIFO wait", NULL, "Rx latency"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
	t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);

	sbuf_printf(sb, "                Tx pcmds             Tx bytes");
	for (i = 0; i < 4; i++) {
		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
		    tx_cyc[i]);
	}

	sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
	for (i = 0; i < 4; i++) {
		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
		    rx_cyc[i]);
	}

	if (chip_id(sc) > CHELSIO_T5) {
		sbuf_printf(sb,
		    "\n              Total wait      Total occupancy");
		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
		    tx_cyc[i]);
		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
		    rx_cyc[i]);

		i += 2;
		MPASS(i < nitems(tx_stats));

		sbuf_printf(sb,
		    "\n                   Reads           Total wait");
		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
		    tx_cyc[i]);
		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
		    rx_cyc[i]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_rdma_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	mtx_lock(&sc->reg_lock);
	t4_tp_get_rdma_stats(sc, &stats);
	mtx_unlock(&sc->reg_lock);

	sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
	sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
static int
sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_tcp_stats v4, v6;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	mtx_lock(&sc->reg_lock);
	t4_tp_get_tcp_stats(sc, &v4, &v6);
	mtx_unlock(&sc->reg_lock);

	sbuf_printf(sb,
	    "                                IP                 IPv6\n");
	sbuf_printf(sb, "OutRsts:     %20u %20u\n",
	    v4.tcp_out_rsts, v6.tcp_out_rsts);
	sbuf_printf(sb, "InSegs:      %20ju %20ju\n",
	    v4.tcp_in_segs, v6.tcp_in_segs);
	sbuf_printf(sb, "OutSegs:     %20ju %20ju\n",
	    v4.tcp_out_segs, v6.tcp_out_segs);
	sbuf_printf(sb, "RetransSegs: %20ju %20ju",
	    v4.tcp_retrans_segs, v6.tcp_retrans_segs);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tid_info *t = &sc->tids;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	if (t->natids) {
		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
		    t->atids_in_use);
	}

	if (t->ntids) {
		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
			uint32_t b;

			if (chip_id(sc) <= CHELSIO_T5)
				b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
			else
				b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX);

			if (b) {
				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			} else {
				sbuf_printf(sb, "TID range: %u-%u",
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			}
		} else
			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
		sbuf_printf(sb, ", in use: %u\n",
		    atomic_load_acq_int(&t->tids_in_use));
	}

	if (t->nstids) {
		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
		    t->stid_base + t->nstids - 1, t->stids_in_use);
	}

	if (t->nftids) {
		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
		    t->ftid_base + t->nftids - 1);
	}

	if (t->netids) {
		sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
		    t->etid_base + t->netids - 1);
	}

	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
static int
sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_err_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	mtx_lock(&sc->reg_lock);
	t4_tp_get_err_stats(sc, &stats);
	mtx_unlock(&sc->reg_lock);

	if (sc->chip_params->nchan > 2) {
		sbuf_printf(sb, "                 channel 0  channel 1"
		    "  channel 2  channel 3\n");
		sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
		    stats.mac_in_errs[0], stats.mac_in_errs[1],
		    stats.mac_in_errs[2], stats.mac_in_errs[3]);
		sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
		    stats.hdr_in_errs[0], stats.hdr_in_errs[1],
		    stats.hdr_in_errs[2], stats.hdr_in_errs[3]);
		sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
		    stats.tcp_in_errs[0], stats.tcp_in_errs[1],
		    stats.tcp_in_errs[2], stats.tcp_in_errs[3]);
		sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
		    stats.tcp6_in_errs[0], stats.tcp6_in_errs[1],
		    stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]);
		sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
		    stats.tnl_cong_drops[0], stats.tnl_cong_drops[1],
		    stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]);
		sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
		    stats.tnl_tx_drops[0], stats.tnl_tx_drops[1],
		    stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]);
		sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
		    stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1],
		    stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]);
		sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
		    stats.ofld_chan_drops[0], stats.ofld_chan_drops[1],
		    stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]);
	} else {
		sbuf_printf(sb, "                 channel 0  channel 1\n");
		sbuf_printf(sb, "macInErrs:      %10u %10u\n",
		    stats.mac_in_errs[0], stats.mac_in_errs[1]);
		sbuf_printf(sb, "hdrInErrs:      %10u %10u\n",
		    stats.hdr_in_errs[0], stats.hdr_in_errs[1]);
		sbuf_printf(sb, "tcpInErrs:      %10u %10u\n",
		    stats.tcp_in_errs[0], stats.tcp_in_errs[1]);
		sbuf_printf(sb, "tcp6InErrs:     %10u %10u\n",
		    stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]);
		sbuf_printf(sb, "tnlCongDrops:   %10u %10u\n",
		    stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]);
		sbuf_printf(sb, "tnlTxDrops:     %10u %10u\n",
		    stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]);
		sbuf_printf(sb, "ofldVlanDrops:  %10u %10u\n",
		    stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]);
		sbuf_printf(sb, "ofldChanDrops:  %10u %10u\n\n",
		    stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]);
	}

	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
	    stats.ofld_no_neigh, stats.ofld_cong_defer);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct tp_params *tpp = &sc->params.tp;
	u_int mask;
	int rc;

	mask = tpp->la_mask >> 16;
	rc = sysctl_handle_int(oidp, &mask, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);
	if (mask > 0xffff)
		return (EINVAL);
	tpp->la_mask = mask << 16;
	t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask);

	return (0);
}

struct field_desc {
	const char *name;
	u_int start;
	u_int width;
};

static void
field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
{
	char buf[32];
	int line_size = 0;

	while (f->name) {
		uint64_t mask = (1ULL << f->width) - 1;
		int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
		    ((uintmax_t)v >> f->start) & mask);

		if (line_size + len >= 79) {
			line_size = 8;
			sbuf_printf(sb, "\n        ");
		}
		sbuf_printf(sb, "%s ", buf);
		line_size += len + 1;
		f++;
	}
	sbuf_printf(sb, "\n");
}
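/*
 * A field_desc entry { name, start, width } describes one bit field of a
 * 64-bit logic analyzer sample; field_desc_show() above prints each one as
 * (v >> start) & ((1 << width) - 1).  For example, { "State", 56, 4 } in
 * tp_la0 below extracts bits 59:56.  Each descriptor list is walked until
 * the NULL name sentinel.
 */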
static const struct field_desc tp_la0[] = {
	{ "RcfOpCodeOut", 60, 4 },
	{ "State", 56, 4 },
	{ "WcfState", 52, 4 },
	{ "RcfOpcSrcOut", 50, 2 },
	{ "CRxError", 49, 1 },
	{ "ERxError", 48, 1 },
	{ "SanityFailed", 47, 1 },
	{ "SpuriousMsg", 46, 1 },
	{ "FlushInputMsg", 45, 1 },
	{ "FlushInputCpl", 44, 1 },
	{ "RssUpBit", 43, 1 },
	{ "RssFilterHit", 42, 1 },
	{ "Tid", 32, 10 },
	{ "InitTcb", 31, 1 },
	{ "LineNumber", 24, 7 },
	{ "Emsg", 23, 1 },
	{ "EdataOut", 22, 1 },
	{ "Cmsg", 21, 1 },
	{ "CdataOut", 20, 1 },
	{ "EreadPdu", 19, 1 },
	{ "CreadPdu", 18, 1 },
	{ "TunnelPkt", 17, 1 },
	{ "RcfPeerFin", 16, 1 },
	{ "RcfReasonOut", 12, 4 },
	{ "TxCchannel", 10, 2 },
	{ "RcfTxChannel", 8, 2 },
	{ "RxEchannel", 6, 2 },
	{ "RcfRxChannel", 5, 1 },
	{ "RcfDataOutSrdy", 4, 1 },
	{ "RxDvld", 3, 1 },
	{ "RxOoDvld", 2, 1 },
	{ "RxCongestion", 1, 1 },
	{ "TxCongestion", 0, 1 },
	{ NULL }
};

static const struct field_desc tp_la1[] = {
	{ "CplCmdIn", 56, 8 },
	{ "CplCmdOut", 48, 8 },
	{ "ESynOut", 47, 1 },
	{ "EAckOut", 46, 1 },
	{ "EFinOut", 45, 1 },
	{ "ERstOut", 44, 1 },
	{ "SynIn", 43, 1 },
	{ "AckIn", 42, 1 },
	{ "FinIn", 41, 1 },
	{ "RstIn", 40, 1 },
	{ "DataIn", 39, 1 },
	{ "DataInVld", 38, 1 },
	{ "PadIn", 37, 1 },
	{ "RxBufEmpty", 36, 1 },
	{ "RxDdp", 35, 1 },
	{ "RxFbCongestion", 34, 1 },
	{ "TxFbCongestion", 33, 1 },
	{ "TxPktSumSrdy", 32, 1 },
	{ "RcfUlpType", 28, 4 },
	{ "Eread", 27, 1 },
	{ "Ebypass", 26, 1 },
	{ "Esave", 25, 1 },
	{ "Static0", 24, 1 },
	{ "Cread", 23, 1 },
	{ "Cbypass", 22, 1 },
	{ "Csave", 21, 1 },
	{ "CPktOut", 20, 1 },
	{ "RxPagePoolFull", 18, 2 },
	{ "RxLpbkPkt", 17, 1 },
	{ "TxLpbkPkt", 16, 1 },
	{ "RxVfValid", 15, 1 },
	{ "SynLearned", 14, 1 },
	{ "SetDelEntry", 13, 1 },
	{ "SetInvEntry", 12, 1 },
	{ "CpcmdDvld", 11, 1 },
	{ "CpcmdSave", 10, 1 },
	{ "RxPstructsFull", 8, 2 },
	{ "EpcmdDvld", 7, 1 },
	{ "EpcmdFlush", 6, 1 },
	{ "EpcmdTrimPrefix", 5, 1 },
	{ "EpcmdTrimPostfix", 4, 1 },
	{ "ERssIp4Pkt", 3, 1 },
	{ "ERssIp6Pkt", 2, 1 },
	{ "ERssTcpUdpPkt", 1, 1 },
	{ "ERssFceFipPkt", 0, 1 },
	{ NULL }
};

static const struct field_desc tp_la2[] = {
	{ "CplCmdIn", 56, 8 },
	{ "MpsVfVld", 55, 1 },
	{ "MpsPf", 52, 3 },
	{ "MpsVf", 44, 8 },
	{ "SynIn", 43, 1 },
	{ "AckIn", 42, 1 },
	{ "FinIn", 41, 1 },
	{ "RstIn", 40, 1 },
	{ "DataIn", 39, 1 },
	{ "DataInVld", 38, 1 },
	{ "PadIn", 37, 1 },
	{ "RxBufEmpty", 36, 1 },
	{ "RxDdp", 35, 1 },
	{ "RxFbCongestion", 34, 1 },
	{ "TxFbCongestion", 33, 1 },
	{ "TxPktSumSrdy", 32, 1 },
	{ "RcfUlpType", 28, 4 },
	{ "Eread", 27, 1 },
	{ "Ebypass", 26, 1 },
	{ "Esave", 25, 1 },
	{ "Static0", 24, 1 },
	{ "Cread", 23, 1 },
	{ "Cbypass", 22, 1 },
	{ "Csave", 21, 1 },
	{ "CPktOut", 20, 1 },
	{ "RxPagePoolFull", 18, 2 },
	{ "RxLpbkPkt", 17, 1 },
	{ "TxLpbkPkt", 16, 1 },
	{ "RxVfValid", 15, 1 },
	{ "SynLearned", 14, 1 },
	{ "SetDelEntry", 13, 1 },
	{ "SetInvEntry", 12, 1 },
	{ "CpcmdDvld", 11, 1 },
	{ "CpcmdSave", 10, 1 },
	{ "RxPstructsFull", 8, 2 },
	{ "EpcmdDvld", 7, 1 },
	{ "EpcmdFlush", 6, 1 },
	{ "EpcmdTrimPrefix", 5, 1 },
	{ "EpcmdTrimPostfix", 4, 1 },
	{ "ERssIp4Pkt", 3, 1 },
	{ "ERssIp6Pkt", 2, 1 },
	{ "ERssTcpUdpPkt", 1, 1 },
	{ "ERssFceFipPkt", 0, 1 },
	{ NULL }
};

static void
tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
{

	field_desc_show(sb, *p, tp_la0);
}

static void
tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
{

	if (idx)
		sbuf_printf(sb, "\n");
	field_desc_show(sb, p[0], tp_la0);
	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
		field_desc_show(sb, p[1], tp_la0);
}
static void
tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
{

	if (idx)
		sbuf_printf(sb, "\n");
	field_desc_show(sb, p[0], tp_la0);
	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
		field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
}

static int
sysctl_tp_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	uint64_t *buf, *p;
	int rc;
	u_int i, inc;
	void (*show_func)(struct sbuf *, uint64_t *, int);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);

	t4_tp_read_la(sc, buf, NULL);
	p = buf;

	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
	case 2:
		inc = 2;
		show_func = tp_la_show2;
		break;
	case 3:
		inc = 2;
		show_func = tp_la_show3;
		break;
	default:
		inc = 1;
		show_func = tp_la_show;
	}

	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
		(*show_func)(sb, p, i);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}

static int
sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	u64 nrate[MAX_NCHAN], orate[MAX_NCHAN];

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_get_chan_txrate(sc, nrate, orate);

	if (sc->chip_params->nchan > 2) {
		sbuf_printf(sb, "              channel 0   channel 1"
		    "   channel 2   channel 3\n");
		sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
		    nrate[0], nrate[1], nrate[2], nrate[3]);
		sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
		    orate[0], orate[1], orate[2], orate[3]);
	} else {
		sbuf_printf(sb, "              channel 0   channel 1\n");
		sbuf_printf(sb, "NIC B/s:     %10ju  %10ju\n",
		    nrate[0], nrate[1]);
		sbuf_printf(sb, "Offload B/s: %10ju  %10ju",
		    orate[0], orate[1]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc, i;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_ulprx_read_la(sc, buf);
	p = buf;

	sbuf_printf(sb, "      Pcmd        Type   Message"
	    "                Data");
	for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
		sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
		    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}
static int
sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, v;

	MPASS(chip_id(sc) >= CHELSIO_T5);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	v = t4_read_reg(sc, A_SGE_STAT_CFG);
	if (G_STATSOURCE_T5(v) == 7) {
		int mode;

		mode = is_t5(sc) ? G_STATMODE(v) : G_T6_STATMODE(v);
		if (mode == 0) {
			sbuf_printf(sb, "total %d, incomplete %d",
			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
			    t4_read_reg(sc, A_SGE_STAT_MATCH));
		} else if (mode == 1) {
			sbuf_printf(sb, "total %d, data overflow %d",
			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
			    t4_read_reg(sc, A_SGE_STAT_MATCH));
		} else {
			sbuf_printf(sb, "unknown mode %d", mode);
		}
	}
	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
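/*
 * For sysctl_tc_params below, arg2 packs two values: the port index in the
 * upper 16 bits and the traffic class index within that port in the lower 16
 * bits.  The handler snapshots the class flags and parameters while holding
 * the adapter lock and does all of the formatting after dropping it.
 */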
static int
sysctl_tc_params(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct tx_sched_class *tc;
	struct t4_sched_class_params p;
	struct sbuf *sb;
	int i, rc, port_id, flags, mbps, gbps;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	port_id = arg2 >> 16;
	MPASS(port_id < sc->params.nports);
	MPASS(sc->port[port_id] != NULL);
	i = arg2 & 0xffff;
	MPASS(i < sc->chip_params->nsched_cls);
	tc = &sc->port[port_id]->tc[i];

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4tc_p");
	if (rc)
		goto done;
	flags = tc->flags;
	p = tc->params;
	end_synchronized_op(sc, LOCK_HELD);

	if ((flags & TX_SC_OK) == 0) {
		sbuf_printf(sb, "none");
		goto done;
	}

	if (p.level == SCHED_CLASS_LEVEL_CL_WRR) {
		sbuf_printf(sb, "cl-wrr weight %u", p.weight);
		goto done;
	} else if (p.level == SCHED_CLASS_LEVEL_CL_RL)
		sbuf_printf(sb, "cl-rl");
	else if (p.level == SCHED_CLASS_LEVEL_CH_RL)
		sbuf_printf(sb, "ch-rl");
	else {
		rc = ENXIO;
		goto done;
	}

	if (p.ratemode == SCHED_CLASS_RATEMODE_REL) {
		/* XXX: top speed or actual link speed? */
		gbps = port_top_speed(sc->port[port_id]);
		sbuf_printf(sb, " %u%% of %uGbps", p.maxrate, gbps);
	}
	else if (p.ratemode == SCHED_CLASS_RATEMODE_ABS) {
		switch (p.rateunit) {
		case SCHED_CLASS_RATEUNIT_BITS:
			mbps = p.maxrate / 1000;
			gbps = p.maxrate / 1000000;
			if (p.maxrate == gbps * 1000000)
				sbuf_printf(sb, " %uGbps", gbps);
			else if (p.maxrate == mbps * 1000)
				sbuf_printf(sb, " %uMbps", mbps);
			else
				sbuf_printf(sb, " %uKbps", p.maxrate);
			break;
		case SCHED_CLASS_RATEUNIT_PKTS:
			sbuf_printf(sb, " %upps", p.maxrate);
			break;
		default:
			rc = ENXIO;
			goto done;
		}
	}

	switch (p.mode) {
	case SCHED_CLASS_MODE_CLASS:
		sbuf_printf(sb, " aggregate");
		break;
	case SCHED_CLASS_MODE_FLOW:
		sbuf_printf(sb, " per-flow");
		break;
	default:
		rc = ENXIO;
		goto done;
	}

done:
	if (rc == 0)
		rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
#endif

#ifdef TCP_OFFLOAD
static void
unit_conv(char *buf, size_t len, u_int val, u_int factor)
{
	u_int rem = val % factor;

	if (rem == 0)
		snprintf(buf, len, "%u", val / factor);
	else {
		while (rem % 10 == 0)
			rem /= 10;
		snprintf(buf, len, "%u.%u", val / factor, rem);
	}
}

static int
sysctl_tp_tick(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	char buf[16];
	u_int res, re;
	u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;

	res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
	switch (arg2) {
	case 0:
		/* timer_tick */
		re = G_TIMERRESOLUTION(res);
		break;
	case 1:
		/* TCP timestamp tick */
		re = G_TIMESTAMPRESOLUTION(res);
		break;
	case 2:
		/* DACK tick */
		re = G_DELAYEDACKRESOLUTION(res);
		break;
	default:
		return (EDOOFUS);
	}

	unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000);

	return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
}

static int
sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int res, dack_re, v;
	u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;

	res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
	dack_re = G_DELAYEDACKRESOLUTION(res);
	v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER);

	return (sysctl_handle_int(oidp, &v, 0, req));
}

static int
sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	int reg = arg2;
	u_int tre;
	u_long tp_tick_us, v;
	u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;

	MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
	    reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX ||
	    reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
	    reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);

	tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
	tp_tick_us = (cclk_ps << tre) / 1000000;

	if (reg == A_TP_INIT_SRTT)
		v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
	else
		v = tp_tick_us * t4_read_reg(sc, reg);

	return (sysctl_handle_long(oidp, &v, 0, req));
}
#endif
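/*
 * The helpers below translate between the driver's T4_FILTER_* mode bits
 * (the ioctl-visible representation) and the hardware's compressed filter
 * tuple configuration: fconf mirrors the fields selected in TP_VLAN_PRI_MAP
 * and iconf carries the F_VNIC bit of TP_INGRESS_CONFIG, which decides
 * whether the "VNIC" slot matches a PF/VF id or an outer VLAN id.
 * fconf_iconf_to_mode() and mode_to_fconf()/mode_to_iconf() are inverses of
 * each other over the supported bits.
 */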
(fconf & F_FRAGMENTATION) 7903 mode |= T4_FILTER_IP_FRAGMENT; 7904 7905 if (fconf & F_MPSHITTYPE) 7906 mode |= T4_FILTER_MPS_HIT_TYPE; 7907 7908 if (fconf & F_MACMATCH) 7909 mode |= T4_FILTER_MAC_IDX; 7910 7911 if (fconf & F_ETHERTYPE) 7912 mode |= T4_FILTER_ETH_TYPE; 7913 7914 if (fconf & F_PROTOCOL) 7915 mode |= T4_FILTER_IP_PROTO; 7916 7917 if (fconf & F_TOS) 7918 mode |= T4_FILTER_IP_TOS; 7919 7920 if (fconf & F_VLAN) 7921 mode |= T4_FILTER_VLAN; 7922 7923 if (fconf & F_VNIC_ID) { 7924 mode |= T4_FILTER_VNIC; 7925 if (iconf & F_VNIC) 7926 mode |= T4_FILTER_IC_VNIC; 7927 } 7928 7929 if (fconf & F_PORT) 7930 mode |= T4_FILTER_PORT; 7931 7932 if (fconf & F_FCOE) 7933 mode |= T4_FILTER_FCoE; 7934 7935 return (mode); 7936} 7937 7938static uint32_t 7939mode_to_fconf(uint32_t mode) 7940{ 7941 uint32_t fconf = 0; 7942 7943 if (mode & T4_FILTER_IP_FRAGMENT) 7944 fconf |= F_FRAGMENTATION; 7945 7946 if (mode & T4_FILTER_MPS_HIT_TYPE) 7947 fconf |= F_MPSHITTYPE; 7948 7949 if (mode & T4_FILTER_MAC_IDX) 7950 fconf |= F_MACMATCH; 7951 7952 if (mode & T4_FILTER_ETH_TYPE) 7953 fconf |= F_ETHERTYPE; 7954 7955 if (mode & T4_FILTER_IP_PROTO) 7956 fconf |= F_PROTOCOL; 7957 7958 if (mode & T4_FILTER_IP_TOS) 7959 fconf |= F_TOS; 7960 7961 if (mode & T4_FILTER_VLAN) 7962 fconf |= F_VLAN; 7963 7964 if (mode & T4_FILTER_VNIC) 7965 fconf |= F_VNIC_ID; 7966 7967 if (mode & T4_FILTER_PORT) 7968 fconf |= F_PORT; 7969 7970 if (mode & T4_FILTER_FCoE) 7971 fconf |= F_FCOE; 7972 7973 return (fconf); 7974} 7975 7976static uint32_t 7977mode_to_iconf(uint32_t mode) 7978{ 7979 7980 if (mode & T4_FILTER_IC_VNIC) 7981 return (F_VNIC); 7982 return (0); 7983} 7984 7985static int check_fspec_against_fconf_iconf(struct adapter *sc, 7986 struct t4_filter_specification *fs) 7987{ 7988 struct tp_params *tpp = &sc->params.tp; 7989 uint32_t fconf = 0; 7990 7991 if (fs->val.frag || fs->mask.frag) 7992 fconf |= F_FRAGMENTATION; 7993 7994 if (fs->val.matchtype || fs->mask.matchtype) 7995 fconf |= F_MPSHITTYPE; 7996 7997 if (fs->val.macidx || fs->mask.macidx) 7998 fconf |= F_MACMATCH; 7999 8000 if (fs->val.ethtype || fs->mask.ethtype) 8001 fconf |= F_ETHERTYPE; 8002 8003 if (fs->val.proto || fs->mask.proto) 8004 fconf |= F_PROTOCOL; 8005 8006 if (fs->val.tos || fs->mask.tos) 8007 fconf |= F_TOS; 8008 8009 if (fs->val.vlan_vld || fs->mask.vlan_vld) 8010 fconf |= F_VLAN; 8011 8012 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) { 8013 fconf |= F_VNIC_ID; 8014 if (tpp->ingress_config & F_VNIC) 8015 return (EINVAL); 8016 } 8017 8018 if (fs->val.pfvf_vld || fs->mask.pfvf_vld) { 8019 fconf |= F_VNIC_ID; 8020 if ((tpp->ingress_config & F_VNIC) == 0) 8021 return (EINVAL); 8022 } 8023 8024 if (fs->val.iport || fs->mask.iport) 8025 fconf |= F_PORT; 8026 8027 if (fs->val.fcoe || fs->mask.fcoe) 8028 fconf |= F_FCOE; 8029 8030 if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map) 8031 return (E2BIG); 8032 8033 return (0); 8034} 8035 8036static int 8037get_filter_mode(struct adapter *sc, uint32_t *mode) 8038{ 8039 struct tp_params *tpp = &sc->params.tp; 8040 8041 /* 8042 * We trust the cached values of the relevant TP registers. This means 8043 * things work reliably only if writes to those registers are always via 8044 * t4_set_filter_mode. 
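 * For example, if the cached vlan_pri_map has only F_VLAN and F_PORT set, the mode reported here is the six always-selected fields (IPv4, IPv6, and the IP source/destination address and port pairs) plus T4_FILTER_VLAN and T4_FILTER_PORT.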
8045 */ 8046 *mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config); 8047 8048 return (0); 8049} 8050 8051static int 8052set_filter_mode(struct adapter *sc, uint32_t mode) 8053{ 8054 struct tp_params *tpp = &sc->params.tp; 8055 uint32_t fconf, iconf; 8056 int rc; 8057 8058 iconf = mode_to_iconf(mode); 8059 if ((iconf ^ tpp->ingress_config) & F_VNIC) { 8060 /* 8061 * For now we just complain if A_TP_INGRESS_CONFIG is not 8062 * already set to the correct value for the requested filter 8063 * mode. It's not clear if it's safe to write to this register 8064 * on the fly. (And we trust the cached value of the register). 8065 */ 8066 return (EBUSY); 8067 } 8068 8069 fconf = mode_to_fconf(mode); 8070 8071 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 8072 "t4setfm"); 8073 if (rc) 8074 return (rc); 8075 8076 if (sc->tids.ftids_in_use > 0) { 8077 rc = EBUSY; 8078 goto done; 8079 } 8080 8081#ifdef TCP_OFFLOAD 8082 if (uld_active(sc, ULD_TOM)) { 8083 rc = EBUSY; 8084 goto done; 8085 } 8086#endif 8087 8088 rc = -t4_set_filter_mode(sc, fconf); 8089done: 8090 end_synchronized_op(sc, LOCK_HELD); 8091 return (rc); 8092} 8093 8094static inline uint64_t 8095get_filter_hits(struct adapter *sc, uint32_t fid) 8096{ 8097 uint32_t tcb_addr; 8098 8099 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + 8100 (fid + sc->tids.ftid_base) * TCB_SIZE; 8101 8102 if (is_t4(sc)) { 8103 uint64_t hits; 8104 8105 read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8); 8106 return (be64toh(hits)); 8107 } else { 8108 uint32_t hits; 8109 8110 read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4); 8111 return (be32toh(hits)); 8112 } 8113} 8114 8115static int 8116get_filter(struct adapter *sc, struct t4_filter *t) 8117{ 8118 int i, rc, nfilters = sc->tids.nftids; 8119 struct filter_entry *f; 8120 8121 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 8122 "t4getf"); 8123 if (rc) 8124 return (rc); 8125 8126 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL || 8127 t->idx >= nfilters) { 8128 t->idx = 0xffffffff; 8129 goto done; 8130 } 8131 8132 f = &sc->tids.ftid_tab[t->idx]; 8133 for (i = t->idx; i < nfilters; i++, f++) { 8134 if (f->valid) { 8135 t->idx = i; 8136 t->l2tidx = f->l2t ? 
f->l2t->idx : 0; 8137 t->smtidx = f->smtidx; 8138 if (f->fs.hitcnts) 8139 t->hits = get_filter_hits(sc, t->idx); 8140 else 8141 t->hits = UINT64_MAX; 8142 t->fs = f->fs; 8143 8144 goto done; 8145 } 8146 } 8147 8148 t->idx = 0xffffffff; 8149done: 8150 end_synchronized_op(sc, LOCK_HELD); 8151 return (0); 8152} 8153 8154static int 8155set_filter(struct adapter *sc, struct t4_filter *t) 8156{ 8157 unsigned int nfilters, nports; 8158 struct filter_entry *f; 8159 int i, rc; 8160 8161 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf"); 8162 if (rc) 8163 return (rc); 8164 8165 nfilters = sc->tids.nftids; 8166 nports = sc->params.nports; 8167 8168 if (nfilters == 0) { 8169 rc = ENOTSUP; 8170 goto done; 8171 } 8172 8173 if (t->idx >= nfilters) { 8174 rc = EINVAL; 8175 goto done; 8176 } 8177 8178 /* Validate against the global filter mode and ingress config */ 8179 rc = check_fspec_against_fconf_iconf(sc, &t->fs); 8180 if (rc != 0) 8181 goto done; 8182 8183 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) { 8184 rc = EINVAL; 8185 goto done; 8186 } 8187 8188 if (t->fs.val.iport >= nports) { 8189 rc = EINVAL; 8190 goto done; 8191 } 8192 8193 /* Can't specify an iq if not steering to it */ 8194 if (!t->fs.dirsteer && t->fs.iq) { 8195 rc = EINVAL; 8196 goto done; 8197 } 8198 8199 /* IPv6 filter idx must be 4 aligned */ 8200 if (t->fs.type == 1 && 8201 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) { 8202 rc = EINVAL; 8203 goto done; 8204 } 8205 8206 if (!(sc->flags & FULL_INIT_DONE) && 8207 ((rc = adapter_full_init(sc)) != 0)) 8208 goto done; 8209 8210 if (sc->tids.ftid_tab == NULL) { 8211 KASSERT(sc->tids.ftids_in_use == 0, 8212 ("%s: no memory allocated but filters_in_use > 0", 8213 __func__)); 8214 8215 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) * 8216 nfilters, M_CXGBE, M_NOWAIT | M_ZERO); 8217 if (sc->tids.ftid_tab == NULL) { 8218 rc = ENOMEM; 8219 goto done; 8220 } 8221 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF); 8222 } 8223 8224 for (i = 0; i < 4; i++) { 8225 f = &sc->tids.ftid_tab[t->idx + i]; 8226 8227 if (f->pending || f->valid) { 8228 rc = EBUSY; 8229 goto done; 8230 } 8231 if (f->locked) { 8232 rc = EPERM; 8233 goto done; 8234 } 8235 8236 if (t->fs.type == 0) 8237 break; 8238 } 8239 8240 f = &sc->tids.ftid_tab[t->idx]; 8241 f->fs = t->fs; 8242 8243 rc = set_filter_wr(sc, t->idx); 8244done: 8245 end_synchronized_op(sc, 0); 8246 8247 if (rc == 0) { 8248 mtx_lock(&sc->tids.ftid_lock); 8249 for (;;) { 8250 if (f->pending == 0) { 8251 rc = f->valid ? 
0 : EIO; 8252 break; 8253 } 8254 8255 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 8256 PCATCH, "t4setfw", 0)) { 8257 rc = EINPROGRESS; 8258 break; 8259 } 8260 } 8261 mtx_unlock(&sc->tids.ftid_lock); 8262 } 8263 return (rc); 8264} 8265 8266static int 8267del_filter(struct adapter *sc, struct t4_filter *t) 8268{ 8269 unsigned int nfilters; 8270 struct filter_entry *f; 8271 int rc; 8272 8273 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf"); 8274 if (rc) 8275 return (rc); 8276 8277 nfilters = sc->tids.nftids; 8278 8279 if (nfilters == 0) { 8280 rc = ENOTSUP; 8281 goto done; 8282 } 8283 8284 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 || 8285 t->idx >= nfilters) { 8286 rc = EINVAL; 8287 goto done; 8288 } 8289 8290 if (!(sc->flags & FULL_INIT_DONE)) { 8291 rc = EAGAIN; 8292 goto done; 8293 } 8294 8295 f = &sc->tids.ftid_tab[t->idx]; 8296 8297 if (f->pending) { 8298 rc = EBUSY; 8299 goto done; 8300 } 8301 if (f->locked) { 8302 rc = EPERM; 8303 goto done; 8304 } 8305 8306 if (f->valid) { 8307 t->fs = f->fs; /* extra info for the caller */ 8308 rc = del_filter_wr(sc, t->idx); 8309 } 8310 8311done: 8312 end_synchronized_op(sc, 0); 8313 8314 if (rc == 0) { 8315 mtx_lock(&sc->tids.ftid_lock); 8316 for (;;) { 8317 if (f->pending == 0) { 8318 rc = f->valid ? EIO : 0; 8319 break; 8320 } 8321 8322 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 8323 PCATCH, "t4delfw", 0)) { 8324 rc = EINPROGRESS; 8325 break; 8326 } 8327 } 8328 mtx_unlock(&sc->tids.ftid_lock); 8329 } 8330 8331 return (rc); 8332} 8333 8334static void 8335clear_filter(struct filter_entry *f) 8336{ 8337 if (f->l2t) 8338 t4_l2t_release(f->l2t); 8339 8340 bzero(f, sizeof (*f)); 8341} 8342 8343static int 8344set_filter_wr(struct adapter *sc, int fidx) 8345{ 8346 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 8347 struct fw_filter_wr *fwr; 8348 unsigned int ftid, vnic_vld, vnic_vld_mask; 8349 struct wrq_cookie cookie; 8350 8351 ASSERT_SYNCHRONIZED_OP(sc); 8352 8353 if (f->fs.newdmac || f->fs.newvlan) { 8354 /* This filter needs an L2T entry; allocate one. 
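 The entry allocated here is a "switching" entry: it carries the new destination MAC and VLAN that the hardware applies when this filter rewrites a packet and switches it out of f->fs.eport.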
*/ 8355 f->l2t = t4_l2t_alloc_switching(sc->l2t); 8356 if (f->l2t == NULL) 8357 return (EAGAIN); 8358 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport, 8359 f->fs.dmac)) { 8360 t4_l2t_release(f->l2t); 8361 f->l2t = NULL; 8362 return (ENOMEM); 8363 } 8364 } 8365 8366 /* Already validated against fconf, iconf */ 8367 MPASS((f->fs.val.pfvf_vld & f->fs.val.ovlan_vld) == 0); 8368 MPASS((f->fs.mask.pfvf_vld & f->fs.mask.ovlan_vld) == 0); 8369 if (f->fs.val.pfvf_vld || f->fs.val.ovlan_vld) 8370 vnic_vld = 1; 8371 else 8372 vnic_vld = 0; 8373 if (f->fs.mask.pfvf_vld || f->fs.mask.ovlan_vld) 8374 vnic_vld_mask = 1; 8375 else 8376 vnic_vld_mask = 0; 8377 8378 ftid = sc->tids.ftid_base + fidx; 8379 8380 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 8381 if (fwr == NULL) 8382 return (ENOMEM); 8383 bzero(fwr, sizeof(*fwr)); 8384 8385 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR)); 8386 fwr->len16_pkd = htobe32(FW_LEN16(*fwr)); 8387 fwr->tid_to_iq = 8388 htobe32(V_FW_FILTER_WR_TID(ftid) | 8389 V_FW_FILTER_WR_RQTYPE(f->fs.type) | 8390 V_FW_FILTER_WR_NOREPLY(0) | 8391 V_FW_FILTER_WR_IQ(f->fs.iq)); 8392 fwr->del_filter_to_l2tix = 8393 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | 8394 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | 8395 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | 8396 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | 8397 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | 8398 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | 8399 V_FW_FILTER_WR_DMAC(f->fs.newdmac) | 8400 V_FW_FILTER_WR_SMAC(f->fs.newsmac) | 8401 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || 8402 f->fs.newvlan == VLAN_REWRITE) | 8403 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || 8404 f->fs.newvlan == VLAN_REWRITE) | 8405 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | 8406 V_FW_FILTER_WR_TXCHAN(f->fs.eport) | 8407 V_FW_FILTER_WR_PRIO(f->fs.prio) | 8408 V_FW_FILTER_WR_L2TIX(f->l2t ? 
f->l2t->idx : 0)); 8409 fwr->ethtype = htobe16(f->fs.val.ethtype); 8410 fwr->ethtypem = htobe16(f->fs.mask.ethtype); 8411 fwr->frag_to_ovlan_vldm = 8412 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | 8413 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | 8414 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) | 8415 V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) | 8416 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) | 8417 V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask)); 8418 fwr->smac_sel = 0; 8419 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) | 8420 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id)); 8421 fwr->maci_to_matchtypem = 8422 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | 8423 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | 8424 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | 8425 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | 8426 V_FW_FILTER_WR_PORT(f->fs.val.iport) | 8427 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | 8428 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | 8429 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); 8430 fwr->ptcl = f->fs.val.proto; 8431 fwr->ptclm = f->fs.mask.proto; 8432 fwr->ttyp = f->fs.val.tos; 8433 fwr->ttypm = f->fs.mask.tos; 8434 fwr->ivlan = htobe16(f->fs.val.vlan); 8435 fwr->ivlanm = htobe16(f->fs.mask.vlan); 8436 fwr->ovlan = htobe16(f->fs.val.vnic); 8437 fwr->ovlanm = htobe16(f->fs.mask.vnic); 8438 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip)); 8439 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm)); 8440 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip)); 8441 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm)); 8442 fwr->lp = htobe16(f->fs.val.dport); 8443 fwr->lpm = htobe16(f->fs.mask.dport); 8444 fwr->fp = htobe16(f->fs.val.sport); 8445 fwr->fpm = htobe16(f->fs.mask.sport); 8446 if (f->fs.newsmac) 8447 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma)); 8448 8449 f->pending = 1; 8450 sc->tids.ftids_in_use++; 8451 8452 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 8453 return (0); 8454} 8455 8456static int 8457del_filter_wr(struct adapter *sc, int fidx) 8458{ 8459 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 8460 struct fw_filter_wr *fwr; 8461 unsigned int ftid; 8462 struct wrq_cookie cookie; 8463 8464 ftid = sc->tids.ftid_base + fidx; 8465 8466 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 8467 if (fwr == NULL) 8468 return (ENOMEM); 8469 bzero(fwr, sizeof (*fwr)); 8470 8471 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id); 8472 8473 f->pending = 1; 8474 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 8475 return (0); 8476} 8477 8478int 8479t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8480{ 8481 struct adapter *sc = iq->adapter; 8482 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1); 8483 unsigned int idx = GET_TID(rpl); 8484 unsigned int rc; 8485 struct filter_entry *f; 8486 8487 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 8488 rss->opcode)); 8489 MPASS(iq == &sc->sge.fwq); 8490 MPASS(is_ftid(sc, idx)); 8491 8492 idx -= sc->tids.ftid_base; 8493 f = &sc->tids.ftid_tab[idx]; 8494 rc = G_COOKIE(rpl->cookie); 8495 8496 mtx_lock(&sc->tids.ftid_lock); 8497 if (rc == FW_FILTER_WR_FLT_ADDED) { 8498 KASSERT(f->pending, ("%s: filter[%u] isn't pending.", 8499 __func__, idx)); 8500 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff; 8501 f->pending = 0; /* asynchronous setup completed */ 8502 f->valid = 1; 8503 } else { 8504 if (rc != FW_FILTER_WR_FLT_DELETED) { 8505 /* Add or delete failed, display an error */ 8506 log(LOG_ERR, 8507 "filter %u setup failed with error %u\n", 8508 idx, rc); 8509 } 8510 8511 clear_filter(f); 
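 /* Whether this was a completed delete or a failed add/delete, the entry is now unused; clear it and release its slot. */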
8512 sc->tids.ftids_in_use--; 8513 } 8514 wakeup(&sc->tids.ftid_tab); 8515 mtx_unlock(&sc->tids.ftid_lock); 8516 8517 return (0); 8518} 8519 8520static int 8521set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8522{ 8523 8524 MPASS(iq->set_tcb_rpl != NULL); 8525 return (iq->set_tcb_rpl(iq, rss, m)); 8526} 8527 8528static int 8529l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8530{ 8531 8532 MPASS(iq->l2t_write_rpl != NULL); 8533 return (iq->l2t_write_rpl(iq, rss, m)); 8534} 8535 8536static int 8537get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt) 8538{ 8539 int rc; 8540 8541 if (cntxt->cid > M_CTXTQID) 8542 return (EINVAL); 8543 8544 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS && 8545 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM) 8546 return (EINVAL); 8547 8548 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt"); 8549 if (rc) 8550 return (rc); 8551 8552 if (sc->flags & FW_OK) { 8553 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id, 8554 &cntxt->data[0]); 8555 if (rc == 0) 8556 goto done; 8557 } 8558 8559 /* 8560 * Read via firmware failed or wasn't even attempted. Read directly via 8561 * the backdoor. 8562 */ 8563 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); 8564done: 8565 end_synchronized_op(sc, 0); 8566 return (rc); 8567} 8568 8569static int 8570load_fw(struct adapter *sc, struct t4_data *fw) 8571{ 8572 int rc; 8573 uint8_t *fw_data; 8574 8575 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw"); 8576 if (rc) 8577 return (rc); 8578 8579 if (sc->flags & FULL_INIT_DONE) { 8580 rc = EBUSY; 8581 goto done; 8582 } 8583 8584 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK); 8585 if (fw_data == NULL) { 8586 rc = ENOMEM; 8587 goto done; 8588 } 8589 8590 rc = copyin(fw->data, fw_data, fw->len); 8591 if (rc == 0) 8592 rc = -t4_load_fw(sc, fw_data, fw->len); 8593 8594 free(fw_data, M_CXGBE); 8595done: 8596 end_synchronized_op(sc, 0); 8597 return (rc); 8598} 8599 8600#define MAX_READ_BUF_SIZE (128 * 1024) 8601static int 8602read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr) 8603{ 8604 uint32_t addr, remaining, n; 8605 uint32_t *buf; 8606 int rc; 8607 uint8_t *dst; 8608 8609 rc = validate_mem_range(sc, mr->addr, mr->len); 8610 if (rc != 0) 8611 return (rc); 8612 8613 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK); 8614 addr = mr->addr; 8615 remaining = mr->len; 8616 dst = (void *)mr->data; 8617 8618 while (remaining) { 8619 n = min(remaining, MAX_READ_BUF_SIZE); 8620 read_via_memwin(sc, 2, addr, buf, n); 8621 8622 rc = copyout(buf, dst, n); 8623 if (rc != 0) 8624 break; 8625 8626 dst += n; 8627 remaining -= n; 8628 addr += n; 8629 } 8630 8631 free(buf, M_CXGBE); 8632 return (rc); 8633} 8634#undef MAX_READ_BUF_SIZE 8635 8636static int 8637read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd) 8638{ 8639 int rc; 8640 8641 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports) 8642 return (EINVAL); 8643 8644 if (i2cd->len > sizeof(i2cd->data)) 8645 return (EFBIG); 8646 8647 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd"); 8648 if (rc) 8649 return (rc); 8650 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr, 8651 i2cd->offset, i2cd->len, &i2cd->data[0]); 8652 end_synchronized_op(sc, 0); 8653 8654 return (rc); 8655} 8656 8657static int 8658in_range(int val, int lo, int hi) 8659{ 8660 8661 return (val < 0 || (val <= hi && val >= lo)); 8662} 8663 8664static int 
8665 set_sched_class_config(struct adapter *sc, int minmax) 8666 { 8667 int rc; 8668 8669 if (minmax < 0) 8670 return (EINVAL); 8671 8672 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sscc"); 8673 if (rc) 8674 return (rc); 8675 rc = -t4_sched_config(sc, FW_SCHED_TYPE_PKTSCHED, minmax, 1); 8676 end_synchronized_op(sc, 0); 8677 8678 return (rc); 8679} 8680 8681static int 8682set_sched_class_params(struct adapter *sc, struct t4_sched_class_params *p, 8683 int sleep_ok) 8684{ 8685 int rc, top_speed, fw_level, fw_mode, fw_rateunit, fw_ratemode; 8686 struct port_info *pi; 8687 struct tx_sched_class *tc; 8688 8689 if (p->level == SCHED_CLASS_LEVEL_CL_RL) 8690 fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL; 8691 else if (p->level == SCHED_CLASS_LEVEL_CL_WRR) 8692 fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR; 8693 else if (p->level == SCHED_CLASS_LEVEL_CH_RL) 8694 fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL; 8695 else 8696 return (EINVAL); 8697 8698 if (p->mode == SCHED_CLASS_MODE_CLASS) 8699 fw_mode = FW_SCHED_PARAMS_MODE_CLASS; 8700 else if (p->mode == SCHED_CLASS_MODE_FLOW) 8701 fw_mode = FW_SCHED_PARAMS_MODE_FLOW; 8702 else 8703 return (EINVAL); 8704 8705 if (p->rateunit == SCHED_CLASS_RATEUNIT_BITS) 8706 fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE; 8707 else if (p->rateunit == SCHED_CLASS_RATEUNIT_PKTS) 8708 fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE; 8709 else 8710 return (EINVAL); 8711 8712 if (p->ratemode == SCHED_CLASS_RATEMODE_REL) 8713 fw_ratemode = FW_SCHED_PARAMS_RATE_REL; 8714 else if (p->ratemode == SCHED_CLASS_RATEMODE_ABS) 8715 fw_ratemode = FW_SCHED_PARAMS_RATE_ABS; 8716 else 8717 return (EINVAL); 8718 8719 /* Vet our parameters ...  A channel is required and is used as an index below. */ 8720 if (p->channel < 0 || !in_range(p->channel, 0, sc->chip_params->nchan - 1)) 8721 return (ERANGE); 8722 8723 pi = sc->port[sc->chan_map[p->channel]]; 8724 if (pi == NULL) 8725 return (ENXIO); 8726 MPASS(pi->tx_chan == p->channel); 8727 top_speed = port_top_speed(pi) * 1000000; /* Gbps -> Kbps */ 8728 8729 if (!in_range(p->cl, 0, sc->chip_params->nsched_cls - 1) || 8730 !in_range(p->minrate, 0, top_speed) || 8731 !in_range(p->maxrate, 0, top_speed) || 8732 !in_range(p->weight, 0, 100)) 8733 return (ERANGE); 8734 8735 /* 8736 * Translate any unset parameters into the firmware's 8737 * nomenclature and/or fail the call if the parameters 8738 * are required ... 8739 */ 8740 if (p->rateunit < 0 || p->ratemode < 0 || p->channel < 0 || p->cl < 0) 8741 return (EINVAL); 8742 8743 if (p->minrate < 0) 8744 p->minrate = 0; 8745 if (p->maxrate < 0) { 8746 if (p->level == SCHED_CLASS_LEVEL_CL_RL || 8747 p->level == SCHED_CLASS_LEVEL_CH_RL) 8748 return (EINVAL); 8749 else 8750 p->maxrate = 0; 8751 } 8752 if (p->weight < 0) { 8753 if (p->level == SCHED_CLASS_LEVEL_CL_WRR) 8754 return (EINVAL); 8755 else 8756 p->weight = 0; 8757 } 8758 if (p->pktsize < 0) { 8759 if (p->level == SCHED_CLASS_LEVEL_CL_RL || 8760 p->level == SCHED_CLASS_LEVEL_CH_RL) 8761 return (EINVAL); 8762 else 8763 p->pktsize = 0; 8764 } 8765 8766 rc = begin_synchronized_op(sc, NULL, 8767 sleep_ok ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4sscp"); 8768 if (rc) 8769 return (rc); 8770 tc = &pi->tc[p->cl]; 8771 tc->params = *p; 8772 rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED, fw_level, fw_mode, 8773 fw_rateunit, fw_ratemode, p->channel, p->cl, p->minrate, p->maxrate, 8774 p->weight, p->pktsize, sleep_ok); 8775 if (rc == 0) 8776 tc->flags |= TX_SC_OK; 8777 else { 8778 /* 8779 * Unknown state at this point, see tc->params for what was 8780 * attempted.
8781 */ 8782 tc->flags &= ~TX_SC_OK; 8783 } 8784 end_synchronized_op(sc, sleep_ok ? 0 : LOCK_HELD); 8785 8786 return (rc); 8787} 8788 8789int 8790t4_set_sched_class(struct adapter *sc, struct t4_sched_params *p) 8791{ 8792 8793 if (p->type != SCHED_CLASS_TYPE_PACKET) 8794 return (EINVAL); 8795 8796 if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG) 8797 return (set_sched_class_config(sc, p->u.config.minmax)); 8798 8799 if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS) 8800 return (set_sched_class_params(sc, &p->u.params, 1)); 8801 8802 return (EINVAL); 8803} 8804 8805int 8806t4_set_sched_queue(struct adapter *sc, struct t4_sched_queue *p) 8807{ 8808 struct port_info *pi = NULL; 8809 struct vi_info *vi; 8810 struct sge_txq *txq; 8811 uint32_t fw_mnem, fw_queue, fw_class; 8812 int i, rc; 8813 8814 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq"); 8815 if (rc) 8816 return (rc); 8817 8818 if (p->port >= sc->params.nports) { 8819 rc = EINVAL; 8820 goto done; 8821 } 8822 8823 /* XXX: Only supported for the main VI. */ 8824 pi = sc->port[p->port]; 8825 vi = &pi->vi[0]; 8826 if (!(vi->flags & VI_INIT_DONE)) { 8827 /* tx queues not set up yet */ 8828 rc = EAGAIN; 8829 goto done; 8830 } 8831 8832 if (!in_range(p->queue, 0, vi->ntxq - 1) || 8833 !in_range(p->cl, 0, sc->chip_params->nsched_cls - 1)) { 8834 rc = EINVAL; 8835 goto done; 8836 } 8837 8838 /* 8839 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX 8840 * Scheduling Class in this case). 8841 */ 8842 fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 8843 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH)); 8844 fw_class = p->cl < 0 ? 0xffffffff : p->cl; 8845 8846 /* 8847 * If p->queue is non-negative, then we're only changing the scheduling 8848 * on a single specified TX queue. 8849 */ 8850 if (p->queue >= 0) { 8851 txq = &sc->sge.txq[vi->first_txq + p->queue]; 8852 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id)); 8853 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue, 8854 &fw_class); 8855 goto done; 8856 } 8857 8858 /* 8859 * Change the scheduling on all the TX queues for the 8860 * interface. 8861 */ 8862 for_each_txq(vi, i, txq) { 8863 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id)); 8864 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue, 8865 &fw_class); 8866 if (rc) 8867 goto done; 8868 } 8869 8870 rc = 0; 8871done: 8872 end_synchronized_op(sc, 0); 8873 return (rc); 8874} 8875 8876int 8877t4_os_find_pci_capability(struct adapter *sc, int cap) 8878{ 8879 int i; 8880 8881 return (pci_find_cap(sc->dev, cap, &i) == 0 ?
i : 0); 8882} 8883 8884int 8885t4_os_pci_save_state(struct adapter *sc) 8886{ 8887 device_t dev; 8888 struct pci_devinfo *dinfo; 8889 8890 dev = sc->dev; 8891 dinfo = device_get_ivars(dev); 8892 8893 pci_cfg_save(dev, dinfo, 0); 8894 return (0); 8895} 8896 8897int 8898t4_os_pci_restore_state(struct adapter *sc) 8899{ 8900 device_t dev; 8901 struct pci_devinfo *dinfo; 8902 8903 dev = sc->dev; 8904 dinfo = device_get_ivars(dev); 8905 8906 pci_cfg_restore(dev, dinfo); 8907 return (0); 8908} 8909 8910void 8911t4_os_portmod_changed(const struct adapter *sc, int idx) 8912{ 8913 struct port_info *pi = sc->port[idx]; 8914 struct vi_info *vi; 8915 struct ifnet *ifp; 8916 int v; 8917 static const char *mod_str[] = { 8918 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" 8919 }; 8920 8921 for_each_vi(pi, v, vi) { 8922 build_medialist(pi, &vi->media); 8923 } 8924 8925 ifp = pi->vi[0].ifp; 8926 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 8927 if_printf(ifp, "transceiver unplugged.\n"); 8928 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 8929 if_printf(ifp, "unknown transceiver inserted.\n"); 8930 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 8931 if_printf(ifp, "unsupported transceiver inserted.\n"); 8932 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) { 8933 if_printf(ifp, "%s transceiver inserted.\n", 8934 mod_str[pi->mod_type]); 8935 } else { 8936 if_printf(ifp, "transceiver (type %d) inserted.\n", 8937 pi->mod_type); 8938 } 8939} 8940 8941void 8942t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason) 8943{ 8944 struct port_info *pi = sc->port[idx]; 8945 struct vi_info *vi; 8946 struct ifnet *ifp; 8947 int v; 8948 8949 if (link_stat) 8950 pi->linkdnrc = -1; 8951 else { 8952 if (reason >= 0) 8953 pi->linkdnrc = reason; 8954 } 8955 for_each_vi(pi, v, vi) { 8956 ifp = vi->ifp; 8957 if (ifp == NULL) 8958 continue; 8959 8960 if (link_stat) { 8961 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed); 8962 if_link_state_change(ifp, LINK_STATE_UP); 8963 } else { 8964 if_link_state_change(ifp, LINK_STATE_DOWN); 8965 } 8966 } 8967} 8968 8969void 8970t4_iterate(void (*func)(struct adapter *, void *), void *arg) 8971{ 8972 struct adapter *sc; 8973 8974 sx_slock(&t4_list_lock); 8975 SLIST_FOREACH(sc, &t4_list, link) { 8976 /* 8977 * func should not make any assumptions about what state sc is 8978 * in - the only guarantee is that sc->sc_lock is a valid lock. 
8979 */ 8980 func(sc, arg); 8981 } 8982 sx_sunlock(&t4_list_lock); 8983} 8984 8985static int 8986t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, 8987 struct thread *td) 8988{ 8989 int rc; 8990 struct adapter *sc = dev->si_drv1; 8991 8992 rc = priv_check(td, PRIV_DRIVER); 8993 if (rc != 0) 8994 return (rc); 8995 8996 switch (cmd) { 8997 case CHELSIO_T4_GETREG: { 8998 struct t4_reg *edata = (struct t4_reg *)data; 8999 9000 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 9001 return (EFAULT); 9002 9003 if (edata->size == 4) 9004 edata->val = t4_read_reg(sc, edata->addr); 9005 else if (edata->size == 8) 9006 edata->val = t4_read_reg64(sc, edata->addr); 9007 else 9008 return (EINVAL); 9009 9010 break; 9011 } 9012 case CHELSIO_T4_SETREG: { 9013 struct t4_reg *edata = (struct t4_reg *)data; 9014 9015 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 9016 return (EFAULT); 9017 9018 if (edata->size == 4) { 9019 if (edata->val & 0xffffffff00000000) 9020 return (EINVAL); 9021 t4_write_reg(sc, edata->addr, (uint32_t) edata->val); 9022 } else if (edata->size == 8) 9023 t4_write_reg64(sc, edata->addr, edata->val); 9024 else 9025 return (EINVAL); 9026 break; 9027 } 9028 case CHELSIO_T4_REGDUMP: { 9029 struct t4_regdump *regs = (struct t4_regdump *)data; 9030 int reglen = t4_get_regs_len(sc); 9031 uint8_t *buf; 9032 9033 if (regs->len < reglen) { 9034 regs->len = reglen; /* hint to the caller */ 9035 return (ENOBUFS); 9036 } 9037 9038 regs->len = reglen; 9039 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO); 9040 get_regs(sc, regs, buf); 9041 rc = copyout(buf, regs->data, reglen); 9042 free(buf, M_CXGBE); 9043 break; 9044 } 9045 case CHELSIO_T4_GET_FILTER_MODE: 9046 rc = get_filter_mode(sc, (uint32_t *)data); 9047 break; 9048 case CHELSIO_T4_SET_FILTER_MODE: 9049 rc = set_filter_mode(sc, *(uint32_t *)data); 9050 break; 9051 case CHELSIO_T4_GET_FILTER: 9052 rc = get_filter(sc, (struct t4_filter *)data); 9053 break; 9054 case CHELSIO_T4_SET_FILTER: 9055 rc = set_filter(sc, (struct t4_filter *)data); 9056 break; 9057 case CHELSIO_T4_DEL_FILTER: 9058 rc = del_filter(sc, (struct t4_filter *)data); 9059 break; 9060 case CHELSIO_T4_GET_SGE_CONTEXT: 9061 rc = get_sge_context(sc, (struct t4_sge_context *)data); 9062 break; 9063 case CHELSIO_T4_LOAD_FW: 9064 rc = load_fw(sc, (struct t4_data *)data); 9065 break; 9066 case CHELSIO_T4_GET_MEM: 9067 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data); 9068 break; 9069 case CHELSIO_T4_GET_I2C: 9070 rc = read_i2c(sc, (struct t4_i2c_data *)data); 9071 break; 9072 case CHELSIO_T4_CLEAR_STATS: { 9073 int i, v; 9074 u_int port_id = *(uint32_t *)data; 9075 struct port_info *pi; 9076 struct vi_info *vi; 9077 9078 if (port_id >= sc->params.nports) 9079 return (EINVAL); 9080 pi = sc->port[port_id]; 9081 if (pi == NULL) 9082 return (EIO); 9083 9084 /* MAC stats */ 9085 t4_clr_port_stats(sc, pi->tx_chan); 9086 pi->tx_parse_error = 0; 9087 mtx_lock(&sc->reg_lock); 9088 for_each_vi(pi, v, vi) { 9089 if (vi->flags & VI_INIT_DONE) 9090 t4_clr_vi_stats(sc, vi->viid); 9091 } 9092 mtx_unlock(&sc->reg_lock); 9093 9094 /* 9095 * Since this command accepts a port, clear stats for 9096 * all VIs on this port. 
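 * (The hardware VI stats were cleared above under reg_lock; the loop below resets the software queue counters.)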
9097 */ 9098 for_each_vi(pi, v, vi) { 9099 if (vi->flags & VI_INIT_DONE) { 9100 struct sge_rxq *rxq; 9101 struct sge_txq *txq; 9102 struct sge_wrq *wrq; 9103 9104 for_each_rxq(vi, i, rxq) { 9105#if defined(INET) || defined(INET6) 9106 rxq->lro.lro_queued = 0; 9107 rxq->lro.lro_flushed = 0; 9108#endif 9109 rxq->rxcsum = 0; 9110 rxq->vlan_extraction = 0; 9111 } 9112 9113 for_each_txq(vi, i, txq) { 9114 txq->txcsum = 0; 9115 txq->tso_wrs = 0; 9116 txq->vlan_insertion = 0; 9117 txq->imm_wrs = 0; 9118 txq->sgl_wrs = 0; 9119 txq->txpkt_wrs = 0; 9120 txq->txpkts0_wrs = 0; 9121 txq->txpkts1_wrs = 0; 9122 txq->txpkts0_pkts = 0; 9123 txq->txpkts1_pkts = 0; 9124 mp_ring_reset_stats(txq->r); 9125 } 9126 9127#ifdef TCP_OFFLOAD 9128 /* nothing to clear for each ofld_rxq */ 9129 9130 for_each_ofld_txq(vi, i, wrq) { 9131 wrq->tx_wrs_direct = 0; 9132 wrq->tx_wrs_copied = 0; 9133 } 9134#endif 9135 9136 if (IS_MAIN_VI(vi)) { 9137 wrq = &sc->sge.ctrlq[pi->port_id]; 9138 wrq->tx_wrs_direct = 0; 9139 wrq->tx_wrs_copied = 0; 9140 } 9141 } 9142 } 9143 break; 9144 } 9145 case CHELSIO_T4_SCHED_CLASS: 9146 rc = t4_set_sched_class(sc, (struct t4_sched_params *)data); 9147 break; 9148 case CHELSIO_T4_SCHED_QUEUE: 9149 rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data); 9150 break; 9151 case CHELSIO_T4_GET_TRACER: 9152 rc = t4_get_tracer(sc, (struct t4_tracer *)data); 9153 break; 9154 case CHELSIO_T4_SET_TRACER: 9155 rc = t4_set_tracer(sc, (struct t4_tracer *)data); 9156 break; 9157 default: 9158 rc = ENOTTY; 9159 } 9160 9161 return (rc); 9162} 9163 9164void 9165t4_db_full(struct adapter *sc) 9166{ 9167 9168 CXGBE_UNIMPLEMENTED(__func__); 9169} 9170 9171void 9172t4_db_dropped(struct adapter *sc) 9173{ 9174 9175 CXGBE_UNIMPLEMENTED(__func__); 9176} 9177 9178#ifdef TCP_OFFLOAD 9179void 9180t4_iscsi_init(struct adapter *sc, u_int tag_mask, const u_int *pgsz_order) 9181{ 9182 9183 t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask); 9184 t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) | 9185 V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) | 9186 V_HPZ3(pgsz_order[3])); 9187} 9188 9189static int 9190toe_capability(struct vi_info *vi, int enable) 9191{ 9192 int rc; 9193 struct port_info *pi = vi->pi; 9194 struct adapter *sc = pi->adapter; 9195 9196 ASSERT_SYNCHRONIZED_OP(sc); 9197 9198 if (!is_offload(sc)) 9199 return (ENODEV); 9200 9201 if (enable) { 9202 if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) { 9203 /* TOE is already enabled. */ 9204 return (0); 9205 } 9206 9207 /* 9208 * We need the port's queues around so that we're able to send 9209 * and receive CPLs to/from the TOE even if the ifnet for this 9210 * port has never been UP'd administratively. 9211 */ 9212 if (!(vi->flags & VI_INIT_DONE)) { 9213 rc = vi_full_init(vi); 9214 if (rc) 9215 return (rc); 9216 } 9217 if (!(pi->vi[0].flags & VI_INIT_DONE)) { 9218 rc = vi_full_init(&pi->vi[0]); 9219 if (rc) 9220 return (rc); 9221 } 9222 9223 if (isset(&sc->offload_map, pi->port_id)) { 9224 /* TOE is enabled on another VI of this port. 
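 All VIs on a port share the port's TOE state, so just account for one more user.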
*/ 9225 pi->uld_vis++; 9226 return (0); 9227 } 9228 9229 if (!uld_active(sc, ULD_TOM)) { 9230 rc = t4_activate_uld(sc, ULD_TOM); 9231 if (rc == EAGAIN) { 9232 log(LOG_WARNING, 9233 "You must kldload t4_tom.ko before trying " 9234 "to enable TOE on a cxgbe interface.\n"); 9235 } 9236 if (rc != 0) 9237 return (rc); 9238 KASSERT(sc->tom_softc != NULL, 9239 ("%s: TOM activated but softc NULL", __func__)); 9240 KASSERT(uld_active(sc, ULD_TOM), 9241 ("%s: TOM activated but flag not set", __func__)); 9242 } 9243 9244 /* Activate iWARP and iSCSI too, if the modules are loaded. */ 9245 if (!uld_active(sc, ULD_IWARP)) 9246 (void) t4_activate_uld(sc, ULD_IWARP); 9247 if (!uld_active(sc, ULD_ISCSI)) 9248 (void) t4_activate_uld(sc, ULD_ISCSI); 9249 9250 pi->uld_vis++; 9251 setbit(&sc->offload_map, pi->port_id); 9252 } else { 9253 pi->uld_vis--; 9254 9255 if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0) 9256 return (0); 9257 9258 KASSERT(uld_active(sc, ULD_TOM), 9259 ("%s: TOM never initialized?", __func__)); 9260 clrbit(&sc->offload_map, pi->port_id); 9261 } 9262 9263 return (0); 9264} 9265 9266/* 9267 * Add an upper layer driver to the global list. 9268 */ 9269int 9270t4_register_uld(struct uld_info *ui) 9271{ 9272 int rc = 0; 9273 struct uld_info *u; 9274 9275 sx_xlock(&t4_uld_list_lock); 9276 SLIST_FOREACH(u, &t4_uld_list, link) { 9277 if (u->uld_id == ui->uld_id) { 9278 rc = EEXIST; 9279 goto done; 9280 } 9281 } 9282 9283 SLIST_INSERT_HEAD(&t4_uld_list, ui, link); 9284 ui->refcount = 0; 9285done: 9286 sx_xunlock(&t4_uld_list_lock); 9287 return (rc); 9288} 9289 9290int 9291t4_unregister_uld(struct uld_info *ui) 9292{ 9293 int rc = EINVAL; 9294 struct uld_info *u; 9295 9296 sx_xlock(&t4_uld_list_lock); 9297 9298 SLIST_FOREACH(u, &t4_uld_list, link) { 9299 if (u == ui) { 9300 if (ui->refcount > 0) { 9301 rc = EBUSY; 9302 goto done; 9303 } 9304 9305 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link); 9306 rc = 0; 9307 goto done; 9308 } 9309 } 9310done: 9311 sx_xunlock(&t4_uld_list_lock); 9312 return (rc); 9313} 9314 9315int 9316t4_activate_uld(struct adapter *sc, int id) 9317{ 9318 int rc; 9319 struct uld_info *ui; 9320 9321 ASSERT_SYNCHRONIZED_OP(sc); 9322 9323 if (id < 0 || id > ULD_MAX) 9324 return (EINVAL); 9325 rc = EAGAIN; /* kldload the module with this ULD and try again.
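 (A ULD module calls t4_register_uld when it loads, which puts it on the t4_uld_list searched below.)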
*/ 9326 9327 sx_slock(&t4_uld_list_lock); 9328 9329 SLIST_FOREACH(ui, &t4_uld_list, link) { 9330 if (ui->uld_id == id) { 9331 if (!(sc->flags & FULL_INIT_DONE)) { 9332 rc = adapter_full_init(sc); 9333 if (rc != 0) 9334 break; 9335 } 9336 9337 rc = ui->activate(sc); 9338 if (rc == 0) { 9339 setbit(&sc->active_ulds, id); 9340 ui->refcount++; 9341 } 9342 break; 9343 } 9344 } 9345 9346 sx_sunlock(&t4_uld_list_lock); 9347 9348 return (rc); 9349} 9350 9351int 9352t4_deactivate_uld(struct adapter *sc, int id) 9353{ 9354 int rc; 9355 struct uld_info *ui; 9356 9357 ASSERT_SYNCHRONIZED_OP(sc); 9358 9359 if (id < 0 || id > ULD_MAX) 9360 return (EINVAL); 9361 rc = ENXIO; 9362 9363 sx_slock(&t4_uld_list_lock); 9364 9365 SLIST_FOREACH(ui, &t4_uld_list, link) { 9366 if (ui->uld_id == id) { 9367 rc = ui->deactivate(sc); 9368 if (rc == 0) { 9369 clrbit(&sc->active_ulds, id); 9370 ui->refcount--; 9371 } 9372 break; 9373 } 9374 } 9375 9376 sx_sunlock(&t4_uld_list_lock); 9377 9378 return (rc); 9379} 9380 9381int 9382uld_active(struct adapter *sc, int uld_id) 9383{ 9384 9385 MPASS(uld_id >= 0 && uld_id <= ULD_MAX); 9386 9387 return (isset(&sc->active_ulds, uld_id)); 9388} 9389#endif 9390 9391/* 9392 * Come up with reasonable defaults for some of the tunables, provided they're 9393 * not set by the user (in which case we'll use the values as is). 9394 */ 9395static void 9396tweak_tunables(void) 9397{ 9398 int nc = mp_ncpus; /* our snapshot of the number of CPUs */ 9399 9400 if (t4_ntxq10g < 1) { 9401#ifdef RSS 9402 t4_ntxq10g = rss_getnumbuckets(); 9403#else 9404 t4_ntxq10g = min(nc, NTXQ_10G); 9405#endif 9406 } 9407 9408 if (t4_ntxq1g < 1) { 9409#ifdef RSS 9410 /* XXX: way too many for 1GbE? */ 9411 t4_ntxq1g = rss_getnumbuckets(); 9412#else 9413 t4_ntxq1g = min(nc, NTXQ_1G); 9414#endif 9415 } 9416 9417 if (t4_ntxq_vi < 1) 9418 t4_ntxq_vi = min(nc, NTXQ_VI); 9419 9420 if (t4_nrxq10g < 1) { 9421#ifdef RSS 9422 t4_nrxq10g = rss_getnumbuckets(); 9423#else 9424 t4_nrxq10g = min(nc, NRXQ_10G); 9425#endif 9426 } 9427 9428 if (t4_nrxq1g < 1) { 9429#ifdef RSS 9430 /* XXX: way too many for 1GbE? 
*/ 9431 t4_nrxq1g = rss_getnumbuckets(); 9432#else 9433 t4_nrxq1g = min(nc, NRXQ_1G); 9434#endif 9435 } 9436 9437 if (t4_nrxq_vi < 1) 9438 t4_nrxq_vi = min(nc, NRXQ_VI); 9439 9440#ifdef TCP_OFFLOAD 9441 if (t4_nofldtxq10g < 1) 9442 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G); 9443 9444 if (t4_nofldtxq1g < 1) 9445 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G); 9446 9447 if (t4_nofldtxq_vi < 1) 9448 t4_nofldtxq_vi = min(nc, NOFLDTXQ_VI); 9449 9450 if (t4_nofldrxq10g < 1) 9451 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G); 9452 9453 if (t4_nofldrxq1g < 1) 9454 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G); 9455 9456 if (t4_nofldrxq_vi < 1) 9457 t4_nofldrxq_vi = min(nc, NOFLDRXQ_VI); 9458 9459 if (t4_toecaps_allowed == -1) 9460 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE; 9461 9462 if (t4_rdmacaps_allowed == -1) { 9463 t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP | 9464 FW_CAPS_CONFIG_RDMA_RDMAC; 9465 } 9466 9467 if (t4_iscsicaps_allowed == -1) { 9468 t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU | 9469 FW_CAPS_CONFIG_ISCSI_TARGET_PDU | 9470 FW_CAPS_CONFIG_ISCSI_T10DIF; 9471 } 9472#else 9473 if (t4_toecaps_allowed == -1) 9474 t4_toecaps_allowed = 0; 9475 9476 if (t4_rdmacaps_allowed == -1) 9477 t4_rdmacaps_allowed = 0; 9478 9479 if (t4_iscsicaps_allowed == -1) 9480 t4_iscsicaps_allowed = 0; 9481#endif 9482 9483#ifdef DEV_NETMAP 9484 if (t4_nnmtxq_vi < 1) 9485 t4_nnmtxq_vi = min(nc, NNMTXQ_VI); 9486 9487 if (t4_nnmrxq_vi < 1) 9488 t4_nnmrxq_vi = min(nc, NNMRXQ_VI); 9489#endif 9490 9491 if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS) 9492 t4_tmr_idx_10g = TMR_IDX_10G; 9493 9494 if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS) 9495 t4_pktc_idx_10g = PKTC_IDX_10G; 9496 9497 if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS) 9498 t4_tmr_idx_1g = TMR_IDX_1G; 9499 9500 if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS) 9501 t4_pktc_idx_1g = PKTC_IDX_1G; 9502 9503 if (t4_qsize_txq < 128) 9504 t4_qsize_txq = 128; 9505 9506 if (t4_qsize_rxq < 128) 9507 t4_qsize_rxq = 128; 9508 while (t4_qsize_rxq & 7) 9509 t4_qsize_rxq++; 9510 9511 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX; 9512} 9513 9514#ifdef DDB 9515static void 9516t4_dump_tcb(struct adapter *sc, int tid) 9517{ 9518 uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos; 9519 9520 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2); 9521 save = t4_read_reg(sc, reg); 9522 base = sc->memwin[2].mw_base; 9523 9524 /* Dump TCB for the tid */ 9525 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 9526 tcb_addr += tid * TCB_SIZE; 9527 9528 if (is_t4(sc)) { 9529 pf = 0; 9530 win_pos = tcb_addr & ~0xf; /* start must be 16B aligned */ 9531 } else { 9532 pf = V_PFNUM(sc->pf); 9533 win_pos = tcb_addr & ~0x7f; /* start must be 128B aligned */ 9534 } 9535 t4_write_reg(sc, reg, win_pos | pf); 9536 t4_read_reg(sc, reg); 9537 9538 off = tcb_addr - win_pos; 9539 for (i = 0; i < 4; i++) { 9540 uint32_t buf[8]; 9541 for (j = 0; j < 8; j++, off += 4) 9542 buf[j] = htonl(t4_read_reg(sc, base + off)); 9543 9544 db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n", 9545 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 9546 buf[7]); 9547 } 9548 9549 t4_write_reg(sc, reg, save); 9550 t4_read_reg(sc, reg); 9551} 9552 9553static void 9554t4_dump_devlog(struct adapter *sc) 9555{ 9556 struct devlog_params *dparams = &sc->params.devlog; 9557 struct fw_devlog_e e; 9558 int i, first, j, m, nentries, rc; 9559 uint64_t ftstamp = UINT64_MAX; 9560 9561 if (dparams->start == 0) { 9562 db_printf("devlog params not valid\n"); 9563 return; 9564 } 9565 
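 /* The devlog is a circular buffer of fixed-size entries: find the oldest entry by timestamp, then print forward from there, wrapping at the end. */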
9566 nentries = dparams->size / sizeof(struct fw_devlog_e); 9567 m = fwmtype_to_hwmtype(dparams->memtype); 9568 9569 /* Find the first entry. */ 9570 first = -1; 9571 for (i = 0; i < nentries && !db_pager_quit; i++) { 9572 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), 9573 sizeof(e), (void *)&e); 9574 if (rc != 0) 9575 break; 9576 9577 if (e.timestamp == 0) 9578 break; 9579 9580 e.timestamp = be64toh(e.timestamp); 9581 if (e.timestamp < ftstamp) { 9582 ftstamp = e.timestamp; 9583 first = i; 9584 } 9585 } 9586 9587 if (first == -1) 9588 return; 9589 9590 i = first; 9591 do { 9592 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), 9593 sizeof(e), (void *)&e); 9594 if (rc != 0) 9595 return; 9596 9597 if (e.timestamp == 0) 9598 return; 9599 9600 e.timestamp = be64toh(e.timestamp); 9601 e.seqno = be32toh(e.seqno); 9602 for (j = 0; j < 8; j++) 9603 e.params[j] = be32toh(e.params[j]); 9604 9605 db_printf("%10d %15ju %8s %8s ", 9606 e.seqno, e.timestamp, 9607 (e.level < nitems(devlog_level_strings) ? 9608 devlog_level_strings[e.level] : "UNKNOWN"), 9609 (e.facility < nitems(devlog_facility_strings) ? 9610 devlog_facility_strings[e.facility] : "UNKNOWN")); 9611 db_printf(e.fmt, e.params[0], e.params[1], e.params[2], 9612 e.params[3], e.params[4], e.params[5], e.params[6], 9613 e.params[7]); 9614 9615 if (++i == nentries) 9616 i = 0; 9617 } while (i != first && !db_pager_quit); 9618} 9619 9620static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table); 9621_DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table); 9622 9623DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL) 9624{ 9625 device_t dev; 9626 int t; 9627 bool valid; 9628 9629 valid = false; 9630 t = db_read_token(); 9631 if (t == tIDENT) { 9632 dev = device_lookup_by_name(db_tok_string); 9633 valid = true; 9634 } 9635 db_skip_to_eol(); 9636 if (!valid) { 9637 db_printf("usage: show t4 devlog <nexus>\n"); 9638 return; 9639 } 9640 9641 if (dev == NULL) { 9642 db_printf("device not found\n"); 9643 return; 9644 } 9645 9646 t4_dump_devlog(device_get_softc(dev)); 9647} 9648 9649DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL) 9650{ 9651 device_t dev; 9652 int radix, tid, t; 9653 bool valid; 9654 9655 valid = false; 9656 radix = db_radix; 9657 db_radix = 10; 9658 t = db_read_token(); 9659 if (t == tIDENT) { 9660 dev = device_lookup_by_name(db_tok_string); 9661 t = db_read_token(); 9662 if (t == tNUMBER) { 9663 tid = db_tok_number; 9664 valid = true; 9665 } 9666 } 9667 db_radix = radix; 9668 db_skip_to_eol(); 9669 if (!valid) { 9670 db_printf("usage: show t4 tcb <nexus> <tid>\n"); 9671 return; 9672 } 9673 9674 if (dev == NULL) { 9675 db_printf("device not found\n"); 9676 return; 9677 } 9678 if (tid < 0) { 9679 db_printf("invalid tid\n"); 9680 return; 9681 } 9682 9683 t4_dump_tcb(device_get_softc(dev), tid); 9684} 9685#endif 9686 9687static struct sx mlu; /* mod load unload */ 9688SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload"); 9689 9690static int 9691mod_event(module_t mod, int cmd, void *arg) 9692{ 9693 int rc = 0; 9694 static int loaded = 0; 9695 9696 switch (cmd) { 9697 case MOD_LOAD: 9698 sx_xlock(&mlu); 9699 if (loaded++ == 0) { 9700 t4_sge_modload(); 9701 t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl); 9702 t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl); 9703 t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt); 9704 t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt); 9705 sx_init(&t4_list_lock, "T4/T5 adapters"); 9706 SLIST_INIT(&t4_list); 9707#ifdef TCP_OFFLOAD 
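 /* The ULD registry: TOE-capable modules add themselves via t4_register_uld. */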
9708 sx_init(&t4_uld_list_lock, "T4/T5 ULDs"); 9709 SLIST_INIT(&t4_uld_list); 9710#endif 9711 t4_tracer_modload(); 9712 tweak_tunables(); 9713 } 9714 sx_xunlock(&mlu); 9715 break; 9716 9717 case MOD_UNLOAD: 9718 sx_xlock(&mlu); 9719 if (--loaded == 0) { 9720 int tries; 9721 9722 sx_slock(&t4_list_lock); 9723 if (!SLIST_EMPTY(&t4_list)) { 9724 rc = EBUSY; 9725 sx_sunlock(&t4_list_lock); 9726 goto done_unload; 9727 } 9728#ifdef TCP_OFFLOAD 9729 sx_slock(&t4_uld_list_lock); 9730 if (!SLIST_EMPTY(&t4_uld_list)) { 9731 rc = EBUSY; 9732 sx_sunlock(&t4_uld_list_lock); 9733 sx_sunlock(&t4_list_lock); 9734 goto done_unload; 9735 } 9736#endif 9737 tries = 0; 9738 while (tries++ < 5 && t4_sge_extfree_refs() != 0) { 9739 uprintf("%ju clusters with custom free routine " 9740 "still in use.\n", t4_sge_extfree_refs()); 9741 pause("t4unload", 2 * hz); 9742 } 9743#ifdef TCP_OFFLOAD 9744 sx_sunlock(&t4_uld_list_lock); 9745#endif 9746 sx_sunlock(&t4_list_lock); 9747 9748 if (t4_sge_extfree_refs() == 0) { 9749 t4_tracer_modunload(); 9750#ifdef TCP_OFFLOAD 9751 sx_destroy(&t4_uld_list_lock); 9752#endif 9753 sx_destroy(&t4_list_lock); 9754 t4_sge_modunload(); 9755 loaded = 0; 9756 } else { 9757 rc = EBUSY; 9758 loaded++; /* undo earlier decrement */ 9759 } 9760 } 9761done_unload: 9762 sx_xunlock(&mlu); 9763 break; 9764 } 9765 9766 return (rc); 9767} 9768 9769static devclass_t t4_devclass, t5_devclass, t6_devclass; 9770static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass; 9771static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass; 9772 9773DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0); 9774MODULE_VERSION(t4nex, 1); 9775MODULE_DEPEND(t4nex, firmware, 1, 1, 1); 9776#ifdef DEV_NETMAP 9777MODULE_DEPEND(t4nex, netmap, 1, 1, 1); 9778#endif /* DEV_NETMAP */ 9779 9780DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0); 9781MODULE_VERSION(t5nex, 1); 9782MODULE_DEPEND(t5nex, firmware, 1, 1, 1); 9783#ifdef DEV_NETMAP 9784MODULE_DEPEND(t5nex, netmap, 1, 1, 1); 9785#endif /* DEV_NETMAP */ 9786 9787DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0); 9788MODULE_VERSION(t6nex, 1); 9789MODULE_DEPEND(t6nex, firmware, 1, 1, 1); 9790#ifdef DEV_NETMAP 9791MODULE_DEPEND(t6nex, netmap, 1, 1, 1); 9792#endif /* DEV_NETMAP */ 9793 9794DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0); 9795MODULE_VERSION(cxgbe, 1); 9796 9797DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0); 9798MODULE_VERSION(cxl, 1); 9799 9800DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0); 9801MODULE_VERSION(cc, 1); 9802 9803DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0); 9804MODULE_VERSION(vcxgbe, 1); 9805 9806DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0); 9807MODULE_VERSION(vcxl, 1); 9808 9809DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0); 9810MODULE_VERSION(vcc, 1); 9811
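/*
 * Illustrative sketch (not part of the driver): minimal userland use of
 * the CHELSIO_T4_GETREG ioctl serviced by t4_ioctl() above.  The
 * /dev/t4nex0 node name is an assumption for the first adapter, and the
 * program must be built against the driver's t4_ioctl.h.  Not compiled
 * here.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "t4_ioctl.h"

int
main(int argc, char **argv)
{
	struct t4_reg reg;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: getreg <addr>\n");
		return (1);
	}
	fd = open("/dev/t4nex0", O_RDWR);	/* assumed device node */
	if (fd < 0) {
		perror("open");
		return (1);
	}
	reg.addr = (uint32_t)strtoul(argv[1], NULL, 0);	/* must be 4-byte aligned */
	reg.size = 4;			/* the handler accepts 4 or 8 */
	reg.val = 0;
	if (ioctl(fd, CHELSIO_T4_GETREG, &reg) != 0) {
		perror("CHELSIO_T4_GETREG");
		return (1);
	}
	printf("0x%08x: 0x%jx\n", reg.addr, (uintmax_t)reg.val);
	return (0);
}
#endif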
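/*
 * Similar sketch for CHELSIO_T4_GET_MEM (serviced by read_card_mem()
 * above): dump a chunk of card memory.  The field layout of struct
 * t4_mem_range (addr/len/data) follows its use in read_card_mem(); the
 * address and length below are placeholders and must describe a valid
 * range per validate_mem_range().  Not compiled here.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "t4_ioctl.h"

int
main(void)
{
	struct t4_mem_range mr;
	int fd = open("/dev/t4nex0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return (1);
	}
	mr.addr = 0;			/* placeholder address */
	mr.len = 4096;			/* placeholder length, a 4B multiple */
	mr.data = malloc(mr.len);	/* the driver copyout()s into this */
	if (mr.data == NULL || ioctl(fd, CHELSIO_T4_GET_MEM, &mr) != 0) {
		perror("CHELSIO_T4_GET_MEM");
		return (1);
	}
	printf("first word: 0x%08x\n", mr.data[0]);
	free(mr.data);
	return (0);
}
#endif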