t4_sge.c revision 284052
1219019Sgabor/*- 2219019Sgabor * Copyright (c) 2011 Chelsio Communications, Inc. 3219019Sgabor * All rights reserved. 4219019Sgabor * Written by: Navdeep Parhar <np@FreeBSD.org> 5219019Sgabor * 6219019Sgabor * Redistribution and use in source and binary forms, with or without 7219019Sgabor * modification, are permitted provided that the following conditions 8219019Sgabor * are met: 9219019Sgabor * 1. Redistributions of source code must retain the above copyright 10219019Sgabor * notice, this list of conditions and the following disclaimer. 11219019Sgabor * 2. Redistributions in binary form must reproduce the above copyright 12219019Sgabor * notice, this list of conditions and the following disclaimer in the 13219019Sgabor * documentation and/or other materials provided with the distribution. 14219019Sgabor * 15219019Sgabor * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16219019Sgabor * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17219019Sgabor * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18219019Sgabor * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19219019Sgabor * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20219019Sgabor * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21219019Sgabor * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22219019Sgabor * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23219019Sgabor * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24219019Sgabor * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25219019Sgabor * SUCH DAMAGE. 26219019Sgabor */ 27219019Sgabor 28219019Sgabor#include <sys/cdefs.h> 29219019Sgabor__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/t4_sge.c 284052 2015-06-06 09:28:40Z np $"); 30219019Sgabor 31219019Sgabor#include "opt_inet.h" 32219019Sgabor#include "opt_inet6.h" 33219019Sgabor 34219019Sgabor#include <sys/types.h> 35219019Sgabor#include <sys/mbuf.h> 36219019Sgabor#include <sys/socket.h> 37219019Sgabor#include <sys/kernel.h> 38219019Sgabor#include <sys/malloc.h> 39219019Sgabor#include <sys/queue.h> 40219019Sgabor#include <sys/sbuf.h> 41219019Sgabor#include <sys/taskqueue.h> 42219019Sgabor#include <sys/time.h> 43219019Sgabor#include <sys/sglist.h> 44219019Sgabor#include <sys/sysctl.h> 45219019Sgabor#include <sys/smp.h> 46219019Sgabor#include <sys/counter.h> 47219019Sgabor#include <net/bpf.h> 48219019Sgabor#include <net/ethernet.h> 49219019Sgabor#include <net/if.h> 50219019Sgabor#include <net/if_vlan_var.h> 51219019Sgabor#include <netinet/in.h> 52219019Sgabor#include <netinet/ip.h> 53219019Sgabor#include <netinet/ip6.h> 54219019Sgabor#include <netinet/tcp.h> 55219019Sgabor#include <machine/md_var.h> 56219019Sgabor#include <vm/vm.h> 57219019Sgabor#include <vm/pmap.h> 58219019Sgabor#ifdef DEV_NETMAP 59219019Sgabor#include <machine/bus.h> 60219019Sgabor#include <sys/selinfo.h> 61219019Sgabor#include <net/if_var.h> 62219019Sgabor#include <net/netmap.h> 63219019Sgabor#include <dev/netmap/netmap_kern.h> 64219019Sgabor#endif 65219019Sgabor 66219019Sgabor#include "common/common.h" 67219019Sgabor#include "common/t4_regs.h" 68219019Sgabor#include "common/t4_regs_values.h" 69219019Sgabor#include "common/t4_msg.h" 70219019Sgabor#include "t4_mp_ring.h" 71219019Sgabor 72219019Sgabor#ifdef T4_PKT_TIMESTAMP 73219019Sgabor#define RX_COPY_THRESHOLD (MINCLSIZE - 8) 74219019Sgabor#else 75219019Sgabor#define RX_COPY_THRESHOLD MINCLSIZE 76219019Sgabor#endif 77219019Sgabor 78219019Sgabor/* 79219019Sgabor * Ethernet frames are DMA'd at this byte offset into the freelist buffer. 80219019Sgabor * 0-7 are valid values. 81219019Sgabor */ 82219019Sgaborint fl_pktshift = 2; 83219019SgaborTUNABLE_INT("hw.cxgbe.fl_pktshift", &fl_pktshift); 84219019Sgabor 85219019Sgabor/* 86219019Sgabor * Pad ethernet payload up to this boundary. 87219019Sgabor * -1: driver should figure out a good value. 88219019Sgabor * 0: disable padding. 89219019Sgabor * Any power of 2 from 32 to 4096 (both inclusive) is also a valid value. 90219019Sgabor */ 91219019Sgaborint fl_pad = -1; 92219019SgaborTUNABLE_INT("hw.cxgbe.fl_pad", &fl_pad); 93219019Sgabor 94219019Sgabor/* 95219019Sgabor * Status page length. 96219019Sgabor * -1: driver should figure out a good value. 97219019Sgabor * 64 or 128 are the only other valid values. 98219019Sgabor */ 99219019Sgaborint spg_len = -1; 100219019SgaborTUNABLE_INT("hw.cxgbe.spg_len", &spg_len); 101219019Sgabor 102219019Sgabor/* 103219019Sgabor * Congestion drops. 104219019Sgabor * -1: no congestion feedback (not recommended). 105219019Sgabor * 0: backpressure the channel instead of dropping packets right away. 106219019Sgabor * 1: no backpressure, drop packets for the congested queue immediately. 107219019Sgabor */ 108219019Sgaborstatic int cong_drop = 0; 109219019SgaborTUNABLE_INT("hw.cxgbe.cong_drop", &cong_drop); 110219019Sgabor 111219019Sgabor/* 112219019Sgabor * Deliver multiple frames in the same free list buffer if they fit. 113219019Sgabor * -1: let the driver decide whether to enable buffer packing or not. 114219019Sgabor * 0: disable buffer packing. 115219019Sgabor * 1: enable buffer packing. 116219019Sgabor */ 117219019Sgaborstatic int buffer_packing = -1; 118219019SgaborTUNABLE_INT("hw.cxgbe.buffer_packing", &buffer_packing); 119219019Sgabor 120219019Sgabor/* 121219019Sgabor * Start next frame in a packed buffer at this boundary. 122219019Sgabor * -1: driver should figure out a good value. 123219019Sgabor * T4: driver will ignore this and use the same value as fl_pad above. 124219019Sgabor * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value. 125219019Sgabor */ 126219019Sgaborstatic int fl_pack = -1; 127219019SgaborTUNABLE_INT("hw.cxgbe.fl_pack", &fl_pack); 128219019Sgabor 129219019Sgabor/* 130219019Sgabor * Allow the driver to create mbuf(s) in a cluster allocated for rx. 131219019Sgabor * 0: never; always allocate mbufs from the zone_mbuf UMA zone. 132219019Sgabor * 1: ok to create mbuf(s) within a cluster if there is room. 133219019Sgabor */ 134219019Sgaborstatic int allow_mbufs_in_cluster = 1; 135219019SgaborTUNABLE_INT("hw.cxgbe.allow_mbufs_in_cluster", &allow_mbufs_in_cluster); 136219019Sgabor 137219019Sgabor/* 138219019Sgabor * Largest rx cluster size that the driver is allowed to allocate. 139219019Sgabor */ 140219019Sgaborstatic int largest_rx_cluster = MJUM16BYTES; 141219019SgaborTUNABLE_INT("hw.cxgbe.largest_rx_cluster", &largest_rx_cluster); 142219019Sgabor 143219019Sgabor/* 144219019Sgabor * Size of cluster allocation that's most likely to succeed. The driver will 145219019Sgabor * fall back to this size if it fails to allocate clusters larger than this. 146219019Sgabor */ 147219019Sgaborstatic int safest_rx_cluster = PAGE_SIZE; 148219019SgaborTUNABLE_INT("hw.cxgbe.safest_rx_cluster", &safest_rx_cluster); 149219019Sgabor 150219019Sgaborstruct txpkts { 151219019Sgabor u_int wr_type; /* type 0 or type 1 */ 152219019Sgabor u_int npkt; /* # of packets in this work request */ 153219019Sgabor u_int plen; /* total payload (sum of all packets) */ 154219019Sgabor u_int len16; /* # of 16B pieces used by this work request */ 155219019Sgabor}; 156219019Sgabor 157219019Sgabor/* A packet's SGL. This + m_pkthdr has all info needed for tx */ 158219019Sgaborstruct sgl { 159219019Sgabor struct sglist sg; 160219019Sgabor struct sglist_seg seg[TX_SGL_SEGS]; 161219019Sgabor}; 162219019Sgabor 163219019Sgaborstatic int service_iq(struct sge_iq *, int); 164219019Sgaborstatic struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t); 165219019Sgaborstatic int t4_eth_rx(struct sge_iq *, const struct rss_header *, struct mbuf *); 166219019Sgaborstatic inline void init_iq(struct sge_iq *, struct adapter *, int, int, int); 167219019Sgaborstatic inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *); 168219019Sgaborstatic inline void init_eq(struct sge_eq *, int, int, uint8_t, uint16_t, 169219019Sgabor char *); 170219019Sgaborstatic int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *, 171219019Sgabor bus_addr_t *, void **); 172219019Sgaborstatic int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t, 173219019Sgabor void *); 174219019Sgaborstatic int alloc_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *, 175219019Sgabor int, int); 176219019Sgaborstatic int free_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *); 177219019Sgaborstatic void add_fl_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *, 178219019Sgabor struct sge_fl *); 179219019Sgaborstatic int alloc_fwq(struct adapter *); 180219019Sgaborstatic int free_fwq(struct adapter *); 181219019Sgaborstatic int alloc_mgmtq(struct adapter *); 182219019Sgaborstatic int free_mgmtq(struct adapter *); 183219019Sgaborstatic int alloc_rxq(struct port_info *, struct sge_rxq *, int, int, 184219019Sgabor struct sysctl_oid *); 185219019Sgaborstatic int free_rxq(struct port_info *, struct sge_rxq *); 186219019Sgabor#ifdef TCP_OFFLOAD 187219019Sgaborstatic int alloc_ofld_rxq(struct port_info *, struct sge_ofld_rxq *, int, int, 188219019Sgabor struct sysctl_oid *); 189219019Sgaborstatic int free_ofld_rxq(struct port_info *, struct sge_ofld_rxq *); 190219019Sgabor#endif 191219019Sgabor#ifdef DEV_NETMAP 192219019Sgaborstatic int alloc_nm_rxq(struct port_info *, struct sge_nm_rxq *, int, int, 193219019Sgabor struct sysctl_oid *); 194219019Sgaborstatic int free_nm_rxq(struct port_info *, struct sge_nm_rxq *); 195219019Sgaborstatic int alloc_nm_txq(struct port_info *, struct sge_nm_txq *, int, int, 196219019Sgabor struct sysctl_oid *); 197219019Sgaborstatic int free_nm_txq(struct port_info *, struct sge_nm_txq *); 198219019Sgabor#endif 199219019Sgaborstatic int ctrl_eq_alloc(struct adapter *, struct sge_eq *); 200219019Sgaborstatic int eth_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *); 201219019Sgabor#ifdef TCP_OFFLOAD 202219019Sgaborstatic int ofld_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *); 203219019Sgabor#endif 204219019Sgaborstatic int alloc_eq(struct adapter *, struct port_info *, struct sge_eq *); 205219019Sgaborstatic int free_eq(struct adapter *, struct sge_eq *); 206219019Sgaborstatic int alloc_wrq(struct adapter *, struct port_info *, struct sge_wrq *, 207219019Sgabor struct sysctl_oid *); 208219019Sgaborstatic int free_wrq(struct adapter *, struct sge_wrq *); 209219019Sgaborstatic int alloc_txq(struct port_info *, struct sge_txq *, int, 210219019Sgabor struct sysctl_oid *); 211219019Sgaborstatic int free_txq(struct port_info *, struct sge_txq *); 212static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int); 213static inline void ring_fl_db(struct adapter *, struct sge_fl *); 214static int refill_fl(struct adapter *, struct sge_fl *, int); 215static void refill_sfl(void *); 216static int alloc_fl_sdesc(struct sge_fl *); 217static void free_fl_sdesc(struct adapter *, struct sge_fl *); 218static void find_best_refill_source(struct adapter *, struct sge_fl *, int); 219static void find_safe_refill_source(struct adapter *, struct sge_fl *); 220static void add_fl_to_sfl(struct adapter *, struct sge_fl *); 221 222static inline void get_pkt_gl(struct mbuf *, struct sglist *); 223static inline u_int txpkt_len16(u_int, u_int); 224static inline u_int txpkts0_len16(u_int); 225static inline u_int txpkts1_len16(void); 226static u_int write_txpkt_wr(struct sge_txq *, struct fw_eth_tx_pkt_wr *, 227 struct mbuf *, u_int); 228static int try_txpkts(struct mbuf *, struct mbuf *, struct txpkts *, u_int); 229static int add_to_txpkts(struct mbuf *, struct txpkts *, u_int); 230static u_int write_txpkts_wr(struct sge_txq *, struct fw_eth_tx_pkts_wr *, 231 struct mbuf *, const struct txpkts *, u_int); 232static void write_gl_to_txd(struct sge_txq *, struct mbuf *, caddr_t *, int); 233static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int); 234static inline void ring_eq_db(struct adapter *, struct sge_eq *, u_int); 235static inline uint16_t read_hw_cidx(struct sge_eq *); 236static inline u_int reclaimable_tx_desc(struct sge_eq *); 237static inline u_int total_available_tx_desc(struct sge_eq *); 238static u_int reclaim_tx_descs(struct sge_txq *, u_int); 239static void tx_reclaim(void *, int); 240static __be64 get_flit(struct sglist_seg *, int, int); 241static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *, 242 struct mbuf *); 243static int handle_fw_msg(struct sge_iq *, const struct rss_header *, 244 struct mbuf *); 245static void wrq_tx_drain(void *, int); 246static void drain_wrq_wr_list(struct adapter *, struct sge_wrq *); 247 248static int sysctl_uint16(SYSCTL_HANDLER_ARGS); 249static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS); 250 251static counter_u64_t extfree_refs; 252static counter_u64_t extfree_rels; 253 254/* 255 * Called on MOD_LOAD. Validates and calculates the SGE tunables. 256 */ 257void 258t4_sge_modload(void) 259{ 260 261 if (fl_pktshift < 0 || fl_pktshift > 7) { 262 printf("Invalid hw.cxgbe.fl_pktshift value (%d)," 263 " using 2 instead.\n", fl_pktshift); 264 fl_pktshift = 2; 265 } 266 267 if (spg_len != 64 && spg_len != 128) { 268 int len; 269 270#if defined(__i386__) || defined(__amd64__) 271 len = cpu_clflush_line_size > 64 ? 128 : 64; 272#else 273 len = 64; 274#endif 275 if (spg_len != -1) { 276 printf("Invalid hw.cxgbe.spg_len value (%d)," 277 " using %d instead.\n", spg_len, len); 278 } 279 spg_len = len; 280 } 281 282 if (cong_drop < -1 || cong_drop > 1) { 283 printf("Invalid hw.cxgbe.cong_drop value (%d)," 284 " using 0 instead.\n", cong_drop); 285 cong_drop = 0; 286 } 287 288 extfree_refs = counter_u64_alloc(M_WAITOK); 289 extfree_rels = counter_u64_alloc(M_WAITOK); 290 counter_u64_zero(extfree_refs); 291 counter_u64_zero(extfree_rels); 292} 293 294void 295t4_sge_modunload(void) 296{ 297 298 counter_u64_free(extfree_refs); 299 counter_u64_free(extfree_rels); 300} 301 302uint64_t 303t4_sge_extfree_refs(void) 304{ 305 uint64_t refs, rels; 306 307 rels = counter_u64_fetch(extfree_rels); 308 refs = counter_u64_fetch(extfree_refs); 309 310 return (refs - rels); 311} 312 313void 314t4_init_sge_cpl_handlers(struct adapter *sc) 315{ 316 317 t4_register_cpl_handler(sc, CPL_FW4_MSG, handle_fw_msg); 318 t4_register_cpl_handler(sc, CPL_FW6_MSG, handle_fw_msg); 319 t4_register_cpl_handler(sc, CPL_SGE_EGR_UPDATE, handle_sge_egr_update); 320 t4_register_cpl_handler(sc, CPL_RX_PKT, t4_eth_rx); 321 t4_register_fw_msg_handler(sc, FW6_TYPE_CMD_RPL, t4_handle_fw_rpl); 322} 323 324static inline void 325setup_pad_and_pack_boundaries(struct adapter *sc) 326{ 327 uint32_t v, m; 328 int pad, pack; 329 330 pad = fl_pad; 331 if (fl_pad < 32 || fl_pad > 4096 || !powerof2(fl_pad)) { 332 /* 333 * If there is any chance that we might use buffer packing and 334 * the chip is a T4, then pick 64 as the pad/pack boundary. Set 335 * it to 32 in all other cases. 336 */ 337 pad = is_t4(sc) && buffer_packing ? 64 : 32; 338 339 /* 340 * For fl_pad = 0 we'll still write a reasonable value to the 341 * register but all the freelists will opt out of padding. 342 * We'll complain here only if the user tried to set it to a 343 * value greater than 0 that was invalid. 344 */ 345 if (fl_pad > 0) { 346 device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value" 347 " (%d), using %d instead.\n", fl_pad, pad); 348 } 349 } 350 m = V_INGPADBOUNDARY(M_INGPADBOUNDARY); 351 v = V_INGPADBOUNDARY(ilog2(pad) - 5); 352 t4_set_reg_field(sc, A_SGE_CONTROL, m, v); 353 354 if (is_t4(sc)) { 355 if (fl_pack != -1 && fl_pack != pad) { 356 /* Complain but carry on. */ 357 device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored," 358 " using %d instead.\n", fl_pack, pad); 359 } 360 return; 361 } 362 363 pack = fl_pack; 364 if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 || 365 !powerof2(fl_pack)) { 366 pack = max(sc->params.pci.mps, CACHE_LINE_SIZE); 367 MPASS(powerof2(pack)); 368 if (pack < 16) 369 pack = 16; 370 if (pack == 32) 371 pack = 64; 372 if (pack > 4096) 373 pack = 4096; 374 if (fl_pack != -1) { 375 device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value" 376 " (%d), using %d instead.\n", fl_pack, pack); 377 } 378 } 379 m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY); 380 if (pack == 16) 381 v = V_INGPACKBOUNDARY(0); 382 else 383 v = V_INGPACKBOUNDARY(ilog2(pack) - 5); 384 385 MPASS(!is_t4(sc)); /* T4 doesn't have SGE_CONTROL2 */ 386 t4_set_reg_field(sc, A_SGE_CONTROL2, m, v); 387} 388 389/* 390 * adap->params.vpd.cclk must be set up before this is called. 391 */ 392void 393t4_tweak_chip_settings(struct adapter *sc) 394{ 395 int i; 396 uint32_t v, m; 397 int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200}; 398 int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk; 399 int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */ 400 uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); 401 static int sge_flbuf_sizes[] = { 402 MCLBYTES, 403#if MJUMPAGESIZE != MCLBYTES 404 MJUMPAGESIZE, 405 MJUMPAGESIZE - CL_METADATA_SIZE, 406 MJUMPAGESIZE - 2 * MSIZE - CL_METADATA_SIZE, 407#endif 408 MJUM9BYTES, 409 MJUM16BYTES, 410 MCLBYTES - MSIZE - CL_METADATA_SIZE, 411 MJUM9BYTES - CL_METADATA_SIZE, 412 MJUM16BYTES - CL_METADATA_SIZE, 413 }; 414 415 KASSERT(sc->flags & MASTER_PF, 416 ("%s: trying to change chip settings when not master.", __func__)); 417 418 m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE; 419 v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE | 420 V_EGRSTATUSPAGESIZE(spg_len == 128); 421 t4_set_reg_field(sc, A_SGE_CONTROL, m, v); 422 423 setup_pad_and_pack_boundaries(sc); 424 425 v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) | 426 V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) | 427 V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) | 428 V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) | 429 V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) | 430 V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) | 431 V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) | 432 V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10); 433 t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v); 434 435 KASSERT(nitems(sge_flbuf_sizes) <= SGE_FLBUF_SIZES, 436 ("%s: hw buffer size table too big", __func__)); 437 for (i = 0; i < min(nitems(sge_flbuf_sizes), SGE_FLBUF_SIZES); i++) { 438 t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i), 439 sge_flbuf_sizes[i]); 440 } 441 442 v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) | 443 V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]); 444 t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v); 445 446 KASSERT(intr_timer[0] <= timer_max, 447 ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0], 448 timer_max)); 449 for (i = 1; i < nitems(intr_timer); i++) { 450 KASSERT(intr_timer[i] >= intr_timer[i - 1], 451 ("%s: timers not listed in increasing order (%d)", 452 __func__, i)); 453 454 while (intr_timer[i] > timer_max) { 455 if (i == nitems(intr_timer) - 1) { 456 intr_timer[i] = timer_max; 457 break; 458 } 459 intr_timer[i] += intr_timer[i - 1]; 460 intr_timer[i] /= 2; 461 } 462 } 463 464 v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) | 465 V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1])); 466 t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v); 467 v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) | 468 V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3])); 469 t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v); 470 v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) | 471 V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5])); 472 t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v); 473 474 if (cong_drop == 0) { 475 m = F_TUNNELCNGDROP0 | F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | 476 F_TUNNELCNGDROP3; 477 t4_set_reg_field(sc, A_TP_PARA_REG3, m, 0); 478 } 479 480 /* 4K, 16K, 64K, 256K DDP "page sizes" */ 481 v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6); 482 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v); 483 484 m = v = F_TDDPTAGTCB; 485 t4_set_reg_field(sc, A_ULP_RX_CTL, m, v); 486 487 m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET | 488 F_RESETDDPOFFSET; 489 v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; 490 t4_set_reg_field(sc, A_TP_PARA_REG5, m, v); 491} 492 493/* 494 * SGE wants the buffer to be at least 64B and then a multiple of 16. If 495 * padding is is use the buffer's start and end need to be aligned to the pad 496 * boundary as well. We'll just make sure that the size is a multiple of the 497 * boundary here, it is up to the buffer allocation code to make sure the start 498 * of the buffer is aligned as well. 499 */ 500static inline int 501hwsz_ok(struct adapter *sc, int hwsz) 502{ 503 int mask = fl_pad ? sc->sge.pad_boundary - 1 : 16 - 1; 504 505 return (hwsz >= 64 && (hwsz & mask) == 0); 506} 507 508/* 509 * XXX: driver really should be able to deal with unexpected settings. 510 */ 511int 512t4_read_chip_settings(struct adapter *sc) 513{ 514 struct sge *s = &sc->sge; 515 int i, j, n, rc = 0; 516 uint32_t m, v, r; 517 uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); 518 static int sw_buf_sizes[] = { /* Sorted by size */ 519 MCLBYTES, 520#if MJUMPAGESIZE != MCLBYTES 521 MJUMPAGESIZE, 522#endif 523 MJUM9BYTES, 524 MJUM16BYTES 525 }; 526 struct sw_zone_info *swz, *safe_swz; 527 struct hw_buf_info *hwb; 528 529 m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE; 530 v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE | 531 V_EGRSTATUSPAGESIZE(spg_len == 128); 532 r = t4_read_reg(sc, A_SGE_CONTROL); 533 if ((r & m) != v) { 534 device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r); 535 rc = EINVAL; 536 } 537 s->pad_boundary = 1 << (G_INGPADBOUNDARY(r) + 5); 538 539 if (is_t4(sc)) 540 s->pack_boundary = s->pad_boundary; 541 else { 542 r = t4_read_reg(sc, A_SGE_CONTROL2); 543 if (G_INGPACKBOUNDARY(r) == 0) 544 s->pack_boundary = 16; 545 else 546 s->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5); 547 } 548 549 v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) | 550 V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) | 551 V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) | 552 V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) | 553 V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) | 554 V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) | 555 V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) | 556 V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10); 557 r = t4_read_reg(sc, A_SGE_HOST_PAGE_SIZE); 558 if (r != v) { 559 device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r); 560 rc = EINVAL; 561 } 562 563 /* Filter out unusable hw buffer sizes entirely (mark with -2). */ 564 hwb = &s->hw_buf_info[0]; 565 for (i = 0; i < nitems(s->hw_buf_info); i++, hwb++) { 566 r = t4_read_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i)); 567 hwb->size = r; 568 hwb->zidx = hwsz_ok(sc, r) ? -1 : -2; 569 hwb->next = -1; 570 } 571 572 /* 573 * Create a sorted list in decreasing order of hw buffer sizes (and so 574 * increasing order of spare area) for each software zone. 575 * 576 * If padding is enabled then the start and end of the buffer must align 577 * to the pad boundary; if packing is enabled then they must align with 578 * the pack boundary as well. Allocations from the cluster zones are 579 * aligned to min(size, 4K), so the buffer starts at that alignment and 580 * ends at hwb->size alignment. If mbuf inlining is allowed the 581 * starting alignment will be reduced to MSIZE and the driver will 582 * exercise appropriate caution when deciding on the best buffer layout 583 * to use. 584 */ 585 n = 0; /* no usable buffer size to begin with */ 586 swz = &s->sw_zone_info[0]; 587 safe_swz = NULL; 588 for (i = 0; i < SW_ZONE_SIZES; i++, swz++) { 589 int8_t head = -1, tail = -1; 590 591 swz->size = sw_buf_sizes[i]; 592 swz->zone = m_getzone(swz->size); 593 swz->type = m_gettype(swz->size); 594 595 if (swz->size < PAGE_SIZE) { 596 MPASS(powerof2(swz->size)); 597 if (fl_pad && (swz->size % sc->sge.pad_boundary != 0)) 598 continue; 599 } 600 601 if (swz->size == safest_rx_cluster) 602 safe_swz = swz; 603 604 hwb = &s->hw_buf_info[0]; 605 for (j = 0; j < SGE_FLBUF_SIZES; j++, hwb++) { 606 if (hwb->zidx != -1 || hwb->size > swz->size) 607 continue; 608#ifdef INVARIANTS 609 if (fl_pad) 610 MPASS(hwb->size % sc->sge.pad_boundary == 0); 611#endif 612 hwb->zidx = i; 613 if (head == -1) 614 head = tail = j; 615 else if (hwb->size < s->hw_buf_info[tail].size) { 616 s->hw_buf_info[tail].next = j; 617 tail = j; 618 } else { 619 int8_t *cur; 620 struct hw_buf_info *t; 621 622 for (cur = &head; *cur != -1; cur = &t->next) { 623 t = &s->hw_buf_info[*cur]; 624 if (hwb->size == t->size) { 625 hwb->zidx = -2; 626 break; 627 } 628 if (hwb->size > t->size) { 629 hwb->next = *cur; 630 *cur = j; 631 break; 632 } 633 } 634 } 635 } 636 swz->head_hwidx = head; 637 swz->tail_hwidx = tail; 638 639 if (tail != -1) { 640 n++; 641 if (swz->size - s->hw_buf_info[tail].size >= 642 CL_METADATA_SIZE) 643 sc->flags |= BUF_PACKING_OK; 644 } 645 } 646 if (n == 0) { 647 device_printf(sc->dev, "no usable SGE FL buffer size.\n"); 648 rc = EINVAL; 649 } 650 651 s->safe_hwidx1 = -1; 652 s->safe_hwidx2 = -1; 653 if (safe_swz != NULL) { 654 s->safe_hwidx1 = safe_swz->head_hwidx; 655 for (i = safe_swz->head_hwidx; i != -1; i = hwb->next) { 656 int spare; 657 658 hwb = &s->hw_buf_info[i]; 659#ifdef INVARIANTS 660 if (fl_pad) 661 MPASS(hwb->size % sc->sge.pad_boundary == 0); 662#endif 663 spare = safe_swz->size - hwb->size; 664 if (spare >= CL_METADATA_SIZE) { 665 s->safe_hwidx2 = i; 666 break; 667 } 668 } 669 } 670 671 r = t4_read_reg(sc, A_SGE_INGRESS_RX_THRESHOLD); 672 s->counter_val[0] = G_THRESHOLD_0(r); 673 s->counter_val[1] = G_THRESHOLD_1(r); 674 s->counter_val[2] = G_THRESHOLD_2(r); 675 s->counter_val[3] = G_THRESHOLD_3(r); 676 677 r = t4_read_reg(sc, A_SGE_TIMER_VALUE_0_AND_1); 678 s->timer_val[0] = G_TIMERVALUE0(r) / core_ticks_per_usec(sc); 679 s->timer_val[1] = G_TIMERVALUE1(r) / core_ticks_per_usec(sc); 680 r = t4_read_reg(sc, A_SGE_TIMER_VALUE_2_AND_3); 681 s->timer_val[2] = G_TIMERVALUE2(r) / core_ticks_per_usec(sc); 682 s->timer_val[3] = G_TIMERVALUE3(r) / core_ticks_per_usec(sc); 683 r = t4_read_reg(sc, A_SGE_TIMER_VALUE_4_AND_5); 684 s->timer_val[4] = G_TIMERVALUE4(r) / core_ticks_per_usec(sc); 685 s->timer_val[5] = G_TIMERVALUE5(r) / core_ticks_per_usec(sc); 686 687 if (cong_drop == 0) { 688 m = F_TUNNELCNGDROP0 | F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | 689 F_TUNNELCNGDROP3; 690 r = t4_read_reg(sc, A_TP_PARA_REG3); 691 if (r & m) { 692 device_printf(sc->dev, 693 "invalid TP_PARA_REG3(0x%x)\n", r); 694 rc = EINVAL; 695 } 696 } 697 698 v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6); 699 r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ); 700 if (r != v) { 701 device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r); 702 rc = EINVAL; 703 } 704 705 m = v = F_TDDPTAGTCB; 706 r = t4_read_reg(sc, A_ULP_RX_CTL); 707 if ((r & m) != v) { 708 device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r); 709 rc = EINVAL; 710 } 711 712 m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET | 713 F_RESETDDPOFFSET; 714 v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; 715 r = t4_read_reg(sc, A_TP_PARA_REG5); 716 if ((r & m) != v) { 717 device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r); 718 rc = EINVAL; 719 } 720 721 r = t4_read_reg(sc, A_SGE_CONM_CTRL); 722 s->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1; 723 if (is_t4(sc)) 724 s->fl_starve_threshold2 = s->fl_starve_threshold; 725 else 726 s->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1; 727 728 /* egress queues: log2 of # of doorbells per BAR2 page */ 729 r = t4_read_reg(sc, A_SGE_EGRESS_QUEUES_PER_PAGE_PF); 730 r >>= S_QUEUESPERPAGEPF0 + 731 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * sc->pf; 732 s->eq_s_qpp = r & M_QUEUESPERPAGEPF0; 733 734 /* ingress queues: log2 of # of doorbells per BAR2 page */ 735 r = t4_read_reg(sc, A_SGE_INGRESS_QUEUES_PER_PAGE_PF); 736 r >>= S_QUEUESPERPAGEPF0 + 737 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * sc->pf; 738 s->iq_s_qpp = r & M_QUEUESPERPAGEPF0; 739 740 t4_init_tp_params(sc); 741 742 t4_read_mtu_tbl(sc, sc->params.mtus, NULL); 743 t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd); 744 745 return (rc); 746} 747 748int 749t4_create_dma_tag(struct adapter *sc) 750{ 751 int rc; 752 753 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, 754 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, 755 BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, 756 NULL, &sc->dmat); 757 if (rc != 0) { 758 device_printf(sc->dev, 759 "failed to create main DMA tag: %d\n", rc); 760 } 761 762 return (rc); 763} 764 765void 766t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx, 767 struct sysctl_oid_list *children) 768{ 769 770 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes", 771 CTLTYPE_STRING | CTLFLAG_RD, &sc->sge, 0, sysctl_bufsizes, "A", 772 "freelist buffer sizes"); 773 774 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD, 775 NULL, fl_pktshift, "payload DMA offset in rx buffer (bytes)"); 776 777 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD, 778 NULL, sc->sge.pad_boundary, "payload pad boundary (bytes)"); 779 780 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD, 781 NULL, spg_len, "status page size (bytes)"); 782 783 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD, 784 NULL, cong_drop, "congestion drop setting"); 785 786 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD, 787 NULL, sc->sge.pack_boundary, "payload pack boundary (bytes)"); 788} 789 790int 791t4_destroy_dma_tag(struct adapter *sc) 792{ 793 if (sc->dmat) 794 bus_dma_tag_destroy(sc->dmat); 795 796 return (0); 797} 798 799/* 800 * Allocate and initialize the firmware event queue and the management queue. 801 * 802 * Returns errno on failure. Resources allocated up to that point may still be 803 * allocated. Caller is responsible for cleanup in case this function fails. 804 */ 805int 806t4_setup_adapter_queues(struct adapter *sc) 807{ 808 int rc; 809 810 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 811 812 sysctl_ctx_init(&sc->ctx); 813 sc->flags |= ADAP_SYSCTL_CTX; 814 815 /* 816 * Firmware event queue 817 */ 818 rc = alloc_fwq(sc); 819 if (rc != 0) 820 return (rc); 821 822 /* 823 * Management queue. This is just a control queue that uses the fwq as 824 * its associated iq. 825 */ 826 rc = alloc_mgmtq(sc); 827 828 return (rc); 829} 830 831/* 832 * Idempotent 833 */ 834int 835t4_teardown_adapter_queues(struct adapter *sc) 836{ 837 838 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 839 840 /* Do this before freeing the queue */ 841 if (sc->flags & ADAP_SYSCTL_CTX) { 842 sysctl_ctx_free(&sc->ctx); 843 sc->flags &= ~ADAP_SYSCTL_CTX; 844 } 845 846 free_mgmtq(sc); 847 free_fwq(sc); 848 849 return (0); 850} 851 852static inline int 853port_intr_count(struct port_info *pi) 854{ 855 int rc = 0; 856 857 if (pi->flags & INTR_RXQ) 858 rc += pi->nrxq; 859#ifdef TCP_OFFLOAD 860 if (pi->flags & INTR_OFLD_RXQ) 861 rc += pi->nofldrxq; 862#endif 863#ifdef DEV_NETMAP 864 if (pi->flags & INTR_NM_RXQ) 865 rc += pi->nnmrxq; 866#endif 867 return (rc); 868} 869 870static inline int 871first_vector(struct port_info *pi) 872{ 873 struct adapter *sc = pi->adapter; 874 int rc = T4_EXTRA_INTR, i; 875 876 if (sc->intr_count == 1) 877 return (0); 878 879 for_each_port(sc, i) { 880 if (i == pi->port_id) 881 break; 882 883 rc += port_intr_count(sc->port[i]); 884 } 885 886 return (rc); 887} 888 889/* 890 * Given an arbitrary "index," come up with an iq that can be used by other 891 * queues (of this port) for interrupt forwarding, SGE egress updates, etc. 892 * The iq returned is guaranteed to be something that takes direct interrupts. 893 */ 894static struct sge_iq * 895port_intr_iq(struct port_info *pi, int idx) 896{ 897 struct adapter *sc = pi->adapter; 898 struct sge *s = &sc->sge; 899 struct sge_iq *iq = NULL; 900 int nintr, i; 901 902 if (sc->intr_count == 1) 903 return (&sc->sge.fwq); 904 905 nintr = port_intr_count(pi); 906 KASSERT(nintr != 0, 907 ("%s: pi %p has no exclusive interrupts, total interrupts = %d", 908 __func__, pi, sc->intr_count)); 909#ifdef DEV_NETMAP 910 /* Exclude netmap queues as they can't take anyone else's interrupts */ 911 if (pi->flags & INTR_NM_RXQ) 912 nintr -= pi->nnmrxq; 913 KASSERT(nintr > 0, 914 ("%s: pi %p has nintr %d after netmap adjustment of %d", __func__, 915 pi, nintr, pi->nnmrxq)); 916#endif 917 i = idx % nintr; 918 919 if (pi->flags & INTR_RXQ) { 920 if (i < pi->nrxq) { 921 iq = &s->rxq[pi->first_rxq + i].iq; 922 goto done; 923 } 924 i -= pi->nrxq; 925 } 926#ifdef TCP_OFFLOAD 927 if (pi->flags & INTR_OFLD_RXQ) { 928 if (i < pi->nofldrxq) { 929 iq = &s->ofld_rxq[pi->first_ofld_rxq + i].iq; 930 goto done; 931 } 932 i -= pi->nofldrxq; 933 } 934#endif 935 panic("%s: pi %p, intr_flags 0x%lx, idx %d, total intr %d\n", __func__, 936 pi, pi->flags & INTR_ALL, idx, nintr); 937done: 938 MPASS(iq != NULL); 939 KASSERT(iq->flags & IQ_INTR, 940 ("%s: iq %p (port %p, intr_flags 0x%lx, idx %d)", __func__, iq, pi, 941 pi->flags & INTR_ALL, idx)); 942 return (iq); 943} 944 945/* Maximum payload that can be delivered with a single iq descriptor */ 946static inline int 947mtu_to_max_payload(struct adapter *sc, int mtu, const int toe) 948{ 949 int payload; 950 951#ifdef TCP_OFFLOAD 952 if (toe) { 953 payload = sc->tt.rx_coalesce ? 954 G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)) : mtu; 955 } else { 956#endif 957 /* large enough even when hw VLAN extraction is disabled */ 958 payload = fl_pktshift + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + 959 mtu; 960#ifdef TCP_OFFLOAD 961 } 962#endif 963 964 return (payload); 965} 966 967int 968t4_setup_port_queues(struct port_info *pi) 969{ 970 int rc = 0, i, j, intr_idx, iqid; 971 struct sge_rxq *rxq; 972 struct sge_txq *txq; 973 struct sge_wrq *ctrlq; 974#ifdef TCP_OFFLOAD 975 struct sge_ofld_rxq *ofld_rxq; 976 struct sge_wrq *ofld_txq; 977#endif 978#ifdef DEV_NETMAP 979 struct sge_nm_rxq *nm_rxq; 980 struct sge_nm_txq *nm_txq; 981#endif 982 char name[16]; 983 struct adapter *sc = pi->adapter; 984 struct ifnet *ifp = pi->ifp; 985 struct sysctl_oid *oid = device_get_sysctl_tree(pi->dev); 986 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 987 int maxp, mtu = ifp->if_mtu; 988 989 /* Interrupt vector to start from (when using multiple vectors) */ 990 intr_idx = first_vector(pi); 991 992 /* 993 * First pass over all NIC and TOE rx queues: 994 * a) initialize iq and fl 995 * b) allocate queue iff it will take direct interrupts. 996 */ 997 maxp = mtu_to_max_payload(sc, mtu, 0); 998 if (pi->flags & INTR_RXQ) { 999 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "rxq", 1000 CTLFLAG_RD, NULL, "rx queues"); 1001 } 1002 for_each_rxq(pi, i, rxq) { 1003 1004 init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, pi->qsize_rxq); 1005 1006 snprintf(name, sizeof(name), "%s rxq%d-fl", 1007 device_get_nameunit(pi->dev), i); 1008 init_fl(sc, &rxq->fl, pi->qsize_rxq / 8, maxp, name); 1009 1010 if (pi->flags & INTR_RXQ) { 1011 rxq->iq.flags |= IQ_INTR; 1012 rc = alloc_rxq(pi, rxq, intr_idx, i, oid); 1013 if (rc != 0) 1014 goto done; 1015 intr_idx++; 1016 } 1017 } 1018#ifdef TCP_OFFLOAD 1019 maxp = mtu_to_max_payload(sc, mtu, 1); 1020 if (is_offload(sc) && pi->flags & INTR_OFLD_RXQ) { 1021 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_rxq", 1022 CTLFLAG_RD, NULL, 1023 "rx queues for offloaded TCP connections"); 1024 } 1025 for_each_ofld_rxq(pi, i, ofld_rxq) { 1026 1027 init_iq(&ofld_rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, 1028 pi->qsize_rxq); 1029 1030 snprintf(name, sizeof(name), "%s ofld_rxq%d-fl", 1031 device_get_nameunit(pi->dev), i); 1032 init_fl(sc, &ofld_rxq->fl, pi->qsize_rxq / 8, maxp, name); 1033 1034 if (pi->flags & INTR_OFLD_RXQ) { 1035 ofld_rxq->iq.flags |= IQ_INTR; 1036 rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid); 1037 if (rc != 0) 1038 goto done; 1039 intr_idx++; 1040 } 1041 } 1042#endif 1043#ifdef DEV_NETMAP 1044 /* 1045 * We don't have buffers to back the netmap rx queues right now so we 1046 * create the queues in a way that doesn't set off any congestion signal 1047 * in the chip. 1048 */ 1049 if (pi->flags & INTR_NM_RXQ) { 1050 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "nm_rxq", 1051 CTLFLAG_RD, NULL, "rx queues for netmap"); 1052 for_each_nm_rxq(pi, i, nm_rxq) { 1053 rc = alloc_nm_rxq(pi, nm_rxq, intr_idx, i, oid); 1054 if (rc != 0) 1055 goto done; 1056 intr_idx++; 1057 } 1058 } 1059#endif 1060 1061 /* 1062 * Second pass over all NIC and TOE rx queues. The queues forwarding 1063 * their interrupts are allocated now. 1064 */ 1065 j = 0; 1066 if (!(pi->flags & INTR_RXQ)) { 1067 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "rxq", 1068 CTLFLAG_RD, NULL, "rx queues"); 1069 for_each_rxq(pi, i, rxq) { 1070 MPASS(!(rxq->iq.flags & IQ_INTR)); 1071 1072 intr_idx = port_intr_iq(pi, j)->abs_id; 1073 1074 rc = alloc_rxq(pi, rxq, intr_idx, i, oid); 1075 if (rc != 0) 1076 goto done; 1077 j++; 1078 } 1079 } 1080#ifdef TCP_OFFLOAD 1081 if (is_offload(sc) && !(pi->flags & INTR_OFLD_RXQ)) { 1082 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_rxq", 1083 CTLFLAG_RD, NULL, 1084 "rx queues for offloaded TCP connections"); 1085 for_each_ofld_rxq(pi, i, ofld_rxq) { 1086 MPASS(!(ofld_rxq->iq.flags & IQ_INTR)); 1087 1088 intr_idx = port_intr_iq(pi, j)->abs_id; 1089 1090 rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid); 1091 if (rc != 0) 1092 goto done; 1093 j++; 1094 } 1095 } 1096#endif 1097#ifdef DEV_NETMAP 1098 if (!(pi->flags & INTR_NM_RXQ)) 1099 CXGBE_UNIMPLEMENTED(__func__); 1100#endif 1101 1102 /* 1103 * Now the tx queues. Only one pass needed. 1104 */ 1105 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD, 1106 NULL, "tx queues"); 1107 j = 0; 1108 for_each_txq(pi, i, txq) { 1109 iqid = port_intr_iq(pi, j)->cntxt_id; 1110 snprintf(name, sizeof(name), "%s txq%d", 1111 device_get_nameunit(pi->dev), i); 1112 init_eq(&txq->eq, EQ_ETH, pi->qsize_txq, pi->tx_chan, iqid, 1113 name); 1114 1115 rc = alloc_txq(pi, txq, i, oid); 1116 if (rc != 0) 1117 goto done; 1118 j++; 1119 } 1120#ifdef TCP_OFFLOAD 1121 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_txq", 1122 CTLFLAG_RD, NULL, "tx queues for offloaded TCP connections"); 1123 for_each_ofld_txq(pi, i, ofld_txq) { 1124 struct sysctl_oid *oid2; 1125 1126 iqid = port_intr_iq(pi, j)->cntxt_id; 1127 snprintf(name, sizeof(name), "%s ofld_txq%d", 1128 device_get_nameunit(pi->dev), i); 1129 init_eq(&ofld_txq->eq, EQ_OFLD, pi->qsize_txq, pi->tx_chan, 1130 iqid, name); 1131 1132 snprintf(name, sizeof(name), "%d", i); 1133 oid2 = SYSCTL_ADD_NODE(&pi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO, 1134 name, CTLFLAG_RD, NULL, "offload tx queue"); 1135 1136 rc = alloc_wrq(sc, pi, ofld_txq, oid2); 1137 if (rc != 0) 1138 goto done; 1139 j++; 1140 } 1141#endif 1142#ifdef DEV_NETMAP 1143 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "nm_txq", 1144 CTLFLAG_RD, NULL, "tx queues for netmap use"); 1145 for_each_nm_txq(pi, i, nm_txq) { 1146 iqid = pi->first_nm_rxq + (j % pi->nnmrxq); 1147 rc = alloc_nm_txq(pi, nm_txq, iqid, i, oid); 1148 if (rc != 0) 1149 goto done; 1150 j++; 1151 } 1152#endif 1153 1154 /* 1155 * Finally, the control queue. 1156 */ 1157 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD, 1158 NULL, "ctrl queue"); 1159 ctrlq = &sc->sge.ctrlq[pi->port_id]; 1160 iqid = port_intr_iq(pi, 0)->cntxt_id; 1161 snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(pi->dev)); 1162 init_eq(&ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, iqid, name); 1163 rc = alloc_wrq(sc, pi, ctrlq, oid); 1164 1165done: 1166 if (rc) 1167 t4_teardown_port_queues(pi); 1168 1169 return (rc); 1170} 1171 1172/* 1173 * Idempotent 1174 */ 1175int 1176t4_teardown_port_queues(struct port_info *pi) 1177{ 1178 int i; 1179 struct adapter *sc = pi->adapter; 1180 struct sge_rxq *rxq; 1181 struct sge_txq *txq; 1182#ifdef TCP_OFFLOAD 1183 struct sge_ofld_rxq *ofld_rxq; 1184 struct sge_wrq *ofld_txq; 1185#endif 1186#ifdef DEV_NETMAP 1187 struct sge_nm_rxq *nm_rxq; 1188 struct sge_nm_txq *nm_txq; 1189#endif 1190 1191 /* Do this before freeing the queues */ 1192 if (pi->flags & PORT_SYSCTL_CTX) { 1193 sysctl_ctx_free(&pi->ctx); 1194 pi->flags &= ~PORT_SYSCTL_CTX; 1195 } 1196 1197 /* 1198 * Take down all the tx queues first, as they reference the rx queues 1199 * (for egress updates, etc.). 1200 */ 1201 1202 free_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 1203 1204 for_each_txq(pi, i, txq) { 1205 free_txq(pi, txq); 1206 } 1207#ifdef TCP_OFFLOAD 1208 for_each_ofld_txq(pi, i, ofld_txq) { 1209 free_wrq(sc, ofld_txq); 1210 } 1211#endif 1212#ifdef DEV_NETMAP 1213 for_each_nm_txq(pi, i, nm_txq) 1214 free_nm_txq(pi, nm_txq); 1215#endif 1216 1217 /* 1218 * Then take down the rx queues that forward their interrupts, as they 1219 * reference other rx queues. 1220 */ 1221 1222 for_each_rxq(pi, i, rxq) { 1223 if ((rxq->iq.flags & IQ_INTR) == 0) 1224 free_rxq(pi, rxq); 1225 } 1226#ifdef TCP_OFFLOAD 1227 for_each_ofld_rxq(pi, i, ofld_rxq) { 1228 if ((ofld_rxq->iq.flags & IQ_INTR) == 0) 1229 free_ofld_rxq(pi, ofld_rxq); 1230 } 1231#endif 1232#ifdef DEV_NETMAP 1233 for_each_nm_rxq(pi, i, nm_rxq) 1234 free_nm_rxq(pi, nm_rxq); 1235#endif 1236 1237 /* 1238 * Then take down the rx queues that take direct interrupts. 1239 */ 1240 1241 for_each_rxq(pi, i, rxq) { 1242 if (rxq->iq.flags & IQ_INTR) 1243 free_rxq(pi, rxq); 1244 } 1245#ifdef TCP_OFFLOAD 1246 for_each_ofld_rxq(pi, i, ofld_rxq) { 1247 if (ofld_rxq->iq.flags & IQ_INTR) 1248 free_ofld_rxq(pi, ofld_rxq); 1249 } 1250#endif 1251 1252 return (0); 1253} 1254 1255/* 1256 * Deals with errors and the firmware event queue. All data rx queues forward 1257 * their interrupt to the firmware event queue. 1258 */ 1259void 1260t4_intr_all(void *arg) 1261{ 1262 struct adapter *sc = arg; 1263 struct sge_iq *fwq = &sc->sge.fwq; 1264 1265 t4_intr_err(arg); 1266 if (atomic_cmpset_int(&fwq->state, IQS_IDLE, IQS_BUSY)) { 1267 service_iq(fwq, 0); 1268 atomic_cmpset_int(&fwq->state, IQS_BUSY, IQS_IDLE); 1269 } 1270} 1271 1272/* Deals with error interrupts */ 1273void 1274t4_intr_err(void *arg) 1275{ 1276 struct adapter *sc = arg; 1277 1278 t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0); 1279 t4_slow_intr_handler(sc); 1280} 1281 1282void 1283t4_intr_evt(void *arg) 1284{ 1285 struct sge_iq *iq = arg; 1286 1287 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { 1288 service_iq(iq, 0); 1289 atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); 1290 } 1291} 1292 1293void 1294t4_intr(void *arg) 1295{ 1296 struct sge_iq *iq = arg; 1297 1298 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { 1299 service_iq(iq, 0); 1300 atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); 1301 } 1302} 1303 1304/* 1305 * Deals with anything and everything on the given ingress queue. 1306 */ 1307static int 1308service_iq(struct sge_iq *iq, int budget) 1309{ 1310 struct sge_iq *q; 1311 struct sge_rxq *rxq = iq_to_rxq(iq); /* Use iff iq is part of rxq */ 1312 struct sge_fl *fl; /* Use iff IQ_HAS_FL */ 1313 struct adapter *sc = iq->adapter; 1314 struct iq_desc *d = &iq->desc[iq->cidx]; 1315 int ndescs = 0, limit; 1316 int rsp_type, refill; 1317 uint32_t lq; 1318 uint16_t fl_hw_cidx; 1319 struct mbuf *m0; 1320 STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql); 1321#if defined(INET) || defined(INET6) 1322 const struct timeval lro_timeout = {0, sc->lro_timeout}; 1323#endif 1324 1325 KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); 1326 1327 limit = budget ? budget : iq->qsize / 16; 1328 1329 if (iq->flags & IQ_HAS_FL) { 1330 fl = &rxq->fl; 1331 fl_hw_cidx = fl->hw_cidx; /* stable snapshot */ 1332 } else { 1333 fl = NULL; 1334 fl_hw_cidx = 0; /* to silence gcc warning */ 1335 } 1336 1337 /* 1338 * We always come back and check the descriptor ring for new indirect 1339 * interrupts and other responses after running a single handler. 1340 */ 1341 for (;;) { 1342 while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) { 1343 1344 rmb(); 1345 1346 refill = 0; 1347 m0 = NULL; 1348 rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen); 1349 lq = be32toh(d->rsp.pldbuflen_qid); 1350 1351 switch (rsp_type) { 1352 case X_RSPD_TYPE_FLBUF: 1353 1354 KASSERT(iq->flags & IQ_HAS_FL, 1355 ("%s: data for an iq (%p) with no freelist", 1356 __func__, iq)); 1357 1358 m0 = get_fl_payload(sc, fl, lq); 1359 if (__predict_false(m0 == NULL)) 1360 goto process_iql; 1361 refill = IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 2; 1362#ifdef T4_PKT_TIMESTAMP 1363 /* 1364 * 60 bit timestamp for the payload is 1365 * *(uint64_t *)m0->m_pktdat. Note that it is 1366 * in the leading free-space in the mbuf. The 1367 * kernel can clobber it during a pullup, 1368 * m_copymdata, etc. You need to make sure that 1369 * the mbuf reaches you unmolested if you care 1370 * about the timestamp. 1371 */ 1372 *(uint64_t *)m0->m_pktdat = 1373 be64toh(ctrl->u.last_flit) & 1374 0xfffffffffffffff; 1375#endif 1376 1377 /* fall through */ 1378 1379 case X_RSPD_TYPE_CPL: 1380 KASSERT(d->rss.opcode < NUM_CPL_CMDS, 1381 ("%s: bad opcode %02x.", __func__, 1382 d->rss.opcode)); 1383 sc->cpl_handler[d->rss.opcode](iq, &d->rss, m0); 1384 break; 1385 1386 case X_RSPD_TYPE_INTR: 1387 1388 /* 1389 * Interrupts should be forwarded only to queues 1390 * that are not forwarding their interrupts. 1391 * This means service_iq can recurse but only 1 1392 * level deep. 1393 */ 1394 KASSERT(budget == 0, 1395 ("%s: budget %u, rsp_type %u", __func__, 1396 budget, rsp_type)); 1397 1398 /* 1399 * There are 1K interrupt-capable queues (qids 0 1400 * through 1023). A response type indicating a 1401 * forwarded interrupt with a qid >= 1K is an 1402 * iWARP async notification. 1403 */ 1404 if (lq >= 1024) { 1405 sc->an_handler(iq, &d->rsp); 1406 break; 1407 } 1408 1409 q = sc->sge.iqmap[lq - sc->sge.iq_start]; 1410 if (atomic_cmpset_int(&q->state, IQS_IDLE, 1411 IQS_BUSY)) { 1412 if (service_iq(q, q->qsize / 16) == 0) { 1413 atomic_cmpset_int(&q->state, 1414 IQS_BUSY, IQS_IDLE); 1415 } else { 1416 STAILQ_INSERT_TAIL(&iql, q, 1417 link); 1418 } 1419 } 1420 break; 1421 1422 default: 1423 KASSERT(0, 1424 ("%s: illegal response type %d on iq %p", 1425 __func__, rsp_type, iq)); 1426 log(LOG_ERR, 1427 "%s: illegal response type %d on iq %p", 1428 device_get_nameunit(sc->dev), rsp_type, iq); 1429 break; 1430 } 1431 1432 d++; 1433 if (__predict_false(++iq->cidx == iq->sidx)) { 1434 iq->cidx = 0; 1435 iq->gen ^= F_RSPD_GEN; 1436 d = &iq->desc[0]; 1437 } 1438 if (__predict_false(++ndescs == limit)) { 1439 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 1440 V_CIDXINC(ndescs) | 1441 V_INGRESSQID(iq->cntxt_id) | 1442 V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); 1443 ndescs = 0; 1444 1445#if defined(INET) || defined(INET6) 1446 if (iq->flags & IQ_LRO_ENABLED && 1447 sc->lro_timeout != 0) { 1448 tcp_lro_flush_inactive(&rxq->lro, 1449 &lro_timeout); 1450 } 1451#endif 1452 1453 if (budget) { 1454 if (iq->flags & IQ_HAS_FL) { 1455 FL_LOCK(fl); 1456 refill_fl(sc, fl, 32); 1457 FL_UNLOCK(fl); 1458 } 1459 return (EINPROGRESS); 1460 } 1461 } 1462 if (refill) { 1463 FL_LOCK(fl); 1464 refill_fl(sc, fl, 32); 1465 FL_UNLOCK(fl); 1466 fl_hw_cidx = fl->hw_cidx; 1467 } 1468 } 1469 1470process_iql: 1471 if (STAILQ_EMPTY(&iql)) 1472 break; 1473 1474 /* 1475 * Process the head only, and send it to the back of the list if 1476 * it's still not done. 1477 */ 1478 q = STAILQ_FIRST(&iql); 1479 STAILQ_REMOVE_HEAD(&iql, link); 1480 if (service_iq(q, q->qsize / 8) == 0) 1481 atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE); 1482 else 1483 STAILQ_INSERT_TAIL(&iql, q, link); 1484 } 1485 1486#if defined(INET) || defined(INET6) 1487 if (iq->flags & IQ_LRO_ENABLED) { 1488 struct lro_ctrl *lro = &rxq->lro; 1489 struct lro_entry *l; 1490 1491 while (!SLIST_EMPTY(&lro->lro_active)) { 1492 l = SLIST_FIRST(&lro->lro_active); 1493 SLIST_REMOVE_HEAD(&lro->lro_active, next); 1494 tcp_lro_flush(lro, l); 1495 } 1496 } 1497#endif 1498 1499 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) | 1500 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); 1501 1502 if (iq->flags & IQ_HAS_FL) { 1503 int starved; 1504 1505 FL_LOCK(fl); 1506 starved = refill_fl(sc, fl, 64); 1507 FL_UNLOCK(fl); 1508 if (__predict_false(starved != 0)) 1509 add_fl_to_sfl(sc, fl); 1510 } 1511 1512 return (0); 1513} 1514 1515static inline int 1516cl_has_metadata(struct sge_fl *fl, struct cluster_layout *cll) 1517{ 1518 int rc = fl->flags & FL_BUF_PACKING || cll->region1 > 0; 1519 1520 if (rc) 1521 MPASS(cll->region3 >= CL_METADATA_SIZE); 1522 1523 return (rc); 1524} 1525 1526static inline struct cluster_metadata * 1527cl_metadata(struct adapter *sc, struct sge_fl *fl, struct cluster_layout *cll, 1528 caddr_t cl) 1529{ 1530 1531 if (cl_has_metadata(fl, cll)) { 1532 struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx]; 1533 1534 return ((struct cluster_metadata *)(cl + swz->size) - 1); 1535 } 1536 return (NULL); 1537} 1538 1539static int 1540rxb_free(struct mbuf *m, void *arg1, void *arg2) 1541{ 1542 uma_zone_t zone = arg1; 1543 caddr_t cl = arg2; 1544 1545 uma_zfree(zone, cl); 1546 counter_u64_add(extfree_rels, 1); 1547 1548 return (EXT_FREE_OK); 1549} 1550 1551/* 1552 * The mbuf returned by this function could be allocated from zone_mbuf or 1553 * constructed in spare room in the cluster. 1554 * 1555 * The mbuf carries the payload in one of these ways 1556 * a) frame inside the mbuf (mbuf from zone_mbuf) 1557 * b) m_cljset (for clusters without metadata) zone_mbuf 1558 * c) m_extaddref (cluster with metadata) inline mbuf 1559 * d) m_extaddref (cluster with metadata) zone_mbuf 1560 */ 1561static struct mbuf * 1562get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset, 1563 int remaining) 1564{ 1565 struct mbuf *m; 1566 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; 1567 struct cluster_layout *cll = &sd->cll; 1568 struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx]; 1569 struct hw_buf_info *hwb = &sc->sge.hw_buf_info[cll->hwidx]; 1570 struct cluster_metadata *clm = cl_metadata(sc, fl, cll, sd->cl); 1571 int len, blen; 1572 caddr_t payload; 1573 1574 blen = hwb->size - fl->rx_offset; /* max possible in this buf */ 1575 len = min(remaining, blen); 1576 payload = sd->cl + cll->region1 + fl->rx_offset; 1577 if (fl->flags & FL_BUF_PACKING) { 1578 const u_int l = fr_offset + len; 1579 const u_int pad = roundup2(l, fl->buf_boundary) - l; 1580 1581 if (fl->rx_offset + len + pad < hwb->size) 1582 blen = len + pad; 1583 MPASS(fl->rx_offset + blen <= hwb->size); 1584 } else { 1585 MPASS(fl->rx_offset == 0); /* not packing */ 1586 } 1587 1588 1589 if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) { 1590 1591 /* 1592 * Copy payload into a freshly allocated mbuf. 1593 */ 1594 1595 m = fr_offset == 0 ? 1596 m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA); 1597 if (m == NULL) 1598 return (NULL); 1599 fl->mbuf_allocated++; 1600#ifdef T4_PKT_TIMESTAMP 1601 /* Leave room for a timestamp */ 1602 m->m_data += 8; 1603#endif 1604 /* copy data to mbuf */ 1605 bcopy(payload, mtod(m, caddr_t), len); 1606 1607 } else if (sd->nmbuf * MSIZE < cll->region1) { 1608 1609 /* 1610 * There's spare room in the cluster for an mbuf. Create one 1611 * and associate it with the payload that's in the cluster. 1612 */ 1613 1614 MPASS(clm != NULL); 1615 m = (struct mbuf *)(sd->cl + sd->nmbuf * MSIZE); 1616 /* No bzero required */ 1617 if (m_init(m, NULL, 0, M_NOWAIT, MT_DATA, 1618 fr_offset == 0 ? M_PKTHDR | M_NOFREE : M_NOFREE)) 1619 return (NULL); 1620 fl->mbuf_inlined++; 1621 m_extaddref(m, payload, blen, &clm->refcount, rxb_free, 1622 swz->zone, sd->cl); 1623 if (sd->nmbuf++ == 0) 1624 counter_u64_add(extfree_refs, 1); 1625 1626 } else { 1627 1628 /* 1629 * Grab an mbuf from zone_mbuf and associate it with the 1630 * payload in the cluster. 1631 */ 1632 1633 m = fr_offset == 0 ? 1634 m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA); 1635 if (m == NULL) 1636 return (NULL); 1637 fl->mbuf_allocated++; 1638 if (clm != NULL) { 1639 m_extaddref(m, payload, blen, &clm->refcount, 1640 rxb_free, swz->zone, sd->cl); 1641 if (sd->nmbuf++ == 0) 1642 counter_u64_add(extfree_refs, 1); 1643 } else { 1644 m_cljset(m, sd->cl, swz->type); 1645 sd->cl = NULL; /* consumed, not a recycle candidate */ 1646 } 1647 } 1648 if (fr_offset == 0) 1649 m->m_pkthdr.len = remaining; 1650 m->m_len = len; 1651 1652 if (fl->flags & FL_BUF_PACKING) { 1653 fl->rx_offset += blen; 1654 MPASS(fl->rx_offset <= hwb->size); 1655 if (fl->rx_offset < hwb->size) 1656 return (m); /* without advancing the cidx */ 1657 } 1658 1659 if (__predict_false(++fl->cidx % 8 == 0)) { 1660 uint16_t cidx = fl->cidx / 8; 1661 1662 if (__predict_false(cidx == fl->sidx)) 1663 fl->cidx = cidx = 0; 1664 fl->hw_cidx = cidx; 1665 } 1666 fl->rx_offset = 0; 1667 1668 return (m); 1669} 1670 1671static struct mbuf * 1672get_fl_payload(struct adapter *sc, struct sge_fl *fl, uint32_t len_newbuf) 1673{ 1674 struct mbuf *m0, *m, **pnext; 1675 u_int remaining; 1676 const u_int total = G_RSPD_LEN(len_newbuf); 1677 1678 if (__predict_false(fl->flags & FL_BUF_RESUME)) { 1679 M_ASSERTPKTHDR(fl->m0); 1680 MPASS(fl->m0->m_pkthdr.len == total); 1681 MPASS(fl->remaining < total); 1682 1683 m0 = fl->m0; 1684 pnext = fl->pnext; 1685 remaining = fl->remaining; 1686 fl->flags &= ~FL_BUF_RESUME; 1687 goto get_segment; 1688 } 1689 1690 if (fl->rx_offset > 0 && len_newbuf & F_RSPD_NEWBUF) { 1691 fl->rx_offset = 0; 1692 if (__predict_false(++fl->cidx % 8 == 0)) { 1693 uint16_t cidx = fl->cidx / 8; 1694 1695 if (__predict_false(cidx == fl->sidx)) 1696 fl->cidx = cidx = 0; 1697 fl->hw_cidx = cidx; 1698 } 1699 } 1700 1701 /* 1702 * Payload starts at rx_offset in the current hw buffer. Its length is 1703 * 'len' and it may span multiple hw buffers. 1704 */ 1705 1706 m0 = get_scatter_segment(sc, fl, 0, total); 1707 if (m0 == NULL) 1708 return (NULL); 1709 remaining = total - m0->m_len; 1710 pnext = &m0->m_next; 1711 while (remaining > 0) { 1712get_segment: 1713 MPASS(fl->rx_offset == 0); 1714 m = get_scatter_segment(sc, fl, total - remaining, remaining); 1715 if (__predict_false(m == NULL)) { 1716 fl->m0 = m0; 1717 fl->pnext = pnext; 1718 fl->remaining = remaining; 1719 fl->flags |= FL_BUF_RESUME; 1720 return (NULL); 1721 } 1722 *pnext = m; 1723 pnext = &m->m_next; 1724 remaining -= m->m_len; 1725 } 1726 *pnext = NULL; 1727 1728 return (m0); 1729} 1730 1731static int 1732t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0) 1733{ 1734 struct sge_rxq *rxq = iq_to_rxq(iq); 1735 struct ifnet *ifp = rxq->ifp; 1736 const struct cpl_rx_pkt *cpl = (const void *)(rss + 1); 1737#if defined(INET) || defined(INET6) 1738 struct lro_ctrl *lro = &rxq->lro; 1739#endif 1740 1741 KASSERT(m0 != NULL, ("%s: no payload with opcode %02x", __func__, 1742 rss->opcode)); 1743 1744 m0->m_pkthdr.len -= fl_pktshift; 1745 m0->m_len -= fl_pktshift; 1746 m0->m_data += fl_pktshift; 1747 1748 m0->m_pkthdr.rcvif = ifp; 1749 M_HASHTYPE_SET(m0, M_HASHTYPE_OPAQUE); 1750 m0->m_pkthdr.flowid = be32toh(rss->hash_val); 1751 1752 if (cpl->csum_calc && !cpl->err_vec) { 1753 if (ifp->if_capenable & IFCAP_RXCSUM && 1754 cpl->l2info & htobe32(F_RXF_IP)) { 1755 m0->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | 1756 CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 1757 rxq->rxcsum++; 1758 } else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 && 1759 cpl->l2info & htobe32(F_RXF_IP6)) { 1760 m0->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 | 1761 CSUM_PSEUDO_HDR); 1762 rxq->rxcsum++; 1763 } 1764 1765 if (__predict_false(cpl->ip_frag)) 1766 m0->m_pkthdr.csum_data = be16toh(cpl->csum); 1767 else 1768 m0->m_pkthdr.csum_data = 0xffff; 1769 } 1770 1771 if (cpl->vlan_ex) { 1772 m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); 1773 m0->m_flags |= M_VLANTAG; 1774 rxq->vlan_extraction++; 1775 } 1776 1777#if defined(INET) || defined(INET6) 1778 if (cpl->l2info & htobe32(F_RXF_LRO) && 1779 iq->flags & IQ_LRO_ENABLED && 1780 tcp_lro_rx(lro, m0, 0) == 0) { 1781 /* queued for LRO */ 1782 } else 1783#endif 1784 ifp->if_input(ifp, m0); 1785 1786 return (0); 1787} 1788 1789/* 1790 * Must drain the wrq or make sure that someone else will. 1791 */ 1792static void 1793wrq_tx_drain(void *arg, int n) 1794{ 1795 struct sge_wrq *wrq = arg; 1796 struct sge_eq *eq = &wrq->eq; 1797 1798 EQ_LOCK(eq); 1799 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 1800 drain_wrq_wr_list(wrq->adapter, wrq); 1801 EQ_UNLOCK(eq); 1802} 1803 1804static void 1805drain_wrq_wr_list(struct adapter *sc, struct sge_wrq *wrq) 1806{ 1807 struct sge_eq *eq = &wrq->eq; 1808 u_int available, dbdiff; /* # of hardware descriptors */ 1809 u_int n; 1810 struct wrqe *wr; 1811 struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ 1812 1813 EQ_LOCK_ASSERT_OWNED(eq); 1814 MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs)); 1815 wr = STAILQ_FIRST(&wrq->wr_list); 1816 MPASS(wr != NULL); /* Must be called with something useful to do */ 1817 dbdiff = IDXDIFF(eq->pidx, eq->dbidx, eq->sidx); 1818 1819 do { 1820 eq->cidx = read_hw_cidx(eq); 1821 if (eq->pidx == eq->cidx) 1822 available = eq->sidx - 1; 1823 else 1824 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 1825 1826 MPASS(wr->wrq == wrq); 1827 n = howmany(wr->wr_len, EQ_ESIZE); 1828 if (available < n) 1829 return; 1830 1831 dst = (void *)&eq->desc[eq->pidx]; 1832 if (__predict_true(eq->sidx - eq->pidx > n)) { 1833 /* Won't wrap, won't end exactly at the status page. */ 1834 bcopy(&wr->wr[0], dst, wr->wr_len); 1835 eq->pidx += n; 1836 } else { 1837 int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE; 1838 1839 bcopy(&wr->wr[0], dst, first_portion); 1840 if (wr->wr_len > first_portion) { 1841 bcopy(&wr->wr[first_portion], &eq->desc[0], 1842 wr->wr_len - first_portion); 1843 } 1844 eq->pidx = n - (eq->sidx - eq->pidx); 1845 } 1846 1847 if (available < eq->sidx / 4 && 1848 atomic_cmpset_int(&eq->equiq, 0, 1)) { 1849 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | 1850 F_FW_WR_EQUEQ); 1851 eq->equeqidx = eq->pidx; 1852 } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) { 1853 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); 1854 eq->equeqidx = eq->pidx; 1855 } 1856 1857 dbdiff += n; 1858 if (dbdiff >= 16) { 1859 ring_eq_db(sc, eq, dbdiff); 1860 dbdiff = 0; 1861 } 1862 1863 STAILQ_REMOVE_HEAD(&wrq->wr_list, link); 1864 free_wrqe(wr); 1865 MPASS(wrq->nwr_pending > 0); 1866 wrq->nwr_pending--; 1867 MPASS(wrq->ndesc_needed >= n); 1868 wrq->ndesc_needed -= n; 1869 } while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL); 1870 1871 if (dbdiff) 1872 ring_eq_db(sc, eq, dbdiff); 1873} 1874 1875/* 1876 * Doesn't fail. Holds on to work requests it can't send right away. 1877 */ 1878void 1879t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr) 1880{ 1881#ifdef INVARIANTS 1882 struct sge_eq *eq = &wrq->eq; 1883#endif 1884 1885 EQ_LOCK_ASSERT_OWNED(eq); 1886 MPASS(wr != NULL); 1887 MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN); 1888 MPASS((wr->wr_len & 0x7) == 0); 1889 1890 STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link); 1891 wrq->nwr_pending++; 1892 wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE); 1893 1894 if (!TAILQ_EMPTY(&wrq->incomplete_wrs)) 1895 return; /* commit_wrq_wr will drain wr_list as well. */ 1896 1897 drain_wrq_wr_list(sc, wrq); 1898 1899 /* Doorbell must have caught up to the pidx. */ 1900 MPASS(eq->pidx == eq->dbidx); 1901} 1902 1903void 1904t4_update_fl_bufsize(struct ifnet *ifp) 1905{ 1906 struct port_info *pi = ifp->if_softc; 1907 struct adapter *sc = pi->adapter; 1908 struct sge_rxq *rxq; 1909#ifdef TCP_OFFLOAD 1910 struct sge_ofld_rxq *ofld_rxq; 1911#endif 1912 struct sge_fl *fl; 1913 int i, maxp, mtu = ifp->if_mtu; 1914 1915 maxp = mtu_to_max_payload(sc, mtu, 0); 1916 for_each_rxq(pi, i, rxq) { 1917 fl = &rxq->fl; 1918 1919 FL_LOCK(fl); 1920 find_best_refill_source(sc, fl, maxp); 1921 FL_UNLOCK(fl); 1922 } 1923#ifdef TCP_OFFLOAD 1924 maxp = mtu_to_max_payload(sc, mtu, 1); 1925 for_each_ofld_rxq(pi, i, ofld_rxq) { 1926 fl = &ofld_rxq->fl; 1927 1928 FL_LOCK(fl); 1929 find_best_refill_source(sc, fl, maxp); 1930 FL_UNLOCK(fl); 1931 } 1932#endif 1933} 1934 1935static inline int 1936mbuf_nsegs(struct mbuf *m) 1937{ 1938 1939 M_ASSERTPKTHDR(m); 1940 KASSERT(m->m_pkthdr.l5hlen > 0, 1941 ("%s: mbuf %p missing information on # of segments.", __func__, m)); 1942 1943 return (m->m_pkthdr.l5hlen); 1944} 1945 1946static inline void 1947set_mbuf_nsegs(struct mbuf *m, uint8_t nsegs) 1948{ 1949 1950 M_ASSERTPKTHDR(m); 1951 m->m_pkthdr.l5hlen = nsegs; 1952} 1953 1954static inline int 1955mbuf_len16(struct mbuf *m) 1956{ 1957 int n; 1958 1959 M_ASSERTPKTHDR(m); 1960 n = m->m_pkthdr.PH_loc.eigth[0]; 1961 MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16); 1962 1963 return (n); 1964} 1965 1966static inline void 1967set_mbuf_len16(struct mbuf *m, uint8_t len16) 1968{ 1969 1970 M_ASSERTPKTHDR(m); 1971 m->m_pkthdr.PH_loc.eigth[0] = len16; 1972} 1973 1974static inline int 1975needs_tso(struct mbuf *m) 1976{ 1977 1978 M_ASSERTPKTHDR(m); 1979 1980 if (m->m_pkthdr.csum_flags & CSUM_TSO) { 1981 KASSERT(m->m_pkthdr.tso_segsz > 0, 1982 ("%s: TSO requested in mbuf %p but MSS not provided", 1983 __func__, m)); 1984 return (1); 1985 } 1986 1987 return (0); 1988} 1989 1990static inline int 1991needs_l3_csum(struct mbuf *m) 1992{ 1993 1994 M_ASSERTPKTHDR(m); 1995 1996 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) 1997 return (1); 1998 return (0); 1999} 2000 2001static inline int 2002needs_l4_csum(struct mbuf *m) 2003{ 2004 2005 M_ASSERTPKTHDR(m); 2006 2007 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | 2008 CSUM_TCP_IPV6 | CSUM_TSO)) 2009 return (1); 2010 return (0); 2011} 2012 2013static inline int 2014needs_vlan_insertion(struct mbuf *m) 2015{ 2016 2017 M_ASSERTPKTHDR(m); 2018 2019 if (m->m_flags & M_VLANTAG) { 2020 KASSERT(m->m_pkthdr.ether_vtag != 0, 2021 ("%s: HWVLAN requested in mbuf %p but tag not provided", 2022 __func__, m)); 2023 return (1); 2024 } 2025 return (0); 2026} 2027 2028static void * 2029m_advance(struct mbuf **pm, int *poffset, int len) 2030{ 2031 struct mbuf *m = *pm; 2032 int offset = *poffset; 2033 uintptr_t p = 0; 2034 2035 MPASS(len > 0); 2036 2037 while (len) { 2038 if (offset + len < m->m_len) { 2039 offset += len; 2040 p = mtod(m, uintptr_t) + offset; 2041 break; 2042 } 2043 len -= m->m_len - offset; 2044 m = m->m_next; 2045 offset = 0; 2046 MPASS(m != NULL); 2047 } 2048 *poffset = offset; 2049 *pm = m; 2050 return ((void *)p); 2051} 2052 2053static inline int 2054same_paddr(char *a, char *b) 2055{ 2056 2057 if (a == b) 2058 return (1); 2059 else if (a != NULL && b != NULL) { 2060 vm_offset_t x = (vm_offset_t)a; 2061 vm_offset_t y = (vm_offset_t)b; 2062 2063 if ((x & PAGE_MASK) == (y & PAGE_MASK) && 2064 pmap_kextract(x) == pmap_kextract(y)) 2065 return (1); 2066 } 2067 2068 return (0); 2069} 2070 2071/* 2072 * Can deal with empty mbufs in the chain that have m_len = 0, but the chain 2073 * must have at least one mbuf that's not empty. 2074 */ 2075static inline int 2076count_mbuf_nsegs(struct mbuf *m) 2077{ 2078 char *prev_end, *start; 2079 int len, nsegs; 2080 2081 MPASS(m != NULL); 2082 2083 nsegs = 0; 2084 prev_end = NULL; 2085 for (; m; m = m->m_next) { 2086 2087 len = m->m_len; 2088 if (__predict_false(len == 0)) 2089 continue; 2090 start = mtod(m, char *); 2091 2092 nsegs += sglist_count(start, len); 2093 if (same_paddr(prev_end, start)) 2094 nsegs--; 2095 prev_end = start + len; 2096 } 2097 2098 MPASS(nsegs > 0); 2099 return (nsegs); 2100} 2101 2102/* 2103 * Analyze the mbuf to determine its tx needs. The mbuf passed in may change: 2104 * a) caller can assume it's been freed if this function returns with an error. 2105 * b) it may get defragged up if the gather list is too long for the hardware. 2106 */ 2107int 2108parse_pkt(struct mbuf **mp) 2109{ 2110 struct mbuf *m0 = *mp, *m; 2111 int rc, nsegs, defragged = 0, offset; 2112 struct ether_header *eh; 2113 void *l3hdr; 2114#if defined(INET) || defined(INET6) 2115 struct tcphdr *tcp; 2116#endif 2117 uint16_t eh_type; 2118 2119 M_ASSERTPKTHDR(m0); 2120 if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) { 2121 rc = EINVAL; 2122fail: 2123 m_freem(m0); 2124 *mp = NULL; 2125 return (rc); 2126 } 2127restart: 2128 /* 2129 * First count the number of gather list segments in the payload. 2130 * Defrag the mbuf if nsegs exceeds the hardware limit. 2131 */ 2132 M_ASSERTPKTHDR(m0); 2133 MPASS(m0->m_pkthdr.len > 0); 2134 nsegs = count_mbuf_nsegs(m0); 2135 if (nsegs > (needs_tso(m0) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)) { 2136 if (defragged++ > 0 || (m = m_defrag(m0, M_NOWAIT)) == NULL) { 2137 rc = EFBIG; 2138 goto fail; 2139 } 2140 *mp = m0 = m; /* update caller's copy after defrag */ 2141 goto restart; 2142 } 2143 2144 if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN)) { 2145 m0 = m_pullup(m0, m0->m_pkthdr.len); 2146 if (m0 == NULL) { 2147 /* Should have left well enough alone. */ 2148 rc = EFBIG; 2149 goto fail; 2150 } 2151 *mp = m0; /* update caller's copy after pullup */ 2152 goto restart; 2153 } 2154 set_mbuf_nsegs(m0, nsegs); 2155 set_mbuf_len16(m0, txpkt_len16(nsegs, needs_tso(m0))); 2156 2157 if (!needs_tso(m0)) 2158 return (0); 2159 2160 m = m0; 2161 eh = mtod(m, struct ether_header *); 2162 eh_type = ntohs(eh->ether_type); 2163 if (eh_type == ETHERTYPE_VLAN) { 2164 struct ether_vlan_header *evh = (void *)eh; 2165 2166 eh_type = ntohs(evh->evl_proto); 2167 m0->m_pkthdr.l2hlen = sizeof(*evh); 2168 } else 2169 m0->m_pkthdr.l2hlen = sizeof(*eh); 2170 2171 offset = 0; 2172 l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen); 2173 2174 switch (eh_type) { 2175#ifdef INET6 2176 case ETHERTYPE_IPV6: 2177 { 2178 struct ip6_hdr *ip6 = l3hdr; 2179 2180 MPASS(ip6->ip6_nxt == IPPROTO_TCP); 2181 2182 m0->m_pkthdr.l3hlen = sizeof(*ip6); 2183 break; 2184 } 2185#endif 2186#ifdef INET 2187 case ETHERTYPE_IP: 2188 { 2189 struct ip *ip = l3hdr; 2190 2191 m0->m_pkthdr.l3hlen = ip->ip_hl * 4; 2192 break; 2193 } 2194#endif 2195 default: 2196 panic("%s: ethertype 0x%04x unknown. if_cxgbe must be compiled" 2197 " with the same INET/INET6 options as the kernel.", 2198 __func__, eh_type); 2199 } 2200 2201#if defined(INET) || defined(INET6) 2202 tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen); 2203 m0->m_pkthdr.l4hlen = tcp->th_off * 4; 2204#endif 2205 MPASS(m0 == *mp); 2206 return (0); 2207} 2208 2209void * 2210start_wrq_wr(struct sge_wrq *wrq, int len16, struct wrq_cookie *cookie) 2211{ 2212 struct sge_eq *eq = &wrq->eq; 2213 struct adapter *sc = wrq->adapter; 2214 int ndesc, available; 2215 struct wrqe *wr; 2216 void *w; 2217 2218 MPASS(len16 > 0); 2219 ndesc = howmany(len16, EQ_ESIZE / 16); 2220 MPASS(ndesc > 0 && ndesc <= SGE_MAX_WR_NDESC); 2221 2222 EQ_LOCK(eq); 2223 2224 if (!STAILQ_EMPTY(&wrq->wr_list)) 2225 drain_wrq_wr_list(sc, wrq); 2226 2227 if (!STAILQ_EMPTY(&wrq->wr_list)) { 2228slowpath: 2229 EQ_UNLOCK(eq); 2230 wr = alloc_wrqe(len16 * 16, wrq); 2231 if (__predict_false(wr == NULL)) 2232 return (NULL); 2233 cookie->pidx = -1; 2234 cookie->ndesc = ndesc; 2235 return (&wr->wr); 2236 } 2237 2238 eq->cidx = read_hw_cidx(eq); 2239 if (eq->pidx == eq->cidx) 2240 available = eq->sidx - 1; 2241 else 2242 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2243 if (available < ndesc) 2244 goto slowpath; 2245 2246 cookie->pidx = eq->pidx; 2247 cookie->ndesc = ndesc; 2248 TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link); 2249 2250 w = &eq->desc[eq->pidx]; 2251 IDXINCR(eq->pidx, ndesc, eq->sidx); 2252 if (__predict_false(eq->pidx < ndesc - 1)) { 2253 w = &wrq->ss[0]; 2254 wrq->ss_pidx = cookie->pidx; 2255 wrq->ss_len = len16 * 16; 2256 } 2257 2258 EQ_UNLOCK(eq); 2259 2260 return (w); 2261} 2262 2263void 2264commit_wrq_wr(struct sge_wrq *wrq, void *w, struct wrq_cookie *cookie) 2265{ 2266 struct sge_eq *eq = &wrq->eq; 2267 struct adapter *sc = wrq->adapter; 2268 int ndesc, pidx; 2269 struct wrq_cookie *prev, *next; 2270 2271 if (cookie->pidx == -1) { 2272 struct wrqe *wr = __containerof(w, struct wrqe, wr); 2273 2274 t4_wrq_tx(sc, wr); 2275 return; 2276 } 2277 2278 ndesc = cookie->ndesc; /* Can be more than SGE_MAX_WR_NDESC here. */ 2279 pidx = cookie->pidx; 2280 MPASS(pidx >= 0 && pidx < eq->sidx); 2281 if (__predict_false(w == &wrq->ss[0])) { 2282 int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE; 2283 2284 MPASS(wrq->ss_len > n); /* WR had better wrap around. */ 2285 bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n); 2286 bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n); 2287 wrq->tx_wrs_ss++; 2288 } else 2289 wrq->tx_wrs_direct++; 2290 2291 EQ_LOCK(eq); 2292 prev = TAILQ_PREV(cookie, wrq_incomplete_wrs, link); 2293 next = TAILQ_NEXT(cookie, link); 2294 if (prev == NULL) { 2295 MPASS(pidx == eq->dbidx); 2296 if (next == NULL || ndesc >= 16) 2297 ring_eq_db(wrq->adapter, eq, ndesc); 2298 else { 2299 MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc); 2300 next->pidx = pidx; 2301 next->ndesc += ndesc; 2302 } 2303 } else { 2304 MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc); 2305 prev->ndesc += ndesc; 2306 } 2307 TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link); 2308 2309 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 2310 drain_wrq_wr_list(sc, wrq); 2311 2312#ifdef INVARIANTS 2313 if (TAILQ_EMPTY(&wrq->incomplete_wrs)) { 2314 /* Doorbell must have caught up to the pidx. */ 2315 MPASS(wrq->eq.pidx == wrq->eq.dbidx); 2316 } 2317#endif 2318 EQ_UNLOCK(eq); 2319} 2320 2321static u_int 2322can_resume_eth_tx(struct mp_ring *r) 2323{ 2324 struct sge_eq *eq = r->cookie; 2325 2326 return (total_available_tx_desc(eq) > eq->sidx / 8); 2327} 2328 2329static inline int 2330cannot_use_txpkts(struct mbuf *m) 2331{ 2332 /* maybe put a GL limit too, to avoid silliness? */ 2333 2334 return (needs_tso(m)); 2335} 2336 2337/* 2338 * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to 2339 * be consumed. Return the actual number consumed. 0 indicates a stall. 2340 */ 2341static u_int 2342eth_tx(struct mp_ring *r, u_int cidx, u_int pidx) 2343{ 2344 struct sge_txq *txq = r->cookie; 2345 struct sge_eq *eq = &txq->eq; 2346 struct ifnet *ifp = txq->ifp; 2347 struct port_info *pi = (void *)ifp->if_softc; 2348 struct adapter *sc = pi->adapter; 2349 u_int total, remaining; /* # of packets */ 2350 u_int available, dbdiff; /* # of hardware descriptors */ 2351 u_int n, next_cidx; 2352 struct mbuf *m0, *tail; 2353 struct txpkts txp; 2354 struct fw_eth_tx_pkts_wr *wr; /* any fw WR struct will do */ 2355 2356 remaining = IDXDIFF(pidx, cidx, r->size); 2357 MPASS(remaining > 0); /* Must not be called without work to do. */ 2358 total = 0; 2359 2360 TXQ_LOCK(txq); 2361 if (__predict_false((eq->flags & EQ_ENABLED) == 0)) { 2362 while (cidx != pidx) { 2363 m0 = r->items[cidx]; 2364 m_freem(m0); 2365 if (++cidx == r->size) 2366 cidx = 0; 2367 } 2368 reclaim_tx_descs(txq, 2048); 2369 total = remaining; 2370 goto done; 2371 } 2372 2373 /* How many hardware descriptors do we have readily available. */ 2374 if (eq->pidx == eq->cidx) 2375 available = eq->sidx - 1; 2376 else 2377 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2378 dbdiff = IDXDIFF(eq->pidx, eq->dbidx, eq->sidx); 2379 2380 while (remaining > 0) { 2381 2382 m0 = r->items[cidx]; 2383 M_ASSERTPKTHDR(m0); 2384 MPASS(m0->m_nextpkt == NULL); 2385 2386 if (available < SGE_MAX_WR_NDESC) { 2387 available += reclaim_tx_descs(txq, 64); 2388 if (available < howmany(mbuf_len16(m0), EQ_ESIZE / 16)) 2389 break; /* out of descriptors */ 2390 } 2391 2392 next_cidx = cidx + 1; 2393 if (__predict_false(next_cidx == r->size)) 2394 next_cidx = 0; 2395 2396 wr = (void *)&eq->desc[eq->pidx]; 2397 if (remaining > 1 && 2398 try_txpkts(m0, r->items[next_cidx], &txp, available) == 0) { 2399 2400 /* pkts at cidx, next_cidx should both be in txp. */ 2401 MPASS(txp.npkt == 2); 2402 tail = r->items[next_cidx]; 2403 MPASS(tail->m_nextpkt == NULL); 2404 ETHER_BPF_MTAP(ifp, m0); 2405 ETHER_BPF_MTAP(ifp, tail); 2406 m0->m_nextpkt = tail; 2407 2408 if (__predict_false(++next_cidx == r->size)) 2409 next_cidx = 0; 2410 2411 while (next_cidx != pidx) { 2412 if (add_to_txpkts(r->items[next_cidx], &txp, 2413 available) != 0) 2414 break; 2415 tail->m_nextpkt = r->items[next_cidx]; 2416 tail = tail->m_nextpkt; 2417 ETHER_BPF_MTAP(ifp, tail); 2418 if (__predict_false(++next_cidx == r->size)) 2419 next_cidx = 0; 2420 } 2421 2422 n = write_txpkts_wr(txq, wr, m0, &txp, available); 2423 total += txp.npkt; 2424 remaining -= txp.npkt; 2425 } else { 2426 total++; 2427 remaining--; 2428 n = write_txpkt_wr(txq, (void *)wr, m0, available); 2429 ETHER_BPF_MTAP(ifp, m0); 2430 } 2431 MPASS(n >= 1 && n <= available && n <= SGE_MAX_WR_NDESC); 2432 2433 available -= n; 2434 dbdiff += n; 2435 IDXINCR(eq->pidx, n, eq->sidx); 2436 2437 if (total_available_tx_desc(eq) < eq->sidx / 4 && 2438 atomic_cmpset_int(&eq->equiq, 0, 1)) { 2439 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | 2440 F_FW_WR_EQUEQ); 2441 eq->equeqidx = eq->pidx; 2442 } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) { 2443 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); 2444 eq->equeqidx = eq->pidx; 2445 } 2446 2447 if (dbdiff >= 16 && remaining >= 4) { 2448 ring_eq_db(sc, eq, dbdiff); 2449 available += reclaim_tx_descs(txq, 4 * dbdiff); 2450 dbdiff = 0; 2451 } 2452 2453 cidx = next_cidx; 2454 } 2455 if (dbdiff != 0) { 2456 ring_eq_db(sc, eq, dbdiff); 2457 reclaim_tx_descs(txq, 32); 2458 } 2459done: 2460 TXQ_UNLOCK(txq); 2461 2462 return (total); 2463} 2464 2465static inline void 2466init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx, 2467 int qsize) 2468{ 2469 2470 KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS, 2471 ("%s: bad tmr_idx %d", __func__, tmr_idx)); 2472 KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */ 2473 ("%s: bad pktc_idx %d", __func__, pktc_idx)); 2474 2475 iq->flags = 0; 2476 iq->adapter = sc; 2477 iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx); 2478 iq->intr_pktc_idx = SGE_NCOUNTERS - 1; 2479 if (pktc_idx >= 0) { 2480 iq->intr_params |= F_QINTR_CNT_EN; 2481 iq->intr_pktc_idx = pktc_idx; 2482 } 2483 iq->qsize = roundup2(qsize, 16); /* See FW_IQ_CMD/iqsize */ 2484 iq->sidx = iq->qsize - spg_len / IQ_ESIZE; 2485} 2486 2487static inline void 2488init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name) 2489{ 2490 2491 fl->qsize = qsize; 2492 fl->sidx = qsize - spg_len / EQ_ESIZE; 2493 strlcpy(fl->lockname, name, sizeof(fl->lockname)); 2494 if (sc->flags & BUF_PACKING_OK && 2495 ((!is_t4(sc) && buffer_packing) || /* T5+: enabled unless 0 */ 2496 (is_t4(sc) && buffer_packing == 1)))/* T4: disabled unless 1 */ 2497 fl->flags |= FL_BUF_PACKING; 2498 find_best_refill_source(sc, fl, maxp); 2499 find_safe_refill_source(sc, fl); 2500} 2501 2502static inline void 2503init_eq(struct sge_eq *eq, int eqtype, int qsize, uint8_t tx_chan, 2504 uint16_t iqid, char *name) 2505{ 2506 KASSERT(tx_chan < NCHAN, ("%s: bad tx channel %d", __func__, tx_chan)); 2507 KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype)); 2508 2509 eq->flags = eqtype & EQ_TYPEMASK; 2510 eq->tx_chan = tx_chan; 2511 eq->iqid = iqid; 2512 eq->sidx = qsize - spg_len / EQ_ESIZE; 2513 strlcpy(eq->lockname, name, sizeof(eq->lockname)); 2514} 2515 2516static int 2517alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag, 2518 bus_dmamap_t *map, bus_addr_t *pa, void **va) 2519{ 2520 int rc; 2521 2522 rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR, 2523 BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag); 2524 if (rc != 0) { 2525 device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc); 2526 goto done; 2527 } 2528 2529 rc = bus_dmamem_alloc(*tag, va, 2530 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map); 2531 if (rc != 0) { 2532 device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc); 2533 goto done; 2534 } 2535 2536 rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0); 2537 if (rc != 0) { 2538 device_printf(sc->dev, "cannot load DMA map: %d\n", rc); 2539 goto done; 2540 } 2541done: 2542 if (rc) 2543 free_ring(sc, *tag, *map, *pa, *va); 2544 2545 return (rc); 2546} 2547 2548static int 2549free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map, 2550 bus_addr_t pa, void *va) 2551{ 2552 if (pa) 2553 bus_dmamap_unload(tag, map); 2554 if (va) 2555 bus_dmamem_free(tag, va, map); 2556 if (tag) 2557 bus_dma_tag_destroy(tag); 2558 2559 return (0); 2560} 2561 2562/* 2563 * Allocates the ring for an ingress queue and an optional freelist. If the 2564 * freelist is specified it will be allocated and then associated with the 2565 * ingress queue. 2566 * 2567 * Returns errno on failure. Resources allocated up to that point may still be 2568 * allocated. Caller is responsible for cleanup in case this function fails. 2569 * 2570 * If the ingress queue will take interrupts directly (iq->flags & IQ_INTR) then 2571 * the intr_idx specifies the vector, starting from 0. Otherwise it specifies 2572 * the abs_id of the ingress queue to which its interrupts should be forwarded. 2573 */ 2574static int 2575alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl, 2576 int intr_idx, int cong) 2577{ 2578 int rc, i, cntxt_id; 2579 size_t len; 2580 struct fw_iq_cmd c; 2581 struct adapter *sc = iq->adapter; 2582 __be32 v = 0; 2583 2584 len = iq->qsize * IQ_ESIZE; 2585 rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba, 2586 (void **)&iq->desc); 2587 if (rc != 0) 2588 return (rc); 2589 2590 bzero(&c, sizeof(c)); 2591 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 2592 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | 2593 V_FW_IQ_CMD_VFN(0)); 2594 2595 c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | 2596 FW_LEN16(c)); 2597 2598 /* Special handling for firmware event queue */ 2599 if (iq == &sc->sge.fwq) 2600 v |= F_FW_IQ_CMD_IQASYNCH; 2601 2602 if (iq->flags & IQ_INTR) { 2603 KASSERT(intr_idx < sc->intr_count, 2604 ("%s: invalid direct intr_idx %d", __func__, intr_idx)); 2605 } else 2606 v |= F_FW_IQ_CMD_IQANDST; 2607 v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx); 2608 2609 c.type_to_iqandstindex = htobe32(v | 2610 V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 2611 V_FW_IQ_CMD_VIID(pi->viid) | 2612 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); 2613 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | 2614 F_FW_IQ_CMD_IQGTSMODE | 2615 V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) | 2616 V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); 2617 c.iqsize = htobe16(iq->qsize); 2618 c.iqaddr = htobe64(iq->ba); 2619 if (cong >= 0) 2620 c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN); 2621 2622 if (fl) { 2623 mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF); 2624 2625 len = fl->qsize * EQ_ESIZE; 2626 rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map, 2627 &fl->ba, (void **)&fl->desc); 2628 if (rc) 2629 return (rc); 2630 2631 /* Allocate space for one software descriptor per buffer. */ 2632 rc = alloc_fl_sdesc(fl); 2633 if (rc != 0) { 2634 device_printf(sc->dev, 2635 "failed to setup fl software descriptors: %d\n", 2636 rc); 2637 return (rc); 2638 } 2639 2640 if (fl->flags & FL_BUF_PACKING) { 2641 fl->lowat = roundup2(sc->sge.fl_starve_threshold2, 8); 2642 fl->buf_boundary = sc->sge.pack_boundary; 2643 } else { 2644 fl->lowat = roundup2(sc->sge.fl_starve_threshold, 8); 2645 fl->buf_boundary = 16; 2646 } 2647 if (fl_pad && fl->buf_boundary < sc->sge.pad_boundary) 2648 fl->buf_boundary = sc->sge.pad_boundary; 2649 2650 c.iqns_to_fl0congen |= 2651 htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | 2652 F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | 2653 (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | 2654 (fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN : 2655 0)); 2656 if (cong >= 0) { 2657 c.iqns_to_fl0congen |= 2658 htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) | 2659 F_FW_IQ_CMD_FL0CONGCIF | 2660 F_FW_IQ_CMD_FL0CONGEN); 2661 } 2662 c.fl0dcaen_to_fl0cidxfthresh = 2663 htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) | 2664 V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B)); 2665 c.fl0size = htobe16(fl->qsize); 2666 c.fl0addr = htobe64(fl->ba); 2667 } 2668 2669 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 2670 if (rc != 0) { 2671 device_printf(sc->dev, 2672 "failed to create ingress queue: %d\n", rc); 2673 return (rc); 2674 } 2675 2676 iq->cidx = 0; 2677 iq->gen = F_RSPD_GEN; 2678 iq->intr_next = iq->intr_params; 2679 iq->cntxt_id = be16toh(c.iqid); 2680 iq->abs_id = be16toh(c.physiqid); 2681 iq->flags |= IQ_ALLOCATED; 2682 2683 cntxt_id = iq->cntxt_id - sc->sge.iq_start; 2684 if (cntxt_id >= sc->sge.niq) { 2685 panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__, 2686 cntxt_id, sc->sge.niq - 1); 2687 } 2688 sc->sge.iqmap[cntxt_id] = iq; 2689 2690 if (fl) { 2691 u_int qid; 2692 2693 iq->flags |= IQ_HAS_FL; 2694 fl->cntxt_id = be16toh(c.fl0id); 2695 fl->pidx = fl->cidx = 0; 2696 2697 cntxt_id = fl->cntxt_id - sc->sge.eq_start; 2698 if (cntxt_id >= sc->sge.neq) { 2699 panic("%s: fl->cntxt_id (%d) more than the max (%d)", 2700 __func__, cntxt_id, sc->sge.neq - 1); 2701 } 2702 sc->sge.eqmap[cntxt_id] = (void *)fl; 2703 2704 qid = fl->cntxt_id; 2705 if (isset(&sc->doorbells, DOORBELL_UDB)) { 2706 uint32_t s_qpp = sc->sge.eq_s_qpp; 2707 uint32_t mask = (1 << s_qpp) - 1; 2708 volatile uint8_t *udb; 2709 2710 udb = sc->udbs_base + UDBS_DB_OFFSET; 2711 udb += (qid >> s_qpp) << PAGE_SHIFT; 2712 qid &= mask; 2713 if (qid < PAGE_SIZE / UDBS_SEG_SIZE) { 2714 udb += qid << UDBS_SEG_SHIFT; 2715 qid = 0; 2716 } 2717 fl->udb = (volatile void *)udb; 2718 } 2719 fl->dbval = F_DBPRIO | V_QID(qid); 2720 if (is_t5(sc)) 2721 fl->dbval |= F_DBTYPE; 2722 2723 FL_LOCK(fl); 2724 /* Enough to make sure the SGE doesn't think it's starved */ 2725 refill_fl(sc, fl, fl->lowat); 2726 FL_UNLOCK(fl); 2727 } 2728 2729 if (is_t5(sc) && cong >= 0) { 2730 uint32_t param, val; 2731 2732 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 2733 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | 2734 V_FW_PARAMS_PARAM_YZ(iq->cntxt_id); 2735 if (cong == 0) 2736 val = 1 << 19; 2737 else { 2738 val = 2 << 19; 2739 for (i = 0; i < 4; i++) { 2740 if (cong & (1 << i)) 2741 val |= 1 << (i << 2); 2742 } 2743 } 2744 2745 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 2746 if (rc != 0) { 2747 /* report error but carry on */ 2748 device_printf(sc->dev, 2749 "failed to set congestion manager context for " 2750 "ingress queue %d: %d\n", iq->cntxt_id, rc); 2751 } 2752 } 2753 2754 /* Enable IQ interrupts */ 2755 atomic_store_rel_int(&iq->state, IQS_IDLE); 2756 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) | 2757 V_INGRESSQID(iq->cntxt_id)); 2758 2759 return (0); 2760} 2761 2762static int 2763free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl) 2764{ 2765 int rc; 2766 struct adapter *sc = iq->adapter; 2767 device_t dev; 2768 2769 if (sc == NULL) 2770 return (0); /* nothing to do */ 2771 2772 dev = pi ? pi->dev : sc->dev; 2773 2774 if (iq->flags & IQ_ALLOCATED) { 2775 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, 2776 FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id, 2777 fl ? fl->cntxt_id : 0xffff, 0xffff); 2778 if (rc != 0) { 2779 device_printf(dev, 2780 "failed to free queue %p: %d\n", iq, rc); 2781 return (rc); 2782 } 2783 iq->flags &= ~IQ_ALLOCATED; 2784 } 2785 2786 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc); 2787 2788 bzero(iq, sizeof(*iq)); 2789 2790 if (fl) { 2791 free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, 2792 fl->desc); 2793 2794 if (fl->sdesc) 2795 free_fl_sdesc(sc, fl); 2796 2797 if (mtx_initialized(&fl->fl_lock)) 2798 mtx_destroy(&fl->fl_lock); 2799 2800 bzero(fl, sizeof(*fl)); 2801 } 2802 2803 return (0); 2804} 2805 2806static void 2807add_fl_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid, 2808 struct sge_fl *fl) 2809{ 2810 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2811 2812 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL, 2813 "freelist"); 2814 children = SYSCTL_CHILDREN(oid); 2815 2816 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 2817 CTLTYPE_INT | CTLFLAG_RD, &fl->cntxt_id, 0, sysctl_uint16, "I", 2818 "SGE context id of the freelist"); 2819 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "padding", CTLFLAG_RD, NULL, 2820 fl_pad ? 1 : 0, "padding enabled"); 2821 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "packing", CTLFLAG_RD, NULL, 2822 fl->flags & FL_BUF_PACKING ? 1 : 0, "packing enabled"); 2823 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx, 2824 0, "consumer index"); 2825 if (fl->flags & FL_BUF_PACKING) { 2826 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset", 2827 CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset"); 2828 } 2829 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx, 2830 0, "producer index"); 2831 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_allocated", 2832 CTLFLAG_RD, &fl->mbuf_allocated, "# of mbuf allocated"); 2833 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_inlined", 2834 CTLFLAG_RD, &fl->mbuf_inlined, "# of mbuf inlined in clusters"); 2835 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated", 2836 CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated"); 2837 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled", 2838 CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled"); 2839 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled", 2840 CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)"); 2841} 2842 2843static int 2844alloc_fwq(struct adapter *sc) 2845{ 2846 int rc, intr_idx; 2847 struct sge_iq *fwq = &sc->sge.fwq; 2848 struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev); 2849 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2850 2851 init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE); 2852 fwq->flags |= IQ_INTR; /* always */ 2853 intr_idx = sc->intr_count > 1 ? 1 : 0; 2854 rc = alloc_iq_fl(sc->port[0], fwq, NULL, intr_idx, -1); 2855 if (rc != 0) { 2856 device_printf(sc->dev, 2857 "failed to create firmware event queue: %d\n", rc); 2858 return (rc); 2859 } 2860 2861 oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "fwq", CTLFLAG_RD, 2862 NULL, "firmware event queue"); 2863 children = SYSCTL_CHILDREN(oid); 2864 2865 SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "abs_id", 2866 CTLTYPE_INT | CTLFLAG_RD, &fwq->abs_id, 0, sysctl_uint16, "I", 2867 "absolute id of the queue"); 2868 SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cntxt_id", 2869 CTLTYPE_INT | CTLFLAG_RD, &fwq->cntxt_id, 0, sysctl_uint16, "I", 2870 "SGE context id of the queue"); 2871 SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cidx", 2872 CTLTYPE_INT | CTLFLAG_RD, &fwq->cidx, 0, sysctl_uint16, "I", 2873 "consumer index"); 2874 2875 return (0); 2876} 2877 2878static int 2879free_fwq(struct adapter *sc) 2880{ 2881 return free_iq_fl(NULL, &sc->sge.fwq, NULL); 2882} 2883 2884static int 2885alloc_mgmtq(struct adapter *sc) 2886{ 2887 int rc; 2888 struct sge_wrq *mgmtq = &sc->sge.mgmtq; 2889 char name[16]; 2890 struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev); 2891 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2892 2893 oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "mgmtq", CTLFLAG_RD, 2894 NULL, "management queue"); 2895 2896 snprintf(name, sizeof(name), "%s mgmtq", device_get_nameunit(sc->dev)); 2897 init_eq(&mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan, 2898 sc->sge.fwq.cntxt_id, name); 2899 rc = alloc_wrq(sc, NULL, mgmtq, oid); 2900 if (rc != 0) { 2901 device_printf(sc->dev, 2902 "failed to create management queue: %d\n", rc); 2903 return (rc); 2904 } 2905 2906 return (0); 2907} 2908 2909static int 2910free_mgmtq(struct adapter *sc) 2911{ 2912 2913 return free_wrq(sc, &sc->sge.mgmtq); 2914} 2915 2916int 2917tnl_cong(struct port_info *pi) 2918{ 2919 2920 if (cong_drop == -1) 2921 return (-1); 2922 else if (cong_drop == 1) 2923 return (0); 2924 else 2925 return (pi->rx_chan_map); 2926} 2927 2928static int 2929alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx, 2930 struct sysctl_oid *oid) 2931{ 2932 int rc; 2933 struct sysctl_oid_list *children; 2934 char name[16]; 2935 2936 rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx, tnl_cong(pi)); 2937 if (rc != 0) 2938 return (rc); 2939 2940 /* 2941 * The freelist is just barely above the starvation threshold right now, 2942 * fill it up a bit more. 2943 */ 2944 FL_LOCK(&rxq->fl); 2945 refill_fl(pi->adapter, &rxq->fl, 128); 2946 FL_UNLOCK(&rxq->fl); 2947 2948#if defined(INET) || defined(INET6) 2949 rc = tcp_lro_init(&rxq->lro); 2950 if (rc != 0) 2951 return (rc); 2952 rxq->lro.ifp = pi->ifp; /* also indicates LRO init'ed */ 2953 2954 if (pi->ifp->if_capenable & IFCAP_LRO) 2955 rxq->iq.flags |= IQ_LRO_ENABLED; 2956#endif 2957 rxq->ifp = pi->ifp; 2958 2959 children = SYSCTL_CHILDREN(oid); 2960 2961 snprintf(name, sizeof(name), "%d", idx); 2962 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 2963 NULL, "rx queue"); 2964 children = SYSCTL_CHILDREN(oid); 2965 2966 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id", 2967 CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.abs_id, 0, sysctl_uint16, "I", 2968 "absolute id of the queue"); 2969 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id", 2970 CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cntxt_id, 0, sysctl_uint16, "I", 2971 "SGE context id of the queue"); 2972 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx", 2973 CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cidx, 0, sysctl_uint16, "I", 2974 "consumer index"); 2975#if defined(INET) || defined(INET6) 2976 SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD, 2977 &rxq->lro.lro_queued, 0, NULL); 2978 SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD, 2979 &rxq->lro.lro_flushed, 0, NULL); 2980#endif 2981 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD, 2982 &rxq->rxcsum, "# of times hardware assisted with checksum"); 2983 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_extraction", 2984 CTLFLAG_RD, &rxq->vlan_extraction, 2985 "# of times hardware extracted 802.1Q tag"); 2986 2987 add_fl_sysctls(&pi->ctx, oid, &rxq->fl); 2988 2989 return (rc); 2990} 2991 2992static int 2993free_rxq(struct port_info *pi, struct sge_rxq *rxq) 2994{ 2995 int rc; 2996 2997#if defined(INET) || defined(INET6) 2998 if (rxq->lro.ifp) { 2999 tcp_lro_free(&rxq->lro); 3000 rxq->lro.ifp = NULL; 3001 } 3002#endif 3003 3004 rc = free_iq_fl(pi, &rxq->iq, &rxq->fl); 3005 if (rc == 0) 3006 bzero(rxq, sizeof(*rxq)); 3007 3008 return (rc); 3009} 3010 3011#ifdef TCP_OFFLOAD 3012static int 3013alloc_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq, 3014 int intr_idx, int idx, struct sysctl_oid *oid) 3015{ 3016 int rc; 3017 struct sysctl_oid_list *children; 3018 char name[16]; 3019 3020 rc = alloc_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx, 3021 pi->rx_chan_map); 3022 if (rc != 0) 3023 return (rc); 3024 3025 children = SYSCTL_CHILDREN(oid); 3026 3027 snprintf(name, sizeof(name), "%d", idx); 3028 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3029 NULL, "rx queue"); 3030 children = SYSCTL_CHILDREN(oid); 3031 3032 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id", 3033 CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.abs_id, 0, sysctl_uint16, 3034 "I", "absolute id of the queue"); 3035 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id", 3036 CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cntxt_id, 0, sysctl_uint16, 3037 "I", "SGE context id of the queue"); 3038 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx", 3039 CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cidx, 0, sysctl_uint16, "I", 3040 "consumer index"); 3041 3042 add_fl_sysctls(&pi->ctx, oid, &ofld_rxq->fl); 3043 3044 return (rc); 3045} 3046 3047static int 3048free_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq) 3049{ 3050 int rc; 3051 3052 rc = free_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl); 3053 if (rc == 0) 3054 bzero(ofld_rxq, sizeof(*ofld_rxq)); 3055 3056 return (rc); 3057} 3058#endif 3059 3060#ifdef DEV_NETMAP 3061static int 3062alloc_nm_rxq(struct port_info *pi, struct sge_nm_rxq *nm_rxq, int intr_idx, 3063 int idx, struct sysctl_oid *oid) 3064{ 3065 int rc; 3066 struct sysctl_oid_list *children; 3067 struct sysctl_ctx_list *ctx; 3068 char name[16]; 3069 size_t len; 3070 struct adapter *sc = pi->adapter; 3071 struct netmap_adapter *na = NA(pi->nm_ifp); 3072 3073 MPASS(na != NULL); 3074 3075 len = pi->qsize_rxq * IQ_ESIZE; 3076 rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map, 3077 &nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc); 3078 if (rc != 0) 3079 return (rc); 3080 3081 len = na->num_rx_desc * EQ_ESIZE + spg_len; 3082 rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map, 3083 &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc); 3084 if (rc != 0) 3085 return (rc); 3086 3087 nm_rxq->pi = pi; 3088 nm_rxq->nid = idx; 3089 nm_rxq->iq_cidx = 0; 3090 nm_rxq->iq_sidx = pi->qsize_rxq - spg_len / IQ_ESIZE; 3091 nm_rxq->iq_gen = F_RSPD_GEN; 3092 nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; 3093 nm_rxq->fl_sidx = na->num_rx_desc; 3094 nm_rxq->intr_idx = intr_idx; 3095 3096 ctx = &pi->ctx; 3097 children = SYSCTL_CHILDREN(oid); 3098 3099 snprintf(name, sizeof(name), "%d", idx); 3100 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL, 3101 "rx queue"); 3102 children = SYSCTL_CHILDREN(oid); 3103 3104 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id", 3105 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_abs_id, 0, sysctl_uint16, 3106 "I", "absolute id of the queue"); 3107 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 3108 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cntxt_id, 0, sysctl_uint16, 3109 "I", "SGE context id of the queue"); 3110 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 3111 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cidx, 0, sysctl_uint16, "I", 3112 "consumer index"); 3113 3114 children = SYSCTL_CHILDREN(oid); 3115 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL, 3116 "freelist"); 3117 children = SYSCTL_CHILDREN(oid); 3118 3119 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 3120 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->fl_cntxt_id, 0, sysctl_uint16, 3121 "I", "SGE context id of the freelist"); 3122 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 3123 &nm_rxq->fl_cidx, 0, "consumer index"); 3124 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, 3125 &nm_rxq->fl_pidx, 0, "producer index"); 3126 3127 return (rc); 3128} 3129 3130 3131static int 3132free_nm_rxq(struct port_info *pi, struct sge_nm_rxq *nm_rxq) 3133{ 3134 struct adapter *sc = pi->adapter; 3135 3136 free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba, 3137 nm_rxq->iq_desc); 3138 free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba, 3139 nm_rxq->fl_desc); 3140 3141 return (0); 3142} 3143 3144static int 3145alloc_nm_txq(struct port_info *pi, struct sge_nm_txq *nm_txq, int iqidx, int idx, 3146 struct sysctl_oid *oid) 3147{ 3148 int rc; 3149 size_t len; 3150 struct adapter *sc = pi->adapter; 3151 struct netmap_adapter *na = NA(pi->nm_ifp); 3152 char name[16]; 3153 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3154 3155 len = na->num_tx_desc * EQ_ESIZE + spg_len; 3156 rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map, 3157 &nm_txq->ba, (void **)&nm_txq->desc); 3158 if (rc) 3159 return (rc); 3160 3161 nm_txq->pidx = nm_txq->cidx = 0; 3162 nm_txq->sidx = na->num_tx_desc; 3163 nm_txq->nid = idx; 3164 nm_txq->iqidx = iqidx; 3165 nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 3166 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf)); 3167 3168 snprintf(name, sizeof(name), "%d", idx); 3169 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3170 NULL, "netmap tx queue"); 3171 children = SYSCTL_CHILDREN(oid); 3172 3173 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 3174 &nm_txq->cntxt_id, 0, "SGE context id of the queue"); 3175 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx", 3176 CTLTYPE_INT | CTLFLAG_RD, &nm_txq->cidx, 0, sysctl_uint16, "I", 3177 "consumer index"); 3178 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "pidx", 3179 CTLTYPE_INT | CTLFLAG_RD, &nm_txq->pidx, 0, sysctl_uint16, "I", 3180 "producer index"); 3181 3182 return (rc); 3183} 3184 3185static int 3186free_nm_txq(struct port_info *pi, struct sge_nm_txq *nm_txq) 3187{ 3188 struct adapter *sc = pi->adapter; 3189 3190 free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba, 3191 nm_txq->desc); 3192 3193 return (0); 3194} 3195#endif 3196 3197static int 3198ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq) 3199{ 3200 int rc, cntxt_id; 3201 struct fw_eq_ctrl_cmd c; 3202 int qsize = eq->sidx + spg_len / EQ_ESIZE; 3203 3204 bzero(&c, sizeof(c)); 3205 3206 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | 3207 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) | 3208 V_FW_EQ_CTRL_CMD_VFN(0)); 3209 c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC | 3210 F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); 3211 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); 3212 c.physeqid_pkd = htobe32(0); 3213 c.fetchszm_to_iqid = 3214 htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 3215 V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) | 3216 F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid)); 3217 c.dcaen_to_eqsize = 3218 htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 3219 V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 3220 V_FW_EQ_CTRL_CMD_EQSIZE(qsize)); 3221 c.eqaddr = htobe64(eq->ba); 3222 3223 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 3224 if (rc != 0) { 3225 device_printf(sc->dev, 3226 "failed to create control queue %d: %d\n", eq->tx_chan, rc); 3227 return (rc); 3228 } 3229 eq->flags |= EQ_ALLOCATED; 3230 3231 eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid)); 3232 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 3233 if (cntxt_id >= sc->sge.neq) 3234 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 3235 cntxt_id, sc->sge.neq - 1); 3236 sc->sge.eqmap[cntxt_id] = eq; 3237 3238 return (rc); 3239} 3240 3241static int 3242eth_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq) 3243{ 3244 int rc, cntxt_id; 3245 struct fw_eq_eth_cmd c; 3246 int qsize = eq->sidx + spg_len / EQ_ESIZE; 3247 3248 bzero(&c, sizeof(c)); 3249 3250 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 3251 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | 3252 V_FW_EQ_ETH_CMD_VFN(0)); 3253 c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC | 3254 F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); 3255 c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE | 3256 F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(pi->viid)); 3257 c.fetchszm_to_iqid = 3258 htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 3259 V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | 3260 V_FW_EQ_ETH_CMD_IQID(eq->iqid)); 3261 c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 3262 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 3263 V_FW_EQ_ETH_CMD_EQSIZE(qsize)); 3264 c.eqaddr = htobe64(eq->ba); 3265 3266 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 3267 if (rc != 0) { 3268 device_printf(pi->dev, 3269 "failed to create Ethernet egress queue: %d\n", rc); 3270 return (rc); 3271 } 3272 eq->flags |= EQ_ALLOCATED; 3273 3274 eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); 3275 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 3276 if (cntxt_id >= sc->sge.neq) 3277 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 3278 cntxt_id, sc->sge.neq - 1); 3279 sc->sge.eqmap[cntxt_id] = eq; 3280 3281 return (rc); 3282} 3283 3284#ifdef TCP_OFFLOAD 3285static int 3286ofld_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq) 3287{ 3288 int rc, cntxt_id; 3289 struct fw_eq_ofld_cmd c; 3290 int qsize = eq->sidx + spg_len / EQ_ESIZE; 3291 3292 bzero(&c, sizeof(c)); 3293 3294 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST | 3295 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) | 3296 V_FW_EQ_OFLD_CMD_VFN(0)); 3297 c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC | 3298 F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); 3299 c.fetchszm_to_iqid = 3300 htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 3301 V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) | 3302 F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid)); 3303 c.dcaen_to_eqsize = 3304 htobe32(V_FW_EQ_OFLD_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 3305 V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 3306 V_FW_EQ_OFLD_CMD_EQSIZE(qsize)); 3307 c.eqaddr = htobe64(eq->ba); 3308 3309 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 3310 if (rc != 0) { 3311 device_printf(pi->dev, 3312 "failed to create egress queue for TCP offload: %d\n", rc); 3313 return (rc); 3314 } 3315 eq->flags |= EQ_ALLOCATED; 3316 3317 eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd)); 3318 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 3319 if (cntxt_id >= sc->sge.neq) 3320 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 3321 cntxt_id, sc->sge.neq - 1); 3322 sc->sge.eqmap[cntxt_id] = eq; 3323 3324 return (rc); 3325} 3326#endif 3327 3328static int 3329alloc_eq(struct adapter *sc, struct port_info *pi, struct sge_eq *eq) 3330{ 3331 int rc, qsize; 3332 size_t len; 3333 3334 mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF); 3335 3336 qsize = eq->sidx + spg_len / EQ_ESIZE; 3337 len = qsize * EQ_ESIZE; 3338 rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, 3339 &eq->ba, (void **)&eq->desc); 3340 if (rc) 3341 return (rc); 3342 3343 eq->pidx = eq->cidx = 0; 3344 eq->equeqidx = eq->dbidx = 0; 3345 eq->doorbells = sc->doorbells; 3346 3347 switch (eq->flags & EQ_TYPEMASK) { 3348 case EQ_CTRL: 3349 rc = ctrl_eq_alloc(sc, eq); 3350 break; 3351 3352 case EQ_ETH: 3353 rc = eth_eq_alloc(sc, pi, eq); 3354 break; 3355 3356#ifdef TCP_OFFLOAD 3357 case EQ_OFLD: 3358 rc = ofld_eq_alloc(sc, pi, eq); 3359 break; 3360#endif 3361 3362 default: 3363 panic("%s: invalid eq type %d.", __func__, 3364 eq->flags & EQ_TYPEMASK); 3365 } 3366 if (rc != 0) { 3367 device_printf(sc->dev, 3368 "failed to allocate egress queue(%d): %d\n", 3369 eq->flags & EQ_TYPEMASK, rc); 3370 } 3371 3372 if (isset(&eq->doorbells, DOORBELL_UDB) || 3373 isset(&eq->doorbells, DOORBELL_UDBWC) || 3374 isset(&eq->doorbells, DOORBELL_WCWR)) { 3375 uint32_t s_qpp = sc->sge.eq_s_qpp; 3376 uint32_t mask = (1 << s_qpp) - 1; 3377 volatile uint8_t *udb; 3378 3379 udb = sc->udbs_base + UDBS_DB_OFFSET; 3380 udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT; /* pg offset */ 3381 eq->udb_qid = eq->cntxt_id & mask; /* id in page */ 3382 if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) 3383 clrbit(&eq->doorbells, DOORBELL_WCWR); 3384 else { 3385 udb += eq->udb_qid << UDBS_SEG_SHIFT; /* seg offset */ 3386 eq->udb_qid = 0; 3387 } 3388 eq->udb = (volatile void *)udb; 3389 } 3390 3391 return (rc); 3392} 3393 3394static int 3395free_eq(struct adapter *sc, struct sge_eq *eq) 3396{ 3397 int rc; 3398 3399 if (eq->flags & EQ_ALLOCATED) { 3400 switch (eq->flags & EQ_TYPEMASK) { 3401 case EQ_CTRL: 3402 rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, 3403 eq->cntxt_id); 3404 break; 3405 3406 case EQ_ETH: 3407 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, 3408 eq->cntxt_id); 3409 break; 3410 3411#ifdef TCP_OFFLOAD 3412 case EQ_OFLD: 3413 rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0, 3414 eq->cntxt_id); 3415 break; 3416#endif 3417 3418 default: 3419 panic("%s: invalid eq type %d.", __func__, 3420 eq->flags & EQ_TYPEMASK); 3421 } 3422 if (rc != 0) { 3423 device_printf(sc->dev, 3424 "failed to free egress queue (%d): %d\n", 3425 eq->flags & EQ_TYPEMASK, rc); 3426 return (rc); 3427 } 3428 eq->flags &= ~EQ_ALLOCATED; 3429 } 3430 3431 free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc); 3432 3433 if (mtx_initialized(&eq->eq_lock)) 3434 mtx_destroy(&eq->eq_lock); 3435 3436 bzero(eq, sizeof(*eq)); 3437 return (0); 3438} 3439 3440static int 3441alloc_wrq(struct adapter *sc, struct port_info *pi, struct sge_wrq *wrq, 3442 struct sysctl_oid *oid) 3443{ 3444 int rc; 3445 struct sysctl_ctx_list *ctx = pi ? &pi->ctx : &sc->ctx; 3446 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3447 3448 rc = alloc_eq(sc, pi, &wrq->eq); 3449 if (rc) 3450 return (rc); 3451 3452 wrq->adapter = sc; 3453 TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq); 3454 TAILQ_INIT(&wrq->incomplete_wrs); 3455 STAILQ_INIT(&wrq->wr_list); 3456 wrq->nwr_pending = 0; 3457 wrq->ndesc_needed = 0; 3458 3459 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 3460 &wrq->eq.cntxt_id, 0, "SGE context id of the queue"); 3461 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 3462 CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.cidx, 0, sysctl_uint16, "I", 3463 "consumer index"); 3464 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx", 3465 CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.pidx, 0, sysctl_uint16, "I", 3466 "producer index"); 3467 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_direct", CTLFLAG_RD, 3468 &wrq->tx_wrs_direct, "# of work requests (direct)"); 3469 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_copied", CTLFLAG_RD, 3470 &wrq->tx_wrs_copied, "# of work requests (copied)"); 3471 3472 return (rc); 3473} 3474 3475static int 3476free_wrq(struct adapter *sc, struct sge_wrq *wrq) 3477{ 3478 int rc; 3479 3480 rc = free_eq(sc, &wrq->eq); 3481 if (rc) 3482 return (rc); 3483 3484 bzero(wrq, sizeof(*wrq)); 3485 return (0); 3486} 3487 3488static int 3489alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx, 3490 struct sysctl_oid *oid) 3491{ 3492 int rc; 3493 struct adapter *sc = pi->adapter; 3494 struct sge_eq *eq = &txq->eq; 3495 char name[16]; 3496 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3497 3498 rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx, can_resume_eth_tx, 3499 M_CXGBE, M_WAITOK); 3500 if (rc != 0) { 3501 device_printf(sc->dev, "failed to allocate mp_ring: %d\n", rc); 3502 return (rc); 3503 } 3504 3505 rc = alloc_eq(sc, pi, eq); 3506 if (rc != 0) { 3507 mp_ring_free(txq->r); 3508 txq->r = NULL; 3509 return (rc); 3510 } 3511 3512 /* Can't fail after this point. */ 3513 3514 TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq); 3515 txq->ifp = pi->ifp; 3516 txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK); 3517 txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 3518 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf)); 3519 txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE, 3520 M_ZERO | M_WAITOK); 3521 3522 snprintf(name, sizeof(name), "%d", idx); 3523 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3524 NULL, "tx queue"); 3525 children = SYSCTL_CHILDREN(oid); 3526 3527 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 3528 &eq->cntxt_id, 0, "SGE context id of the queue"); 3529 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx", 3530 CTLTYPE_INT | CTLFLAG_RD, &eq->cidx, 0, sysctl_uint16, "I", 3531 "consumer index"); 3532 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "pidx", 3533 CTLTYPE_INT | CTLFLAG_RD, &eq->pidx, 0, sysctl_uint16, "I", 3534 "producer index"); 3535 3536 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD, 3537 &txq->txcsum, "# of times hardware assisted with checksum"); 3538 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_insertion", 3539 CTLFLAG_RD, &txq->vlan_insertion, 3540 "# of times hardware inserted 802.1Q tag"); 3541 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD, 3542 &txq->tso_wrs, "# of TSO work requests"); 3543 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD, 3544 &txq->imm_wrs, "# of work requests with immediate data"); 3545 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD, 3546 &txq->sgl_wrs, "# of work requests with direct SGL"); 3547 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD, 3548 &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)"); 3549 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts0_wrs", 3550 CTLFLAG_RD, &txq->txpkts0_wrs, 3551 "# of txpkts (type 0) work requests"); 3552 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts1_wrs", 3553 CTLFLAG_RD, &txq->txpkts1_wrs, 3554 "# of txpkts (type 1) work requests"); 3555 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts0_pkts", 3556 CTLFLAG_RD, &txq->txpkts0_pkts, 3557 "# of frames tx'd using type0 txpkts work requests"); 3558 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts1_pkts", 3559 CTLFLAG_RD, &txq->txpkts1_pkts, 3560 "# of frames tx'd using type1 txpkts work requests"); 3561 3562 SYSCTL_ADD_COUNTER_U64(&pi->ctx, children, OID_AUTO, "r_enqueues", 3563 CTLFLAG_RD, &txq->r->enqueues, 3564 "# of enqueues to the mp_ring for this queue"); 3565 SYSCTL_ADD_COUNTER_U64(&pi->ctx, children, OID_AUTO, "r_drops", 3566 CTLFLAG_RD, &txq->r->drops, 3567 "# of drops in the mp_ring for this queue"); 3568 SYSCTL_ADD_COUNTER_U64(&pi->ctx, children, OID_AUTO, "r_starts", 3569 CTLFLAG_RD, &txq->r->starts, 3570 "# of normal consumer starts in the mp_ring for this queue"); 3571 SYSCTL_ADD_COUNTER_U64(&pi->ctx, children, OID_AUTO, "r_stalls", 3572 CTLFLAG_RD, &txq->r->stalls, 3573 "# of consumer stalls in the mp_ring for this queue"); 3574 SYSCTL_ADD_COUNTER_U64(&pi->ctx, children, OID_AUTO, "r_restarts", 3575 CTLFLAG_RD, &txq->r->restarts, 3576 "# of consumer restarts in the mp_ring for this queue"); 3577 SYSCTL_ADD_COUNTER_U64(&pi->ctx, children, OID_AUTO, "r_abdications", 3578 CTLFLAG_RD, &txq->r->abdications, 3579 "# of consumer abdications in the mp_ring for this queue"); 3580 3581 return (0); 3582} 3583 3584static int 3585free_txq(struct port_info *pi, struct sge_txq *txq) 3586{ 3587 int rc; 3588 struct adapter *sc = pi->adapter; 3589 struct sge_eq *eq = &txq->eq; 3590 3591 rc = free_eq(sc, eq); 3592 if (rc) 3593 return (rc); 3594 3595 sglist_free(txq->gl); 3596 free(txq->sdesc, M_CXGBE); 3597 mp_ring_free(txq->r); 3598 3599 bzero(txq, sizeof(*txq)); 3600 return (0); 3601} 3602 3603static void 3604oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3605{ 3606 bus_addr_t *ba = arg; 3607 3608 KASSERT(nseg == 1, 3609 ("%s meant for single segment mappings only.", __func__)); 3610 3611 *ba = error ? 0 : segs->ds_addr; 3612} 3613 3614static inline void 3615ring_fl_db(struct adapter *sc, struct sge_fl *fl) 3616{ 3617 uint32_t n, v; 3618 3619 n = IDXDIFF(fl->pidx / 8, fl->dbidx, fl->sidx); 3620 MPASS(n > 0); 3621 3622 wmb(); 3623 v = fl->dbval | V_PIDX(n); 3624 if (fl->udb) 3625 *fl->udb = htole32(v); 3626 else 3627 t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), v); 3628 IDXINCR(fl->dbidx, n, fl->sidx); 3629} 3630 3631/* 3632 * Fills up the freelist by allocating upto 'n' buffers. Buffers that are 3633 * recycled do not count towards this allocation budget. 3634 * 3635 * Returns non-zero to indicate that this freelist should be added to the list 3636 * of starving freelists. 3637 */ 3638static int 3639refill_fl(struct adapter *sc, struct sge_fl *fl, int n) 3640{ 3641 __be64 *d; 3642 struct fl_sdesc *sd; 3643 uintptr_t pa; 3644 caddr_t cl; 3645 struct cluster_layout *cll; 3646 struct sw_zone_info *swz; 3647 struct cluster_metadata *clm; 3648 uint16_t max_pidx; 3649 uint16_t hw_cidx = fl->hw_cidx; /* stable snapshot */ 3650 3651 FL_LOCK_ASSERT_OWNED(fl); 3652 3653 /* 3654 * We always stop at the begining of the hardware descriptor that's just 3655 * before the one with the hw cidx. This is to avoid hw pidx = hw cidx, 3656 * which would mean an empty freelist to the chip. 3657 */ 3658 max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1; 3659 if (fl->pidx == max_pidx * 8) 3660 return (0); 3661 3662 d = &fl->desc[fl->pidx]; 3663 sd = &fl->sdesc[fl->pidx]; 3664 cll = &fl->cll_def; /* default layout */ 3665 swz = &sc->sge.sw_zone_info[cll->zidx]; 3666 3667 while (n > 0) { 3668 3669 if (sd->cl != NULL) { 3670 3671 if (sd->nmbuf == 0) { 3672 /* 3673 * Fast recycle without involving any atomics on 3674 * the cluster's metadata (if the cluster has 3675 * metadata). This happens when all frames 3676 * received in the cluster were small enough to 3677 * fit within a single mbuf each. 3678 */ 3679 fl->cl_fast_recycled++; 3680#ifdef INVARIANTS 3681 clm = cl_metadata(sc, fl, &sd->cll, sd->cl); 3682 if (clm != NULL) 3683 MPASS(clm->refcount == 1); 3684#endif 3685 goto recycled_fast; 3686 } 3687 3688 /* 3689 * Cluster is guaranteed to have metadata. Clusters 3690 * without metadata always take the fast recycle path 3691 * when they're recycled. 3692 */ 3693 clm = cl_metadata(sc, fl, &sd->cll, sd->cl); 3694 MPASS(clm != NULL); 3695 3696 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { 3697 fl->cl_recycled++; 3698 counter_u64_add(extfree_rels, 1); 3699 goto recycled; 3700 } 3701 sd->cl = NULL; /* gave up my reference */ 3702 } 3703 MPASS(sd->cl == NULL); 3704alloc: 3705 cl = uma_zalloc(swz->zone, M_NOWAIT); 3706 if (__predict_false(cl == NULL)) { 3707 if (cll == &fl->cll_alt || fl->cll_alt.zidx == -1 || 3708 fl->cll_def.zidx == fl->cll_alt.zidx) 3709 break; 3710 3711 /* fall back to the safe zone */ 3712 cll = &fl->cll_alt; 3713 swz = &sc->sge.sw_zone_info[cll->zidx]; 3714 goto alloc; 3715 } 3716 fl->cl_allocated++; 3717 n--; 3718 3719 pa = pmap_kextract((vm_offset_t)cl); 3720 pa += cll->region1; 3721 sd->cl = cl; 3722 sd->cll = *cll; 3723 *d = htobe64(pa | cll->hwidx); 3724 clm = cl_metadata(sc, fl, cll, cl); 3725 if (clm != NULL) { 3726recycled: 3727#ifdef INVARIANTS 3728 clm->sd = sd; 3729#endif 3730 clm->refcount = 1; 3731 } 3732 sd->nmbuf = 0; 3733recycled_fast: 3734 d++; 3735 sd++; 3736 if (__predict_false(++fl->pidx % 8 == 0)) { 3737 uint16_t pidx = fl->pidx / 8; 3738 3739 if (__predict_false(pidx == fl->sidx)) { 3740 fl->pidx = 0; 3741 pidx = 0; 3742 sd = fl->sdesc; 3743 d = fl->desc; 3744 } 3745 if (pidx == max_pidx) 3746 break; 3747 3748 if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4) 3749 ring_fl_db(sc, fl); 3750 } 3751 } 3752 3753 if (fl->pidx / 8 != fl->dbidx) 3754 ring_fl_db(sc, fl); 3755 3756 return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING)); 3757} 3758 3759/* 3760 * Attempt to refill all starving freelists. 3761 */ 3762static void 3763refill_sfl(void *arg) 3764{ 3765 struct adapter *sc = arg; 3766 struct sge_fl *fl, *fl_temp; 3767 3768 mtx_lock(&sc->sfl_lock); 3769 TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) { 3770 FL_LOCK(fl); 3771 refill_fl(sc, fl, 64); 3772 if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) { 3773 TAILQ_REMOVE(&sc->sfl, fl, link); 3774 fl->flags &= ~FL_STARVING; 3775 } 3776 FL_UNLOCK(fl); 3777 } 3778 3779 if (!TAILQ_EMPTY(&sc->sfl)) 3780 callout_schedule(&sc->sfl_callout, hz / 5); 3781 mtx_unlock(&sc->sfl_lock); 3782} 3783 3784static int 3785alloc_fl_sdesc(struct sge_fl *fl) 3786{ 3787 3788 fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), M_CXGBE, 3789 M_ZERO | M_WAITOK); 3790 3791 return (0); 3792} 3793 3794static void 3795free_fl_sdesc(struct adapter *sc, struct sge_fl *fl) 3796{ 3797 struct fl_sdesc *sd; 3798 struct cluster_metadata *clm; 3799 struct cluster_layout *cll; 3800 int i; 3801 3802 sd = fl->sdesc; 3803 for (i = 0; i < fl->sidx * 8; i++, sd++) { 3804 if (sd->cl == NULL) 3805 continue; 3806 3807 cll = &sd->cll; 3808 clm = cl_metadata(sc, fl, cll, sd->cl); 3809 if (sd->nmbuf == 0) 3810 uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl); 3811 else if (clm && atomic_fetchadd_int(&clm->refcount, -1) == 1) { 3812 uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl); 3813 counter_u64_add(extfree_rels, 1); 3814 } 3815 sd->cl = NULL; 3816 } 3817 3818 free(fl->sdesc, M_CXGBE); 3819 fl->sdesc = NULL; 3820} 3821 3822static inline void 3823get_pkt_gl(struct mbuf *m, struct sglist *gl) 3824{ 3825 int rc; 3826 3827 M_ASSERTPKTHDR(m); 3828 3829 sglist_reset(gl); 3830 rc = sglist_append_mbuf(gl, m); 3831 if (__predict_false(rc != 0)) { 3832 panic("%s: mbuf %p (%d segs) was vetted earlier but now fails " 3833 "with %d.", __func__, m, mbuf_nsegs(m), rc); 3834 } 3835 3836 KASSERT(gl->sg_nseg == mbuf_nsegs(m), 3837 ("%s: nsegs changed for mbuf %p from %d to %d", __func__, m, 3838 mbuf_nsegs(m), gl->sg_nseg)); 3839 KASSERT(gl->sg_nseg > 0 && 3840 gl->sg_nseg <= (needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS), 3841 ("%s: %d segments, should have been 1 <= nsegs <= %d", __func__, 3842 gl->sg_nseg, needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)); 3843} 3844 3845/* 3846 * len16 for a txpkt WR with a GL. Includes the firmware work request header. 3847 */ 3848static inline u_int 3849txpkt_len16(u_int nsegs, u_int tso) 3850{ 3851 u_int n; 3852 3853 MPASS(nsegs > 0); 3854 3855 nsegs--; /* first segment is part of ulptx_sgl */ 3856 n = sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_pkt_core) + 3857 sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 3858 if (tso) 3859 n += sizeof(struct cpl_tx_pkt_lso_core); 3860 3861 return (howmany(n, 16)); 3862} 3863 3864/* 3865 * len16 for a txpkts type 0 WR with a GL. Does not include the firmware work 3866 * request header. 3867 */ 3868static inline u_int 3869txpkts0_len16(u_int nsegs) 3870{ 3871 u_int n; 3872 3873 MPASS(nsegs > 0); 3874 3875 nsegs--; /* first segment is part of ulptx_sgl */ 3876 n = sizeof(struct ulp_txpkt) + sizeof(struct ulptx_idata) + 3877 sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) + 3878 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 3879 3880 return (howmany(n, 16)); 3881} 3882 3883/* 3884 * len16 for a txpkts type 1 WR with a GL. Does not include the firmware work 3885 * request header. 3886 */ 3887static inline u_int 3888txpkts1_len16(void) 3889{ 3890 u_int n; 3891 3892 n = sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl); 3893 3894 return (howmany(n, 16)); 3895} 3896 3897static inline u_int 3898imm_payload(u_int ndesc) 3899{ 3900 u_int n; 3901 3902 n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) - 3903 sizeof(struct cpl_tx_pkt_core); 3904 3905 return (n); 3906} 3907 3908/* 3909 * Write a txpkt WR for this packet to the hardware descriptors, update the 3910 * software descriptor, and advance the pidx. It is guaranteed that enough 3911 * descriptors are available. 3912 * 3913 * The return value is the # of hardware descriptors used. 3914 */ 3915static u_int 3916write_txpkt_wr(struct sge_txq *txq, struct fw_eth_tx_pkt_wr *wr, 3917 struct mbuf *m0, u_int available) 3918{ 3919 struct sge_eq *eq = &txq->eq; 3920 struct tx_sdesc *txsd; 3921 struct cpl_tx_pkt_core *cpl; 3922 uint32_t ctrl; /* used in many unrelated places */ 3923 uint64_t ctrl1; 3924 int len16, ndesc, pktlen, nsegs; 3925 caddr_t dst; 3926 3927 TXQ_LOCK_ASSERT_OWNED(txq); 3928 M_ASSERTPKTHDR(m0); 3929 MPASS(available > 0 && available < eq->sidx); 3930 3931 len16 = mbuf_len16(m0); 3932 nsegs = mbuf_nsegs(m0); 3933 pktlen = m0->m_pkthdr.len; 3934 ctrl = sizeof(struct cpl_tx_pkt_core); 3935 if (needs_tso(m0)) 3936 ctrl += sizeof(struct cpl_tx_pkt_lso_core); 3937 else if (pktlen <= imm_payload(2) && available >= 2) { 3938 /* Immediate data. Recalculate len16 and set nsegs to 0. */ 3939 ctrl += pktlen; 3940 len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + 3941 sizeof(struct cpl_tx_pkt_core) + pktlen, 16); 3942 nsegs = 0; 3943 } 3944 ndesc = howmany(len16, EQ_ESIZE / 16); 3945 MPASS(ndesc <= available); 3946 3947 /* Firmware work request header */ 3948 MPASS(wr == (void *)&eq->desc[eq->pidx]); 3949 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | 3950 V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 3951 3952 ctrl = V_FW_WR_LEN16(len16); 3953 wr->equiq_to_len16 = htobe32(ctrl); 3954 wr->r3 = 0; 3955 3956 if (needs_tso(m0)) { 3957 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 3958 3959 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && 3960 m0->m_pkthdr.l4hlen > 0, 3961 ("%s: mbuf %p needs TSO but missing header lengths", 3962 __func__, m0)); 3963 3964 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | 3965 F_LSO_LAST_SLICE | V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) 3966 | V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); 3967 if (m0->m_pkthdr.l2hlen == sizeof(struct ether_vlan_header)) 3968 ctrl |= V_LSO_ETHHDR_LEN(1); 3969 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) 3970 ctrl |= F_LSO_IPV6; 3971 3972 lso->lso_ctrl = htobe32(ctrl); 3973 lso->ipid_ofst = htobe16(0); 3974 lso->mss = htobe16(m0->m_pkthdr.tso_segsz); 3975 lso->seqno_offset = htobe32(0); 3976 lso->len = htobe32(pktlen); 3977 3978 cpl = (void *)(lso + 1); 3979 3980 txq->tso_wrs++; 3981 } else 3982 cpl = (void *)(wr + 1); 3983 3984 /* Checksum offload */ 3985 ctrl1 = 0; 3986 if (needs_l3_csum(m0) == 0) 3987 ctrl1 |= F_TXPKT_IPCSUM_DIS; 3988 if (needs_l4_csum(m0) == 0) 3989 ctrl1 |= F_TXPKT_L4CSUM_DIS; 3990 if (m0->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | 3991 CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) 3992 txq->txcsum++; /* some hardware assistance provided */ 3993 3994 /* VLAN tag insertion */ 3995 if (needs_vlan_insertion(m0)) { 3996 ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); 3997 txq->vlan_insertion++; 3998 } 3999 4000 /* CPL header */ 4001 cpl->ctrl0 = txq->cpl_ctrl0; 4002 cpl->pack = 0; 4003 cpl->len = htobe16(pktlen); 4004 cpl->ctrl1 = htobe64(ctrl1); 4005 4006 /* SGL */ 4007 dst = (void *)(cpl + 1); 4008 if (nsegs > 0) { 4009 4010 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); 4011 txq->sgl_wrs++; 4012 } else { 4013 struct mbuf *m; 4014 4015 for (m = m0; m != NULL; m = m->m_next) { 4016 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 4017#ifdef INVARIANTS 4018 pktlen -= m->m_len; 4019#endif 4020 } 4021#ifdef INVARIANTS 4022 KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen)); 4023#endif 4024 txq->imm_wrs++; 4025 } 4026 4027 txq->txpkt_wrs++; 4028 4029 txsd = &txq->sdesc[eq->pidx]; 4030 txsd->m = m0; 4031 txsd->desc_used = ndesc; 4032 4033 return (ndesc); 4034} 4035 4036static int 4037try_txpkts(struct mbuf *m, struct mbuf *n, struct txpkts *txp, u_int available) 4038{ 4039 u_int needed, nsegs1, nsegs2, l1, l2; 4040 4041 if (cannot_use_txpkts(m) || cannot_use_txpkts(n)) 4042 return (1); 4043 4044 nsegs1 = mbuf_nsegs(m); 4045 nsegs2 = mbuf_nsegs(n); 4046 if (nsegs1 + nsegs2 == 2) { 4047 txp->wr_type = 1; 4048 l1 = l2 = txpkts1_len16(); 4049 } else { 4050 txp->wr_type = 0; 4051 l1 = txpkts0_len16(nsegs1); 4052 l2 = txpkts0_len16(nsegs2); 4053 } 4054 txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) + l1 + l2; 4055 needed = howmany(txp->len16, EQ_ESIZE / 16); 4056 if (needed > SGE_MAX_WR_NDESC || needed > available) 4057 return (1); 4058 4059 txp->plen = m->m_pkthdr.len + n->m_pkthdr.len; 4060 if (txp->plen > 65535) 4061 return (1); 4062 4063 txp->npkt = 2; 4064 set_mbuf_len16(m, l1); 4065 set_mbuf_len16(n, l2); 4066 4067 return (0); 4068} 4069 4070static int 4071add_to_txpkts(struct mbuf *m, struct txpkts *txp, u_int available) 4072{ 4073 u_int plen, len16, needed, nsegs; 4074 4075 MPASS(txp->wr_type == 0 || txp->wr_type == 1); 4076 4077 nsegs = mbuf_nsegs(m); 4078 if (needs_tso(m) || (txp->wr_type == 1 && nsegs != 1)) 4079 return (1); 4080 4081 plen = txp->plen + m->m_pkthdr.len; 4082 if (plen > 65535) 4083 return (1); 4084 4085 if (txp->wr_type == 0) 4086 len16 = txpkts0_len16(nsegs); 4087 else 4088 len16 = txpkts1_len16(); 4089 needed = howmany(txp->len16 + len16, EQ_ESIZE / 16); 4090 if (needed > SGE_MAX_WR_NDESC || needed > available) 4091 return (1); 4092 4093 txp->npkt++; 4094 txp->plen = plen; 4095 txp->len16 += len16; 4096 set_mbuf_len16(m, len16); 4097 4098 return (0); 4099} 4100 4101/* 4102 * Write a txpkts WR for the packets in txp to the hardware descriptors, update 4103 * the software descriptor, and advance the pidx. It is guaranteed that enough 4104 * descriptors are available. 4105 * 4106 * The return value is the # of hardware descriptors used. 4107 */ 4108static u_int 4109write_txpkts_wr(struct sge_txq *txq, struct fw_eth_tx_pkts_wr *wr, 4110 struct mbuf *m0, const struct txpkts *txp, u_int available) 4111{ 4112 struct sge_eq *eq = &txq->eq; 4113 struct tx_sdesc *txsd; 4114 struct cpl_tx_pkt_core *cpl; 4115 uint32_t ctrl; 4116 uint64_t ctrl1; 4117 int ndesc, checkwrap; 4118 struct mbuf *m; 4119 void *flitp; 4120 4121 TXQ_LOCK_ASSERT_OWNED(txq); 4122 MPASS(txp->npkt > 0); 4123 MPASS(txp->plen < 65536); 4124 MPASS(m0 != NULL); 4125 MPASS(m0->m_nextpkt != NULL); 4126 MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); 4127 MPASS(available > 0 && available < eq->sidx); 4128 4129 ndesc = howmany(txp->len16, EQ_ESIZE / 16); 4130 MPASS(ndesc <= available); 4131 4132 MPASS(wr == (void *)&eq->desc[eq->pidx]); 4133 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); 4134 ctrl = V_FW_WR_LEN16(txp->len16); 4135 wr->equiq_to_len16 = htobe32(ctrl); 4136 wr->plen = htobe16(txp->plen); 4137 wr->npkt = txp->npkt; 4138 wr->r3 = 0; 4139 wr->type = txp->wr_type; 4140 flitp = wr + 1; 4141 4142 /* 4143 * At this point we are 16B into a hardware descriptor. If checkwrap is 4144 * set then we know the WR is going to wrap around somewhere. We'll 4145 * check for that at appropriate points. 4146 */ 4147 checkwrap = eq->sidx - ndesc < eq->pidx; 4148 for (m = m0; m != NULL; m = m->m_nextpkt) { 4149 if (txp->wr_type == 0) { 4150 struct ulp_txpkt *ulpmc; 4151 struct ulptx_idata *ulpsc; 4152 4153 /* ULP master command */ 4154 ulpmc = flitp; 4155 ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | 4156 V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid)); 4157 ulpmc->len = htobe32(mbuf_len16(m)); 4158 4159 /* ULP subcommand */ 4160 ulpsc = (void *)(ulpmc + 1); 4161 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | 4162 F_ULP_TX_SC_MORE); 4163 ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core)); 4164 4165 cpl = (void *)(ulpsc + 1); 4166 if (checkwrap && 4167 (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx]) 4168 cpl = (void *)&eq->desc[0]; 4169 txq->txpkts0_pkts += txp->npkt; 4170 txq->txpkts0_wrs++; 4171 } else { 4172 cpl = flitp; 4173 txq->txpkts1_pkts += txp->npkt; 4174 txq->txpkts1_wrs++; 4175 } 4176 4177 /* Checksum offload */ 4178 ctrl1 = 0; 4179 if (needs_l3_csum(m) == 0) 4180 ctrl1 |= F_TXPKT_IPCSUM_DIS; 4181 if (needs_l4_csum(m) == 0) 4182 ctrl1 |= F_TXPKT_L4CSUM_DIS; 4183 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | 4184 CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) 4185 txq->txcsum++; /* some hardware assistance provided */ 4186 4187 /* VLAN tag insertion */ 4188 if (needs_vlan_insertion(m)) { 4189 ctrl1 |= F_TXPKT_VLAN_VLD | 4190 V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 4191 txq->vlan_insertion++; 4192 } 4193 4194 /* CPL header */ 4195 cpl->ctrl0 = txq->cpl_ctrl0; 4196 cpl->pack = 0; 4197 cpl->len = htobe16(m->m_pkthdr.len); 4198 cpl->ctrl1 = htobe64(ctrl1); 4199 4200 flitp = cpl + 1; 4201 if (checkwrap && 4202 (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) 4203 flitp = (void *)&eq->desc[0]; 4204 4205 write_gl_to_txd(txq, m, (caddr_t *)(&flitp), checkwrap); 4206 4207 } 4208 4209 txsd = &txq->sdesc[eq->pidx]; 4210 txsd->m = m0; 4211 txsd->desc_used = ndesc; 4212 4213 return (ndesc); 4214} 4215 4216/* 4217 * If the SGL ends on an address that is not 16 byte aligned, this function will 4218 * add a 0 filled flit at the end. 4219 */ 4220static void 4221write_gl_to_txd(struct sge_txq *txq, struct mbuf *m, caddr_t *to, int checkwrap) 4222{ 4223 struct sge_eq *eq = &txq->eq; 4224 struct sglist *gl = txq->gl; 4225 struct sglist_seg *seg; 4226 __be64 *flitp, *wrap; 4227 struct ulptx_sgl *usgl; 4228 int i, nflits, nsegs; 4229 4230 KASSERT(((uintptr_t)(*to) & 0xf) == 0, 4231 ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to)); 4232 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); 4233 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); 4234 4235 get_pkt_gl(m, gl); 4236 nsegs = gl->sg_nseg; 4237 MPASS(nsegs > 0); 4238 4239 nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2; 4240 flitp = (__be64 *)(*to); 4241 wrap = (__be64 *)(&eq->desc[eq->sidx]); 4242 seg = &gl->sg_segs[0]; 4243 usgl = (void *)flitp; 4244 4245 /* 4246 * We start at a 16 byte boundary somewhere inside the tx descriptor 4247 * ring, so we're at least 16 bytes away from the status page. There is 4248 * no chance of a wrap around in the middle of usgl (which is 16 bytes). 4249 */ 4250 4251 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 4252 V_ULPTX_NSGE(nsegs)); 4253 usgl->len0 = htobe32(seg->ss_len); 4254 usgl->addr0 = htobe64(seg->ss_paddr); 4255 seg++; 4256 4257 if (checkwrap == 0 || (uintptr_t)(flitp + nflits) <= (uintptr_t)wrap) { 4258 4259 /* Won't wrap around at all */ 4260 4261 for (i = 0; i < nsegs - 1; i++, seg++) { 4262 usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len); 4263 usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr); 4264 } 4265 if (i & 1) 4266 usgl->sge[i / 2].len[1] = htobe32(0); 4267 flitp += nflits; 4268 } else { 4269 4270 /* Will wrap somewhere in the rest of the SGL */ 4271 4272 /* 2 flits already written, write the rest flit by flit */ 4273 flitp = (void *)(usgl + 1); 4274 for (i = 0; i < nflits - 2; i++) { 4275 if (flitp == wrap) 4276 flitp = (void *)eq->desc; 4277 *flitp++ = get_flit(seg, nsegs - 1, i); 4278 } 4279 } 4280 4281 if (nflits & 1) { 4282 MPASS(((uintptr_t)flitp) & 0xf); 4283 *flitp++ = 0; 4284 } 4285 4286 MPASS((((uintptr_t)flitp) & 0xf) == 0); 4287 if (__predict_false(flitp == wrap)) 4288 *to = (void *)eq->desc; 4289 else 4290 *to = (void *)flitp; 4291} 4292 4293static inline void 4294copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len) 4295{ 4296 4297 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); 4298 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); 4299 4300 if (__predict_true((uintptr_t)(*to) + len <= 4301 (uintptr_t)&eq->desc[eq->sidx])) { 4302 bcopy(from, *to, len); 4303 (*to) += len; 4304 } else { 4305 int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to); 4306 4307 bcopy(from, *to, portion); 4308 from += portion; 4309 portion = len - portion; /* remaining */ 4310 bcopy(from, (void *)eq->desc, portion); 4311 (*to) = (caddr_t)eq->desc + portion; 4312 } 4313} 4314 4315static inline void 4316ring_eq_db(struct adapter *sc, struct sge_eq *eq, u_int n) 4317{ 4318 u_int db; 4319 4320 MPASS(n > 0); 4321 4322 db = eq->doorbells; 4323 if (n > 1) 4324 clrbit(&db, DOORBELL_WCWR); 4325 wmb(); 4326 4327 switch (ffs(db) - 1) { 4328 case DOORBELL_UDB: 4329 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); 4330 break; 4331 4332 case DOORBELL_WCWR: { 4333 volatile uint64_t *dst, *src; 4334 int i; 4335 4336 /* 4337 * Queues whose 128B doorbell segment fits in the page do not 4338 * use relative qid (udb_qid is always 0). Only queues with 4339 * doorbell segments can do WCWR. 4340 */ 4341 KASSERT(eq->udb_qid == 0 && n == 1, 4342 ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p", 4343 __func__, eq->doorbells, n, eq->dbidx, eq)); 4344 4345 dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET - 4346 UDBS_DB_OFFSET); 4347 i = eq->dbidx; 4348 src = (void *)&eq->desc[i]; 4349 while (src != (void *)&eq->desc[i + 1]) 4350 *dst++ = *src++; 4351 wmb(); 4352 break; 4353 } 4354 4355 case DOORBELL_UDBWC: 4356 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); 4357 wmb(); 4358 break; 4359 4360 case DOORBELL_KDB: 4361 t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), 4362 V_QID(eq->cntxt_id) | V_PIDX(n)); 4363 break; 4364 } 4365 4366 IDXINCR(eq->dbidx, n, eq->sidx); 4367} 4368 4369static inline u_int 4370reclaimable_tx_desc(struct sge_eq *eq) 4371{ 4372 uint16_t hw_cidx; 4373 4374 hw_cidx = read_hw_cidx(eq); 4375 return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx)); 4376} 4377 4378static inline u_int 4379total_available_tx_desc(struct sge_eq *eq) 4380{ 4381 uint16_t hw_cidx, pidx; 4382 4383 hw_cidx = read_hw_cidx(eq); 4384 pidx = eq->pidx; 4385 4386 if (pidx == hw_cidx) 4387 return (eq->sidx - 1); 4388 else 4389 return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1); 4390} 4391 4392static inline uint16_t 4393read_hw_cidx(struct sge_eq *eq) 4394{ 4395 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 4396 uint16_t cidx = spg->cidx; /* stable snapshot */ 4397 4398 return (be16toh(cidx)); 4399} 4400 4401/* 4402 * Reclaim 'n' descriptors approximately. 4403 */ 4404static u_int 4405reclaim_tx_descs(struct sge_txq *txq, u_int n) 4406{ 4407 struct tx_sdesc *txsd; 4408 struct sge_eq *eq = &txq->eq; 4409 u_int can_reclaim, reclaimed; 4410 4411 TXQ_LOCK_ASSERT_OWNED(txq); 4412 MPASS(n > 0); 4413 4414 reclaimed = 0; 4415 can_reclaim = reclaimable_tx_desc(eq); 4416 while (can_reclaim && reclaimed < n) { 4417 int ndesc; 4418 struct mbuf *m, *nextpkt; 4419 4420 txsd = &txq->sdesc[eq->cidx]; 4421 ndesc = txsd->desc_used; 4422 4423 /* Firmware doesn't return "partial" credits. */ 4424 KASSERT(can_reclaim >= ndesc, 4425 ("%s: unexpected number of credits: %d, %d", 4426 __func__, can_reclaim, ndesc)); 4427 4428 for (m = txsd->m; m != NULL; m = nextpkt) { 4429 nextpkt = m->m_nextpkt; 4430 m->m_nextpkt = NULL; 4431 m_freem(m); 4432 } 4433 reclaimed += ndesc; 4434 can_reclaim -= ndesc; 4435 IDXINCR(eq->cidx, ndesc, eq->sidx); 4436 } 4437 4438 return (reclaimed); 4439} 4440 4441static void 4442tx_reclaim(void *arg, int n) 4443{ 4444 struct sge_txq *txq = arg; 4445 struct sge_eq *eq = &txq->eq; 4446 4447 do { 4448 if (TXQ_TRYLOCK(txq) == 0) 4449 break; 4450 n = reclaim_tx_descs(txq, 32); 4451 if (eq->cidx == eq->pidx) 4452 eq->equeqidx = eq->pidx; 4453 TXQ_UNLOCK(txq); 4454 } while (n > 0); 4455} 4456 4457static __be64 4458get_flit(struct sglist_seg *segs, int nsegs, int idx) 4459{ 4460 int i = (idx / 3) * 2; 4461 4462 switch (idx % 3) { 4463 case 0: { 4464 __be64 rc; 4465 4466 rc = htobe32(segs[i].ss_len); 4467 if (i + 1 < nsegs) 4468 rc |= (uint64_t)htobe32(segs[i + 1].ss_len) << 32; 4469 4470 return (rc); 4471 } 4472 case 1: 4473 return (htobe64(segs[i].ss_paddr)); 4474 case 2: 4475 return (htobe64(segs[i + 1].ss_paddr)); 4476 } 4477 4478 return (0); 4479} 4480 4481static void 4482find_best_refill_source(struct adapter *sc, struct sge_fl *fl, int maxp) 4483{ 4484 int8_t zidx, hwidx, idx; 4485 uint16_t region1, region3; 4486 int spare, spare_needed, n; 4487 struct sw_zone_info *swz; 4488 struct hw_buf_info *hwb, *hwb_list = &sc->sge.hw_buf_info[0]; 4489 4490 /* 4491 * Buffer Packing: Look for PAGE_SIZE or larger zone which has a bufsize 4492 * large enough for the max payload and cluster metadata. Otherwise 4493 * settle for the largest bufsize that leaves enough room in the cluster 4494 * for metadata. 4495 * 4496 * Without buffer packing: Look for the smallest zone which has a 4497 * bufsize large enough for the max payload. Settle for the largest 4498 * bufsize available if there's nothing big enough for max payload. 4499 */ 4500 spare_needed = fl->flags & FL_BUF_PACKING ? CL_METADATA_SIZE : 0; 4501 swz = &sc->sge.sw_zone_info[0]; 4502 hwidx = -1; 4503 for (zidx = 0; zidx < SW_ZONE_SIZES; zidx++, swz++) { 4504 if (swz->size > largest_rx_cluster) { 4505 if (__predict_true(hwidx != -1)) 4506 break; 4507 4508 /* 4509 * This is a misconfiguration. largest_rx_cluster is 4510 * preventing us from finding a refill source. See 4511 * dev.t5nex.<n>.buffer_sizes to figure out why. 4512 */ 4513 device_printf(sc->dev, "largest_rx_cluster=%u leaves no" 4514 " refill source for fl %p (dma %u). Ignored.\n", 4515 largest_rx_cluster, fl, maxp); 4516 } 4517 for (idx = swz->head_hwidx; idx != -1; idx = hwb->next) { 4518 hwb = &hwb_list[idx]; 4519 spare = swz->size - hwb->size; 4520 if (spare < spare_needed) 4521 continue; 4522 4523 hwidx = idx; /* best option so far */ 4524 if (hwb->size >= maxp) { 4525 4526 if ((fl->flags & FL_BUF_PACKING) == 0) 4527 goto done; /* stop looking (not packing) */ 4528 4529 if (swz->size >= safest_rx_cluster) 4530 goto done; /* stop looking (packing) */ 4531 } 4532 break; /* keep looking, next zone */ 4533 } 4534 } 4535done: 4536 /* A usable hwidx has been located. */ 4537 MPASS(hwidx != -1); 4538 hwb = &hwb_list[hwidx]; 4539 zidx = hwb->zidx; 4540 swz = &sc->sge.sw_zone_info[zidx]; 4541 region1 = 0; 4542 region3 = swz->size - hwb->size; 4543 4544 /* 4545 * Stay within this zone and see if there is a better match when mbuf 4546 * inlining is allowed. Remember that the hwidx's are sorted in 4547 * decreasing order of size (so in increasing order of spare area). 4548 */ 4549 for (idx = hwidx; idx != -1; idx = hwb->next) { 4550 hwb = &hwb_list[idx]; 4551 spare = swz->size - hwb->size; 4552 4553 if (allow_mbufs_in_cluster == 0 || hwb->size < maxp) 4554 break; 4555 4556 /* 4557 * Do not inline mbufs if doing so would violate the pad/pack 4558 * boundary alignment requirement. 4559 */ 4560 if (fl_pad && (MSIZE % sc->sge.pad_boundary) != 0) 4561 continue; 4562 if (fl->flags & FL_BUF_PACKING && 4563 (MSIZE % sc->sge.pack_boundary) != 0) 4564 continue; 4565 4566 if (spare < CL_METADATA_SIZE + MSIZE) 4567 continue; 4568 n = (spare - CL_METADATA_SIZE) / MSIZE; 4569 if (n > howmany(hwb->size, maxp)) 4570 break; 4571 4572 hwidx = idx; 4573 if (fl->flags & FL_BUF_PACKING) { 4574 region1 = n * MSIZE; 4575 region3 = spare - region1; 4576 } else { 4577 region1 = MSIZE; 4578 region3 = spare - region1; 4579 break; 4580 } 4581 } 4582 4583 KASSERT(zidx >= 0 && zidx < SW_ZONE_SIZES, 4584 ("%s: bad zone %d for fl %p, maxp %d", __func__, zidx, fl, maxp)); 4585 KASSERT(hwidx >= 0 && hwidx <= SGE_FLBUF_SIZES, 4586 ("%s: bad hwidx %d for fl %p, maxp %d", __func__, hwidx, fl, maxp)); 4587 KASSERT(region1 + sc->sge.hw_buf_info[hwidx].size + region3 == 4588 sc->sge.sw_zone_info[zidx].size, 4589 ("%s: bad buffer layout for fl %p, maxp %d. " 4590 "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, 4591 sc->sge.sw_zone_info[zidx].size, region1, 4592 sc->sge.hw_buf_info[hwidx].size, region3)); 4593 if (fl->flags & FL_BUF_PACKING || region1 > 0) { 4594 KASSERT(region3 >= CL_METADATA_SIZE, 4595 ("%s: no room for metadata. fl %p, maxp %d; " 4596 "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, 4597 sc->sge.sw_zone_info[zidx].size, region1, 4598 sc->sge.hw_buf_info[hwidx].size, region3)); 4599 KASSERT(region1 % MSIZE == 0, 4600 ("%s: bad mbuf region for fl %p, maxp %d. " 4601 "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, 4602 sc->sge.sw_zone_info[zidx].size, region1, 4603 sc->sge.hw_buf_info[hwidx].size, region3)); 4604 } 4605 4606 fl->cll_def.zidx = zidx; 4607 fl->cll_def.hwidx = hwidx; 4608 fl->cll_def.region1 = region1; 4609 fl->cll_def.region3 = region3; 4610} 4611 4612static void 4613find_safe_refill_source(struct adapter *sc, struct sge_fl *fl) 4614{ 4615 struct sge *s = &sc->sge; 4616 struct hw_buf_info *hwb; 4617 struct sw_zone_info *swz; 4618 int spare; 4619 int8_t hwidx; 4620 4621 if (fl->flags & FL_BUF_PACKING) 4622 hwidx = s->safe_hwidx2; /* with room for metadata */ 4623 else if (allow_mbufs_in_cluster && s->safe_hwidx2 != -1) { 4624 hwidx = s->safe_hwidx2; 4625 hwb = &s->hw_buf_info[hwidx]; 4626 swz = &s->sw_zone_info[hwb->zidx]; 4627 spare = swz->size - hwb->size; 4628 4629 /* no good if there isn't room for an mbuf as well */ 4630 if (spare < CL_METADATA_SIZE + MSIZE) 4631 hwidx = s->safe_hwidx1; 4632 } else 4633 hwidx = s->safe_hwidx1; 4634 4635 if (hwidx == -1) { 4636 /* No fallback source */ 4637 fl->cll_alt.hwidx = -1; 4638 fl->cll_alt.zidx = -1; 4639 4640 return; 4641 } 4642 4643 hwb = &s->hw_buf_info[hwidx]; 4644 swz = &s->sw_zone_info[hwb->zidx]; 4645 spare = swz->size - hwb->size; 4646 fl->cll_alt.hwidx = hwidx; 4647 fl->cll_alt.zidx = hwb->zidx; 4648 if (allow_mbufs_in_cluster && 4649 (fl_pad == 0 || (MSIZE % sc->sge.pad_boundary) == 0)) 4650 fl->cll_alt.region1 = ((spare - CL_METADATA_SIZE) / MSIZE) * MSIZE; 4651 else 4652 fl->cll_alt.region1 = 0; 4653 fl->cll_alt.region3 = spare - fl->cll_alt.region1; 4654} 4655 4656static void 4657add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl) 4658{ 4659 mtx_lock(&sc->sfl_lock); 4660 FL_LOCK(fl); 4661 if ((fl->flags & FL_DOOMED) == 0) { 4662 fl->flags |= FL_STARVING; 4663 TAILQ_INSERT_TAIL(&sc->sfl, fl, link); 4664 callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc); 4665 } 4666 FL_UNLOCK(fl); 4667 mtx_unlock(&sc->sfl_lock); 4668} 4669 4670static void 4671handle_wrq_egr_update(struct adapter *sc, struct sge_eq *eq) 4672{ 4673 struct sge_wrq *wrq = (void *)eq; 4674 4675 atomic_readandclear_int(&eq->equiq); 4676 taskqueue_enqueue(sc->tq[eq->tx_chan], &wrq->wrq_tx_task); 4677} 4678 4679static void 4680handle_eth_egr_update(struct adapter *sc, struct sge_eq *eq) 4681{ 4682 struct sge_txq *txq = (void *)eq; 4683 4684 MPASS((eq->flags & EQ_TYPEMASK) == EQ_ETH); 4685 4686 atomic_readandclear_int(&eq->equiq); 4687 mp_ring_check_drainage(txq->r, 0); 4688 taskqueue_enqueue(sc->tq[eq->tx_chan], &txq->tx_reclaim_task); 4689} 4690 4691static int 4692handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss, 4693 struct mbuf *m) 4694{ 4695 const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1); 4696 unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid)); 4697 struct adapter *sc = iq->adapter; 4698 struct sge *s = &sc->sge; 4699 struct sge_eq *eq; 4700 static void (*h[])(struct adapter *, struct sge_eq *) = {NULL, 4701 &handle_wrq_egr_update, &handle_eth_egr_update, 4702 &handle_wrq_egr_update}; 4703 4704 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 4705 rss->opcode)); 4706 4707 eq = s->eqmap[qid - s->eq_start]; 4708 (*h[eq->flags & EQ_TYPEMASK])(sc, eq); 4709 4710 return (0); 4711} 4712 4713/* handle_fw_msg works for both fw4_msg and fw6_msg because this is valid */ 4714CTASSERT(offsetof(struct cpl_fw4_msg, data) == \ 4715 offsetof(struct cpl_fw6_msg, data)); 4716 4717static int 4718handle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 4719{ 4720 struct adapter *sc = iq->adapter; 4721 const struct cpl_fw6_msg *cpl = (const void *)(rss + 1); 4722 4723 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 4724 rss->opcode)); 4725 4726 if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) { 4727 const struct rss_header *rss2; 4728 4729 rss2 = (const struct rss_header *)&cpl->data[0]; 4730 return (sc->cpl_handler[rss2->opcode](iq, rss2, m)); 4731 } 4732 4733 return (sc->fw_msg_handler[cpl->type](sc, &cpl->data[0])); 4734} 4735 4736static int 4737sysctl_uint16(SYSCTL_HANDLER_ARGS) 4738{ 4739 uint16_t *id = arg1; 4740 int i = *id; 4741 4742 return sysctl_handle_int(oidp, &i, 0, req); 4743} 4744 4745static int 4746sysctl_bufsizes(SYSCTL_HANDLER_ARGS) 4747{ 4748 struct sge *s = arg1; 4749 struct hw_buf_info *hwb = &s->hw_buf_info[0]; 4750 struct sw_zone_info *swz = &s->sw_zone_info[0]; 4751 int i, rc; 4752 struct sbuf sb; 4753 char c; 4754 4755 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); 4756 for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) { 4757 if (hwb->zidx >= 0 && swz[hwb->zidx].size <= largest_rx_cluster) 4758 c = '*'; 4759 else 4760 c = '\0'; 4761 4762 sbuf_printf(&sb, "%u%c ", hwb->size, c); 4763 } 4764 sbuf_trim(&sb); 4765 sbuf_finish(&sb); 4766 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 4767 sbuf_delete(&sb); 4768 return (rc); 4769} 4770