en_tx.c revision 324685
/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define	LINUXKPI_PARAM_PREFIX mlx4_

#include <linux/page.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>

#include "mlx4_en.h"
#include "utils.h"

enum {
	MAX_INLINE = 104,	/* 128 - 16 - 4 - 4 */
	MAX_BF = 256,
	MIN_PKT_LEN = 17,
};

static int inline_thold __read_mostly = MAX_INLINE;

module_param_named(inline_thold, inline_thold, uint, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");

int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
    struct mlx4_en_tx_ring **pring, u32 size,
    u16 stride, int node, int queue_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	uint32_t x;
	int tmp;
	int err;

	ring = kzalloc_node(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed allocating TX ring\n");
			return -ENOMEM;
		}
	}

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,					/* any alignment */
	    0,					/* no boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MLX4_EN_TX_MAX_PAYLOAD_SIZE,	/* maxsize */
	    MLX4_EN_TX_MAX_MBUF_FRAGS,		/* nsegments */
	    MLX4_EN_TX_MAX_MBUF_SIZE,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &ring->dma_tag)))
		goto done;

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->inline_thold = MAX(MIN_PKT_LEN, MIN(inline_thold, MAX_INLINE));
	mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF);
	mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL,
	    MTX_DEF);

	/* Allocate the buf ring */
	ring->br = buf_ring_alloc(MLX4_EN_DEF_TX_QUEUE_SIZE, M_DEVBUF,
	    M_WAITOK, &ring->tx_lock.m);
	if (ring->br == NULL) {
		en_err(priv, "Failed allocating tx_info ring\n");
		err = -ENOMEM;
		goto err_free_dma_tag;
	}

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = kzalloc_node(tmp, GFP_KERNEL, node);
	if (!ring->tx_info) {
		ring->tx_info = kzalloc(tmp, GFP_KERNEL);
		if (!ring->tx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	/* Create DMA descriptor MAPs */
	for (x = 0; x != size; x++) {
		err = -bus_dmamap_create(ring->dma_tag, 0,
		    &ring->tx_info[x].dma_map);
		if (err != 0) {
			while (x--) {
				bus_dmamap_destroy(ring->dma_tag,
				    ring->tx_info[x].dma_map);
			}
			goto err_info;
		}
	}

	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
	    ring->tx_info, tmp);

	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	/* Allocate HW buffers on provided NUMA node */
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
	    2 * PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_dma_map;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	    "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	    ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
	    MLX4_RESERVE_BF_QP);
	if (err) {
		en_err(priv, "failed reserving qp for TX ring\n");
		goto err_map;
	}

	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_reserve;
	}
	ring->qp.event = mlx4_en_sqp_event;

	err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
	if (err) {
		en_dbg(DRV, priv, "working without blueflame (%d)", err);
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
	} else
		ring->bf_enabled = true;
	ring->queue_index = queue_idx;
	if (queue_idx < priv->num_tx_rings_p_up)
		CPU_SET(queue_idx, &ring->affinity_mask);

	*pring = ring;
	return 0;

err_reserve:
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_dma_map:
	for (x = 0; x != size; x++)
		bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map);
err_info:
	vfree(ring->tx_info);
err_ring:
	buf_ring_free(ring->br, M_DEVBUF);
err_free_dma_tag:
	bus_dma_tag_destroy(ring->dma_tag);
done:
	kfree(ring);
	return err;
}

void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
    struct mlx4_en_tx_ring **pring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring = *pring;
	uint32_t x;
	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	buf_ring_free(ring->br, M_DEVBUF);
	if (ring->bf_enabled)
		mlx4_bf_free(mdev->dev, &ring->bf);
	mlx4_qp_remove(mdev->dev, &ring->qp);
	mlx4_qp_free(mdev->dev, &ring->qp);
	mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	for (x = 0; x != ring->size; x++)
		bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map);
	vfree(ring->tx_info);
	mtx_destroy(&ring->tx_lock.m);
	mtx_destroy(&ring->comp_lock.m);
	bus_dma_tag_destroy(ring->dma_tag);
	kfree(ring);
	*pring = NULL;
}

int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
    struct mlx4_en_tx_ring *ring,
    int cq, int user_prio)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	ring->cqn = cq;
	ring->prod = 0;
	ring->cons = 0xffffffff;
	ring->last_nr_txbb = 1;
	ring->poll_cnt = 0;
	ring->blocked = 0;
	memset(ring->buf, 0, ring->buf_size);

	ring->qp_state = MLX4_QP_STATE_RST;
	ring->doorbell_qpn = ring->qp.qpn << 8;

	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
	    ring->cqn, user_prio, &ring->context);
	if (ring->bf_enabled)
		ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
	    &ring->qp, &ring->qp_state);
	return err;
}

void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
    struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
	    MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}

static volatile struct mlx4_wqe_data_seg *
mlx4_en_store_inline_lso_data(volatile struct mlx4_wqe_data_seg *dseg,
    struct mbuf *mb, int len, __be32 owner_bit)
{
	uint8_t *inl = __DEVOLATILE(uint8_t *, dseg);

	/* copy data into place */
	m_copydata(mb, 0, len, inl + 4);
	dseg += DIV_ROUND_UP(4 + len, DS_SIZE_ALIGNMENT);
	return (dseg);
}

static void
mlx4_en_store_inline_lso_header(volatile struct mlx4_wqe_data_seg *dseg,
    int len, __be32 owner_bit)
{
}

static void
mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
    struct mlx4_en_tx_ring *ring, u32 index, u8 owner)
{
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = (struct mlx4_en_tx_desc *)
	    (ring->buf + (index * TXBB_SIZE));
	volatile __be32 *ptr = (__be32 *)tx_desc;
	const __be32 stamp = cpu_to_be32(STAMP_VAL |
	    ((u32)owner << STAMP_SHIFT));
	u32 i;

	/* Stamp the freed descriptor */
	for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
		*ptr = stamp;
		ptr += STAMP_DWORDS;
	}
}

static u32
mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
    struct mlx4_en_tx_ring *ring, u32 index)
{
	struct mlx4_en_tx_info *tx_info;
	struct mbuf *mb;

	tx_info = &ring->tx_info[index];
	mb = tx_info->mb;

	if (mb == NULL)
		goto done;

	bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->dma_tag, tx_info->dma_map);

	m_freem(mb);
done:
	return (tx_info->nr_txbb);
}

int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Skip last polled descriptor */
	ring->cons += ring->last_nr_txbb;
	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
	    ring->cons, ring->prod);

	if ((u32) (ring->prod - ring->cons) > ring->size) {
		en_warn(priv, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
		    ring->cons & ring->size_mask);
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (cnt)
		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}

static bool
mlx4_en_tx_ring_is_full(struct mlx4_en_tx_ring *ring)
{
	int wqs;
	wqs = ring->size - (ring->prod - ring->cons);
	return (wqs < (HEADROOM + (2 * MLX4_EN_TX_WQE_MAX_WQEBBS)));
}

static int mlx4_en_process_tx_cq(struct net_device *dev,
    struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe;
	u16 index;
	u16 new_index, ring_index, stamp_index;
	u32 txbbs_skipped = 0;
	u32 txbbs_stamp = 0;
	u32 cons_index = mcq->cons_index;
	int size = cq->size;
	u32 size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;
	int factor = priv->cqe_factor;

	if (!priv->port_up)
		return 0;

	index = cons_index & size_mask;
	cqe = &buf[(index << factor) + factor];
	ring_index = ring->cons & size_mask;
	stamp_index = ring_index;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
	    cons_index & size)) {
		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		rmb();

		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		    MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrome: 0x%x syndrome: 0x%x\n",
			    ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
			    ((struct mlx4_err_cqe *)cqe)->syndrome);
		}

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			txbbs_skipped += ring->last_nr_txbb;
			ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
			/* free next descriptor */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
			    priv, ring, ring_index);
			mlx4_en_stamp_wqe(priv, ring, stamp_index,
			    !!((ring->cons + txbbs_stamp) &
			    ring->size));
			stamp_index = ring_index;
			txbbs_stamp = txbbs_skipped;
		} while (ring_index != new_index);

		++cons_index;
		index = cons_index & size_mask;
		cqe = &buf[(index << factor) + factor];
	}

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;

	/* Wakeup Tx queue if it was stopped and ring is not full */
	if (unlikely(ring->blocked) && !mlx4_en_tx_ring_is_full(ring)) {
		ring->blocked = 0;
		if (atomic_fetchadd_int(&priv->blocked, -1) == 1)
			atomic_clear_int(&dev->if_drv_flags, IFF_DRV_OACTIVE);
		ring->wake_queue++;
		priv->port_stats.wake_queue++;
	}
	return (0);
}

void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];

	if (priv->port_up == 0 || !spin_trylock(&ring->comp_lock))
		return;
	mlx4_en_process_tx_cq(cq->dev, cq);
	mod_timer(&cq->timer, jiffies + 1);
	spin_unlock(&ring->comp_lock);
}

void mlx4_en_poll_tx_cq(unsigned long data)
{
	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
	u32 inflight;

	INC_PERF_COUNTER(priv->pstats.tx_poll);

	if (priv->port_up == 0)
		return;
	if (!spin_trylock(&ring->comp_lock)) {
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
		return;
	}
	mlx4_en_process_tx_cq(cq->dev, cq);
	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

	/*
	 * If there are still packets in flight and the timer has not already
	 * been scheduled by the Tx routine then schedule it here to guarantee
	 * completion processing of these packets.
	 */
	if (inflight && priv->port_up)
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	spin_unlock(&ring->comp_lock);
}

static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
	struct mlx4_en_cq *cq = priv->tx_cq[tx_ind];
	struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];

	if (priv->port_up == 0)
		return;

	/*
	 * If we don't have a pending timer, set one up to catch our recent
	 * post in case the interface becomes idle.
	 */
	if (!timer_pending(&cq->timer))
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	/* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
		if (spin_trylock(&ring->comp_lock)) {
			mlx4_en_process_tx_cq(priv->dev, cq);
			spin_unlock(&ring->comp_lock);
		}
}

static u16
mlx4_en_get_inline_hdr_size(struct mlx4_en_tx_ring *ring, struct mbuf *mb)
{
	u16 retval;

	/* only copy from first fragment, if possible */
	retval = MIN(ring->inline_thold, mb->m_len);

	/* check for too little data */
	if (unlikely(retval < MIN_PKT_LEN))
		retval = MIN(ring->inline_thold, mb->m_pkthdr.len);
	return (retval);
}

static int
mlx4_en_get_header_size(struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	struct ip *ip;
	int ip_hlen, tcp_hlen;
	struct ip6_hdr *ip6;
	uint16_t eth_type;
	int eth_hdr_len;

	eh = mtod(mb, struct ether_vlan_header *);
	if (mb->m_len < ETHER_HDR_LEN)
		return (0);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}
	if (mb->m_len < eth_hdr_len)
		return (0);
	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip))
			return (0);
		if (ip->ip_p != IPPROTO_TCP)
			return (0);
		ip_hlen = ip->ip_hl << 2;
		eth_hdr_len += ip_hlen;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip6))
			return (0);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (0);
		eth_hdr_len += sizeof(*ip6);
		break;
	default:
		return (0);
	}
	if (mb->m_len < eth_hdr_len + sizeof(*th))
		return (0);
	th = (struct tcphdr *)(mb->m_data + eth_hdr_len);
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
	if (mb->m_len < eth_hdr_len)
		return (0);
	return (eth_hdr_len);
}

static volatile struct mlx4_wqe_data_seg *
mlx4_en_store_inline_data(volatile struct mlx4_wqe_data_seg *dseg,
    struct mbuf *mb, int len, __be32 owner_bit)
{
	uint8_t *inl = __DEVOLATILE(uint8_t *, dseg);
	const int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - 4;

	if (unlikely(len < MIN_PKT_LEN)) {
		m_copydata(mb, 0, len, inl + 4);
		memset(inl + 4 + len, 0, MIN_PKT_LEN - len);
		dseg += DIV_ROUND_UP(4 + MIN_PKT_LEN, DS_SIZE_ALIGNMENT);
	} else if (len <= spc) {
		m_copydata(mb, 0, len, inl + 4);
		dseg += DIV_ROUND_UP(4 + len, DS_SIZE_ALIGNMENT);
	} else {
		m_copydata(mb, 0, spc, inl + 4);
		m_copydata(mb, spc, len - spc, inl + 8 + spc);
		dseg += DIV_ROUND_UP(8 + len, DS_SIZE_ALIGNMENT);
	}
	return (dseg);
}

static void
mlx4_en_store_inline_header(volatile struct mlx4_wqe_data_seg *dseg,
    int len, __be32 owner_bit)
{
	uint8_t *inl = __DEVOLATILE(uint8_t *, dseg);
	const int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - 4;

	if (unlikely(len < MIN_PKT_LEN)) {
		*(volatile uint32_t *)inl =
		    SET_BYTE_COUNT((1 << 31) | MIN_PKT_LEN);
	} else if (len <= spc) {
		*(volatile uint32_t *)inl =
		    SET_BYTE_COUNT((1 << 31) | len);
	} else {
		*(volatile uint32_t *)(inl + 4 + spc) =
		    SET_BYTE_COUNT((1 << 31) | (len - spc));
		wmb();
		*(volatile uint32_t *)inl =
		    SET_BYTE_COUNT((1 << 31) | spc);
	}
}

static unsigned long hashrandom;
static void hashrandom_init(void *arg)
{
	hashrandom = random();
}
SYSINIT(hashrandom_init, SI_SUB_KLD, SI_ORDER_SECOND, &hashrandom_init, NULL);

u16 mlx4_en_select_queue(struct net_device *dev, struct mbuf *mb)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u32 rings_p_up = priv->num_tx_rings_p_up;
	u32 up = 0;
	u32 queue_index;

#if (MLX4_EN_NUM_UP > 1)
	/* Obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		u32 vlan_tag = mb->m_pkthdr.ether_vtag;
		up = (vlan_tag >> 13) % MLX4_EN_NUM_UP;
	}
#endif
	queue_index = mlx4_en_hashmbuf(MLX4_F_HASHL3 | MLX4_F_HASHL4, mb, hashrandom);

	return ((queue_index % rings_p_up) + (up * rings_p_up));
}

static void mlx4_bf_copy(void __iomem *dst, volatile unsigned long *src, unsigned bytecnt)
{
	__iowrite64_copy(dst, __DEVOLATILE(void *, src), bytecnt / 8);
}

static int mlx4_en_xmit(struct mlx4_en_priv *priv, int tx_ind, struct mbuf **mbp)
{
	enum {
		DS_FACT = TXBB_SIZE / DS_SIZE_ALIGNMENT,
		CTRL_FLAGS = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
		    MLX4_WQE_CTRL_SOLICITED),
	};
	bus_dma_segment_t segs[MLX4_EN_TX_MAX_MBUF_FRAGS];
	volatile struct mlx4_wqe_data_seg *dseg;
	volatile struct
	    mlx4_wqe_data_seg *dseg_inline;
	volatile struct mlx4_en_tx_desc *tx_desc;
	struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];
	struct ifnet *ifp = priv->dev;
	struct mlx4_en_tx_info *tx_info;
	struct mbuf *mb = *mbp;
	struct mbuf *m;
	__be32 owner_bit;
	int nr_segs;
	int pad;
	int err;
	u32 bf_size;
	u32 bf_prod;
	u32 opcode;
	u16 index;
	u16 ds_cnt;
	u16 ihs;

	if (unlikely(!priv->port_up)) {
		err = EINVAL;
		goto tx_drop;
	}

	/* check if TX ring is full */
	if (unlikely(mlx4_en_tx_ring_is_full(ring))) {
		/* every full native Tx ring stops queue */
		if (ring->blocked == 0)
			atomic_add_int(&priv->blocked, 1);
		/* Set HW-queue-is-full flag */
		atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
		priv->port_stats.queue_stopped++;
		ring->blocked = 1;
		ring->queue_stopped++;

		/* Use interrupts to find out when queue opened */
		mlx4_en_arm_cq(priv, priv->tx_cq[tx_ind]);
		return (ENOBUFS);
	}

	/* sanity check we are not wrapping around */
	KASSERT(((~ring->prod) & ring->size_mask) >=
	    (MLX4_EN_TX_WQE_MAX_WQEBBS - 1), ("Wrapping around TX ring"));

	/* Track current inflight packets for performance analysis */
	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
	    (u32) (ring->prod - ring->cons - 1));

	/* Track current mbuf packet header length */
	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, mb->m_pkthdr.len);

	/* Grab an index and try to transmit packet */
	owner_bit = (ring->prod & ring->size) ?
	    cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0;
	index = ring->prod & ring->size_mask;
	tx_desc = (volatile struct mlx4_en_tx_desc *)
	    (ring->buf + index * TXBB_SIZE);
	tx_info = &ring->tx_info[index];
	dseg = &tx_desc->data;

	/* send a copy of the frame to the BPF listener, if any */
	if (ifp != NULL && ifp->if_bpf != NULL)
		ETHER_BPF_MTAP(ifp, mb);

	/* get default flags */
	tx_desc->ctrl.srcrb_flags = CTRL_FLAGS;

	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO))
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);

	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_TCP_UDP_CSUM);

	/* do statistics */
	if (likely(tx_desc->ctrl.srcrb_flags != CTRL_FLAGS)) {
		priv->port_stats.tx_chksum_offload++;
		ring->tx_csum++;
	}

	/* check for VLAN tag */
	if (mb->m_flags & M_VLANTAG) {
		tx_desc->ctrl.vlan_tag = cpu_to_be16(mb->m_pkthdr.ether_vtag);
		tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN;
	} else {
		tx_desc->ctrl.vlan_tag = 0;
		tx_desc->ctrl.ins_vlan = 0;
	}

	if (unlikely(mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback)) {
		/*
		 * Copy destination MAC address to WQE.
		 * This allows
		 * loopback in eSwitch, so that VFs and PF can
		 * communicate with each other:
		 */
		m_copydata(mb, 0, 2, __DEVOLATILE(void *, &tx_desc->ctrl.srcrb_flags16[0]));
		m_copydata(mb, 2, 4, __DEVOLATILE(void *, &tx_desc->ctrl.imm));
	} else {
		/* clear immediate field */
		tx_desc->ctrl.imm = 0;
	}

	/* Handle LSO (TSO) packets */
	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		u32 payload_len;
		u32 mss = mb->m_pkthdr.tso_segsz;
		u32 num_pkts;

		opcode = cpu_to_be32(MLX4_OPCODE_LSO | MLX4_WQE_CTRL_RR) |
		    owner_bit;
		ihs = mlx4_en_get_header_size(mb);
		if (unlikely(ihs > MAX_INLINE)) {
			ring->oversized_packets++;
			err = EINVAL;
			goto tx_drop;
		}
		tx_desc->lso.mss_hdr_size = cpu_to_be32((mss << 16) | ihs);
		payload_len = mb->m_pkthdr.len - ihs;
		if (unlikely(payload_len == 0))
			num_pkts = 1;
		else
			num_pkts = DIV_ROUND_UP(payload_len, mss);
		ring->bytes += payload_len + (num_pkts * ihs);
		ring->packets += num_pkts;
		ring->tso_packets++;
		/* store pointer to inline header */
		dseg_inline = dseg;
		/* copy data inline */
		dseg = mlx4_en_store_inline_lso_data(dseg,
		    mb, ihs, owner_bit);
	} else {
		opcode = cpu_to_be32(MLX4_OPCODE_SEND) |
		    owner_bit;
		ihs = mlx4_en_get_inline_hdr_size(ring, mb);
		ring->bytes += max_t(unsigned int,
		    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
		ring->packets++;
		/* store pointer to inline header */
		dseg_inline = dseg;
		/* copy data inline */
		dseg = mlx4_en_store_inline_data(dseg,
		    mb, ihs, owner_bit);
	}
	m_adj(mb, ihs);

	err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map,
	    mb, segs, &nr_segs, BUS_DMA_NOWAIT);
	if (unlikely(err == EFBIG)) {
		/* Too many mbuf fragments */
		ring->defrag_attempts++;
		m = m_defrag(mb, M_NOWAIT);
		if (m == NULL) {
			ring->oversized_packets++;
			goto tx_drop;
		}
		mb = m;
		/* Try again */
		err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map,
		    mb, segs, &nr_segs, BUS_DMA_NOWAIT);
	}
	/* catch errors */
	if (unlikely(err != 0)) {
		ring->oversized_packets++;
		goto tx_drop;
	}
	/* If there were no errors and we didn't load anything, don't sync. */
	if (nr_segs != 0) {
		/* make sure all mbuf data is written to RAM */
		bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		/* All data was inlined, free the mbuf. */
		bus_dmamap_unload(ring->dma_tag, tx_info->dma_map);
		m_freem(mb);
		mb = NULL;
	}

	/* compute number of DS needed */
	ds_cnt = (dseg - ((volatile struct mlx4_wqe_data_seg *)tx_desc)) + nr_segs;

	/*
	 * Check if the next request can wrap around and fill the end
	 * of the current request with zero immediate data:
	 */
	pad = DIV_ROUND_UP(ds_cnt, DS_FACT);
	pad = (~(ring->prod + pad)) & ring->size_mask;

	if (unlikely(pad < (MLX4_EN_TX_WQE_MAX_WQEBBS - 1))) {
		/*
		 * Compute the least number of DS blocks we need to
		 * pad in order to achieve a TX ring wraparound:
		 */
		pad = (DS_FACT * (pad + 1));
	} else {
		/*
		 * The hardware will automatically jump to the next
		 * TXBB. No need for padding.
		 */
		pad = 0;
	}

	/* compute total number of DS blocks */
	ds_cnt += pad;
	/*
	 * When modifying this code, please ensure that the following
	 * computation is always less than or equal to 0x3F:
	 *
	 * ((MLX4_EN_TX_WQE_MAX_WQEBBS - 1) * DS_FACT) +
	 * (MLX4_EN_TX_WQE_MAX_WQEBBS * DS_FACT)
	 *
	 * Else the "ds_cnt" variable can become too big.
	 */
	tx_desc->ctrl.fence_size = (ds_cnt & 0x3f);

	/* store pointer to mbuf */
	tx_info->mb = mb;
	tx_info->nr_txbb = DIV_ROUND_UP(ds_cnt, DS_FACT);
	bf_size = ds_cnt * DS_SIZE_ALIGNMENT;
	bf_prod = ring->prod;

	/* compute end of "dseg" array */
	dseg += nr_segs + pad;

	/* pad using zero immediate dseg */
	while (pad--) {
		dseg--;
		dseg->addr = 0;
		dseg->lkey = 0;
		wmb();
		dseg->byte_count = SET_BYTE_COUNT((1 << 31)|0);
	}

	/* fill segment list */
	while (nr_segs--) {
		if (unlikely(segs[nr_segs].ds_len == 0)) {
			dseg--;
			dseg->addr = 0;
			dseg->lkey = 0;
			wmb();
			dseg->byte_count = SET_BYTE_COUNT((1 << 31)|0);
		} else {
			dseg--;
			dseg->addr = cpu_to_be64((uint64_t)segs[nr_segs].ds_addr);
			dseg->lkey = cpu_to_be32(priv->mdev->mr.key);
			wmb();
			dseg->byte_count = SET_BYTE_COUNT((uint32_t)segs[nr_segs].ds_len);
		}
	}

	wmb();

	/* write owner bits in reverse order */
	if ((opcode & cpu_to_be32(0x1F)) == cpu_to_be32(MLX4_OPCODE_LSO))
		mlx4_en_store_inline_lso_header(dseg_inline, ihs, owner_bit);
	else
		mlx4_en_store_inline_header(dseg_inline, ihs, owner_bit);

	/* update producer counter */
	ring->prod += tx_info->nr_txbb;

	if (ring->bf_enabled && bf_size <= MAX_BF &&
	    (tx_desc->ctrl.ins_vlan != MLX4_WQE_CTRL_INS_VLAN)) {

		/* store doorbell number */
		*(volatile __be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);

		/* or in producer number for this WQE */
		opcode |= cpu_to_be32((bf_prod & 0xffff) << 8);

		/*
		 * Ensure the new descriptor hits memory before
		 * setting ownership of this descriptor to HW:
		 */
		wmb();
		tx_desc->ctrl.owner_opcode = opcode;
		wmb();
		mlx4_bf_copy(((u8 *)ring->bf.reg) + ring->bf.offset,
		    (volatile unsigned long *) &tx_desc->ctrl, bf_size);
		wmb();
		ring->bf.offset ^= ring->bf.buf_size;
	} else {
		/*
		 * Ensure the new descriptor hits memory before
		 * setting ownership of this descriptor to HW:
		 */
		wmb();
		tx_desc->ctrl.owner_opcode = opcode;
		wmb();
		writel(cpu_to_be32(ring->doorbell_qpn),
		    ((u8 *)ring->bf.uar->map) + MLX4_SEND_DOORBELL);
	}

	return (0);
tx_drop:
	*mbp = NULL;
	m_freem(mb);
	return (err);
}

static int
mlx4_en_transmit_locked(struct ifnet *dev, int tx_ind, struct mbuf *m)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *ring;
	struct mbuf *next;
	int enqueued, err = 0;

	ring = priv->tx_ring[tx_ind];
	if ((dev->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || priv->port_up == 0) {
		if (m != NULL)
			err = drbr_enqueue(dev, ring->br, m);
		return (err);
	}

	enqueued = 0;
	if (m != NULL)
		/*
		 * If we can't insert mbuf into drbr, try to xmit anyway.
		 * We keep the error we got so we could return that after xmit.
		 */
		err = drbr_enqueue(dev, ring->br, m);

	/* Process the queue */
	while ((next = drbr_peek(dev, ring->br)) != NULL) {
		if (mlx4_en_xmit(priv, tx_ind, &next) != 0) {
			if (next == NULL) {
				drbr_advance(dev, ring->br);
			} else {
				drbr_putback(dev, ring->br, next);
			}
			break;
		}
		drbr_advance(dev, ring->br);
		enqueued++;
		if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
	}

	if (enqueued > 0)
		ring->watchdog_time = ticks;

	return (err);
}

void
mlx4_en_tx_que(void *context, int pending)
{
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_priv *priv;
	struct net_device *dev;
	struct mlx4_en_cq *cq;
	int tx_ind;

	cq = context;
	dev = cq->dev;
	priv = dev->if_softc;
	tx_ind = cq->ring;
	ring = priv->tx_ring[tx_ind];

	if (priv->port_up != 0 &&
	    (dev->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		mlx4_en_xmit_poll(priv, tx_ind);
		spin_lock(&ring->tx_lock);
		if (!drbr_empty(dev, ring->br))
			mlx4_en_transmit_locked(dev, tx_ind, NULL);
		spin_unlock(&ring->tx_lock);
	}
}

int
mlx4_en_transmit(struct ifnet *dev, struct mbuf *m)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_cq *cq;
	int i, err = 0;

	if (priv->port_up == 0) {
		m_freem(m);
		return (ENETDOWN);
	}

	/* Compute which queue to use */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
		i = (m->m_pkthdr.flowid % 128) % priv->tx_ring_num;
	} else {
		i = mlx4_en_select_queue(dev, m);
	}

	ring = priv->tx_ring[i];
	if (spin_trylock(&ring->tx_lock)) {
		err = mlx4_en_transmit_locked(dev, i, m);
		spin_unlock(&ring->tx_lock);
		/* Poll CQ here */
		mlx4_en_xmit_poll(priv, i);
	} else {
		err = drbr_enqueue(dev, ring->br, m);
		cq = priv->tx_cq[i];
		taskqueue_enqueue(cq->tq, &cq->cq_task);
	}

	return (err);
}

/*
 * Flush ring buffers.
 */
void
mlx4_en_qflush(struct ifnet *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *ring;
	struct mbuf *m;

	if (priv->port_up == 0)
		return;

	for (int i = 0; i < priv->tx_ring_num; i++) {
		ring = priv->tx_ring[i];
		spin_lock(&ring->tx_lock);
		while ((m = buf_ring_dequeue_sc(ring->br)) != NULL)
			m_freem(m);
		spin_unlock(&ring->tx_lock);
	}
	if_qflush(dev);
}