en_tx.c revision 292107
/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/page.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>

#include "mlx4_en.h"
#include "utils.h"

enum {
	MAX_INLINE = 104,	/* 128 - 16 - 4 - 4 */
	MAX_BF = 256,
	MIN_PKT_LEN = 17,
};

static int inline_thold __read_mostly = MAX_INLINE;

module_param_named(inline_thold, inline_thold, uint, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");

int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring **pring, u32 size,
			   u16 stride, int node, int queue_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	uint32_t x;
	int tmp;
	int err;

	ring = kzalloc_node(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed allocating TX ring\n");
			return -ENOMEM;
		}
	}

	/* Create DMA descriptor TAG */
	if ((err = -bus_dma_tag_create(
	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
	    1,					/* any alignment */
	    0,					/* no boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MLX4_EN_TX_MAX_PAYLOAD_SIZE,	/* maxsize */
	    MLX4_EN_TX_MAX_MBUF_FRAGS,		/* nsegments */
	    MLX4_EN_TX_MAX_MBUF_SIZE,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &ring->dma_tag)))
		goto done;

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->inline_thold = MAX(MIN_PKT_LEN, MIN(inline_thold, MAX_INLINE));
	mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF);
	mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL, MTX_DEF);

	/* Allocate the buf ring */
	ring->br = buf_ring_alloc(MLX4_EN_DEF_TX_QUEUE_SIZE, M_DEVBUF,
	    M_WAITOK, &ring->tx_lock.m);
	if (ring->br == NULL) {
		en_err(priv, "Failed allocating tx_info ring\n");
		err = -ENOMEM;
		goto err_free_dma_tag;
	}

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = kzalloc_node(tmp, GFP_KERNEL, node);
	if (!ring->tx_info) {
		ring->tx_info = kzalloc(tmp, GFP_KERNEL);
		if (!ring->tx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	/* Create DMA descriptor MAPs */
	for (x = 0; x != size; x++) {
		err = -bus_dmamap_create(ring->dma_tag, 0,
		    &ring->tx_info[x].dma_map);
		if (err != 0) {
			while (x--) {
				bus_dmamap_destroy(ring->dma_tag,
				    ring->tx_info[x].dma_map);
			}
			goto err_info;
		}
	}

	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
	    ring->tx_info, tmp);

	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	/* Allocate HW buffers on provided NUMA node */
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
	    2 * PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_dma_map;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	    "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	    ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
	    MLX4_RESERVE_BF_QP);
	if (err) {
		en_err(priv, "failed reserving qp for TX ring\n");
		goto err_map;
	}

	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_reserve;
	}
	ring->qp.event = mlx4_en_sqp_event;

	err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
	if (err) {
		en_dbg(DRV, priv, "working without blueflame (%d)", err);
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
	} else
		ring->bf_enabled = true;
	ring->queue_index = queue_idx;
	if (queue_idx < priv->num_tx_rings_p_up)
		CPU_SET(queue_idx, &ring->affinity_mask);

	*pring = ring;
	return 0;

err_reserve:
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_dma_map:
	for (x = 0; x != size; x++)
		bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map);
err_info:
	vfree(ring->tx_info);
err_ring:
	buf_ring_free(ring->br, M_DEVBUF);
err_free_dma_tag:
	bus_dma_tag_destroy(ring->dma_tag);
done:
	kfree(ring);
	return err;
}

void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring **pring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring = *pring;
	uint32_t x;

	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	buf_ring_free(ring->br, M_DEVBUF);
	if (ring->bf_enabled)
		mlx4_bf_free(mdev->dev, &ring->bf);
	mlx4_qp_remove(mdev->dev, &ring->qp);
	mlx4_qp_free(mdev->dev, &ring->qp);
	mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	for (x = 0; x != ring->size; x++)
		bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map);
	vfree(ring->tx_info);
	mtx_destroy(&ring->tx_lock.m);
	mtx_destroy(&ring->comp_lock.m);
	bus_dma_tag_destroy(ring->dma_tag);
	kfree(ring);
	*pring = NULL;
}

int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq, int user_prio)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	ring->cqn = cq;
	ring->prod = 0;
	ring->cons = 0xffffffff;
	ring->last_nr_txbb = 1;
	ring->poll_cnt = 0;
	ring->blocked = 0;
	memset(ring->buf, 0, ring->buf_size);

	ring->qp_state = MLX4_QP_STATE_RST;
	ring->doorbell_qpn = ring->qp.qpn << 8;

	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
	    ring->cqn, user_prio, &ring->context);
	if (ring->bf_enabled)
		ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
	    &ring->qp, &ring->qp_state);
	return err;
}

void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
	    MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}

static volatile struct mlx4_wqe_data_seg *
mlx4_en_store_inline_lso_data(volatile struct mlx4_wqe_data_seg *dseg,
    struct mbuf *mb, int len, __be32 owner_bit)
{
	uint8_t *inl = __DEVOLATILE(uint8_t *, dseg);

	/* copy data into place */
	m_copydata(mb, 0, len, inl + 4);
	dseg += DIV_ROUND_UP(4 + len, DS_SIZE_ALIGNMENT);
	return (dseg);
}

static void
mlx4_en_store_inline_lso_header(volatile struct mlx4_wqe_data_seg *dseg,
    int len, __be32 owner_bit)
{
}

static void
mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
    struct mlx4_en_tx_ring *ring, u32 index, u8 owner)
{
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = (struct mlx4_en_tx_desc *)
	    (ring->buf + (index * TXBB_SIZE));
	volatile __be32 *ptr = (__be32 *)tx_desc;
	const __be32 stamp = cpu_to_be32(STAMP_VAL |
	    ((u32)owner << STAMP_SHIFT));
	u32 i;

	/* Stamp the freed descriptor */
	for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
		*ptr = stamp;
		ptr += STAMP_DWORDS;
	}
}

static u32
mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
    struct mlx4_en_tx_ring *ring, u32 index)
{
	struct mlx4_en_tx_info *tx_info;
	struct mbuf *mb;

	tx_info = &ring->tx_info[index];
	mb = tx_info->mb;

	if (mb == NULL)
		goto done;

	bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->dma_tag, tx_info->dma_map);

	m_freem(mb);
done:
	return (tx_info->nr_txbb);
}

int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Skip last polled descriptor */
	ring->cons += ring->last_nr_txbb;
	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
	    ring->cons, ring->prod);

	if ((u32) (ring->prod - ring->cons) > ring->size) {
		en_warn(priv, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
		    ring->cons & ring->size_mask);
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (cnt)
		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}

static bool
mlx4_en_tx_ring_is_full(struct mlx4_en_tx_ring *ring)
{
	int wqs;

	wqs = ring->size - (ring->prod - ring->cons);
	return (wqs < (HEADROOM + (2 * MLX4_EN_TX_WQE_MAX_WQEBBS)));
}

static int mlx4_en_process_tx_cq(struct net_device *dev,
    struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe;
	u16 index;
	u16 new_index, ring_index, stamp_index;
	u32 txbbs_skipped = 0;
	u32 txbbs_stamp = 0;
	u32 cons_index = mcq->cons_index;
	int size = cq->size;
	u32 size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;
	int factor = priv->cqe_factor;

	if (!priv->port_up)
		return 0;

	index = cons_index & size_mask;
	cqe = &buf[(index << factor) + factor];
	ring_index = ring->cons & size_mask;
	stamp_index = ring_index;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
	    cons_index & size)) {
		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		rmb();

		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		    MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrom: 0x%x syndrom: 0x%x\n",
			    ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
			    ((struct mlx4_err_cqe *)cqe)->syndrome);
		}

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			txbbs_skipped += ring->last_nr_txbb;
			ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
			/* free next descriptor */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
			    priv, ring, ring_index);
			mlx4_en_stamp_wqe(priv, ring, stamp_index,
			    !!((ring->cons + txbbs_stamp) &
			    ring->size));
			stamp_index = ring_index;
			txbbs_stamp = txbbs_skipped;
		} while (ring_index != new_index);

		++cons_index;
		index = cons_index & size_mask;
		cqe = &buf[(index << factor) + factor];
	}

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;

	/* Wakeup Tx queue if it was stopped and ring is not full */
	if (unlikely(ring->blocked) && !mlx4_en_tx_ring_is_full(ring)) {
		ring->blocked = 0;
		if (atomic_fetchadd_int(&priv->blocked, -1) == 1)
			atomic_clear_int(&dev->if_drv_flags, IFF_DRV_OACTIVE);
		ring->wake_queue++;
		priv->port_stats.wake_queue++;
	}
	return (0);
}

void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];

	if (!spin_trylock(&ring->comp_lock))
		return;
	mlx4_en_process_tx_cq(cq->dev, cq);
	mod_timer(&cq->timer, jiffies + 1);
	spin_unlock(&ring->comp_lock);
}

void mlx4_en_poll_tx_cq(unsigned long data)
{
	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
	u32 inflight;

	INC_PERF_COUNTER(priv->pstats.tx_poll);

	if (!spin_trylock(&ring->comp_lock)) {
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
		return;
	}
	mlx4_en_process_tx_cq(cq->dev, cq);
	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

	/* If there are still packets in flight and the timer has not already
	 * been scheduled by the Tx routine then schedule it here to guarantee
	 * completion processing of these packets */
	if (inflight && priv->port_up)
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	spin_unlock(&ring->comp_lock);
}

static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
	struct mlx4_en_cq *cq = priv->tx_cq[tx_ind];
	struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];

	/* If we don't have a pending timer, set one up to catch our recent
	   post in case the interface becomes idle */
	if (!timer_pending(&cq->timer))
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
		if (spin_trylock(&ring->comp_lock)) {
			mlx4_en_process_tx_cq(priv->dev, cq);
			spin_unlock(&ring->comp_lock);
		}
}

static u16
mlx4_en_get_inline_hdr_size(struct mlx4_en_tx_ring *ring, struct mbuf *mb)
{
	u16 retval;

	/* only copy from first fragment, if possible */
	retval = MIN(ring->inline_thold, mb->m_len);

	/* check for too little data */
	if (unlikely(retval < MIN_PKT_LEN))
		retval = MIN(ring->inline_thold, mb->m_pkthdr.len);
	return (retval);
}

static int
mlx4_en_get_header_size(struct mbuf *mb)
{
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	struct ip *ip;
	int ip_hlen, tcp_hlen;
	struct ip6_hdr *ip6;
	uint16_t eth_type;
	int eth_hdr_len;

	eh = mtod(mb, struct ether_vlan_header *);
	if (mb->m_len < ETHER_HDR_LEN)
		return (0);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		eth_type = ntohs(eh->evl_proto);
		eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		eth_type = ntohs(eh->evl_encap_proto);
		eth_hdr_len = ETHER_HDR_LEN;
	}
	if (mb->m_len < eth_hdr_len)
		return (0);
	switch (eth_type) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip))
			return (0);
		if (ip->ip_p != IPPROTO_TCP)
			return (0);
		ip_hlen = ip->ip_hl << 2;
		eth_hdr_len += ip_hlen;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len);
		if (mb->m_len < eth_hdr_len + sizeof(*ip6))
			return (0);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (0);
		eth_hdr_len += sizeof(*ip6);
		break;
	default:
		return (0);
	}
	if (mb->m_len < eth_hdr_len + sizeof(*th))
		return (0);
	th = (struct tcphdr *)(mb->m_data + eth_hdr_len);
	tcp_hlen = th->th_off << 2;
	eth_hdr_len += tcp_hlen;
	if (mb->m_len < eth_hdr_len)
		return (0);
	return (eth_hdr_len);
}

static volatile struct mlx4_wqe_data_seg *
mlx4_en_store_inline_data(volatile struct mlx4_wqe_data_seg *dseg,
    struct mbuf *mb, int len, __be32 owner_bit)
{
	uint8_t *inl = __DEVOLATILE(uint8_t *, dseg);
	const int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - 4;

	if (unlikely(len < MIN_PKT_LEN)) {
		m_copydata(mb, 0, len, inl + 4);
		memset(inl + 4 + len, 0, MIN_PKT_LEN - len);
		dseg += DIV_ROUND_UP(4 + MIN_PKT_LEN, DS_SIZE_ALIGNMENT);
	} else if (len <= spc) {
		m_copydata(mb, 0, len, inl + 4);
		dseg += DIV_ROUND_UP(4 + len, DS_SIZE_ALIGNMENT);
	} else {
		m_copydata(mb, 0, spc, inl + 4);
		m_copydata(mb, spc, len - spc, inl + 8 + spc);
		dseg += DIV_ROUND_UP(8 + len, DS_SIZE_ALIGNMENT);
	}
	return (dseg);
}

static void
mlx4_en_store_inline_header(volatile struct mlx4_wqe_data_seg *dseg,
    int len, __be32 owner_bit)
{
	uint8_t *inl = __DEVOLATILE(uint8_t *, dseg);
	const int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - 4;

	if (unlikely(len < MIN_PKT_LEN)) {
		*(volatile uint32_t *)inl =
		    SET_BYTE_COUNT((1 << 31) | MIN_PKT_LEN);
	} else if (len <= spc) {
		*(volatile uint32_t *)inl =
		    SET_BYTE_COUNT((1 << 31) | len);
	} else {
		*(volatile uint32_t *)(inl + 4 + spc) =
		    SET_BYTE_COUNT((1 << 31) | (len - spc));
		wmb();
		*(volatile uint32_t *)inl =
		    SET_BYTE_COUNT((1 << 31) | spc);
	}
}

static unsigned long hashrandom;
static void hashrandom_init(void *arg)
{
	hashrandom = random();
}
SYSINIT(hashrandom_init, SI_SUB_KLD, SI_ORDER_SECOND, &hashrandom_init, NULL);

u16 mlx4_en_select_queue(struct net_device *dev, struct mbuf *mb)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u32 rings_p_up = priv->num_tx_rings_p_up;
	u32 up = 0;
	u32 queue_index;

#if (MLX4_EN_NUM_UP > 1)
	/* Obtain VLAN information if present */
	if (mb->m_flags & M_VLANTAG) {
		u32 vlan_tag = mb->m_pkthdr.ether_vtag;
		up = (vlan_tag >> 13) % MLX4_EN_NUM_UP;
	}
#endif
	queue_index = mlx4_en_hashmbuf(MLX4_F_HASHL3 | MLX4_F_HASHL4, mb, hashrandom);

	return ((queue_index % rings_p_up) + (up * rings_p_up));
}

static void mlx4_bf_copy(void __iomem *dst, volatile unsigned long *src, unsigned bytecnt)
{
	__iowrite64_copy(dst, __DEVOLATILE(void *, src), bytecnt / 8);
}

static u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}

static int mlx4_en_xmit(struct mlx4_en_priv *priv, int tx_ind, struct mbuf **mbp)
{
	enum {
		DS_FACT = TXBB_SIZE / DS_SIZE_ALIGNMENT,
		CTRL_FLAGS = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
		    MLX4_WQE_CTRL_SOLICITED),
	};
	bus_dma_segment_t segs[MLX4_EN_TX_MAX_MBUF_FRAGS];
	volatile struct mlx4_wqe_data_seg *dseg;
	volatile struct mlx4_wqe_data_seg *dseg_inline;
	volatile struct mlx4_en_tx_desc *tx_desc;
	struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind];
	struct ifnet *ifp = priv->dev;
	struct mlx4_en_tx_info *tx_info;
	struct mbuf *mb = *mbp;
	struct mbuf *m;
	__be32 owner_bit;
	int nr_segs;
	int pad;
	int err;
	u32 bf_size;
	u32 bf_prod;
	u32 opcode;
	u16 index;
	u16 ds_cnt;
	u16 ihs;

	if (unlikely(!priv->port_up)) {
		err = EINVAL;
		goto tx_drop;
	}

	/* check if TX ring is full */
	if (unlikely(mlx4_en_tx_ring_is_full(ring))) {
		/* every full native Tx ring stops queue */
		if (ring->blocked == 0)
			atomic_add_int(&priv->blocked, 1);
		/* Set HW-queue-is-full flag */
		atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
		priv->port_stats.queue_stopped++;
		ring->blocked = 1;
		priv->port_stats.queue_stopped++;
		ring->queue_stopped++;

		/* Use interrupts to find out when queue opened */
		mlx4_en_arm_cq(priv, priv->tx_cq[tx_ind]);
		return (ENOBUFS);
	}

	/* sanity check we are not wrapping around */
	KASSERT(((~ring->prod) & ring->size_mask) >=
	    (MLX4_EN_TX_WQE_MAX_WQEBBS - 1), ("Wrapping around TX ring"));

	/* Track current inflight packets for performance analysis */
	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
	    (u32) (ring->prod - ring->cons - 1));

	/* Track current mbuf packet header length */
	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, mb->m_pkthdr.len);

	/* Grab an index and try to transmit packet */
	owner_bit = (ring->prod & ring->size) ?
	    cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0;
	index = ring->prod & ring->size_mask;
	tx_desc = (volatile struct mlx4_en_tx_desc *)
	    (ring->buf + index * TXBB_SIZE);
	tx_info = &ring->tx_info[index];
	dseg = &tx_desc->data;

	/* send a copy of the frame to the BPF listener, if any */
	if (ifp != NULL && ifp->if_bpf != NULL)
		ETHER_BPF_MTAP(ifp, mb);

	/* get default flags */
	tx_desc->ctrl.srcrb_flags = CTRL_FLAGS;

	if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO))
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);

	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_TCP_UDP_CSUM);

	/* do statistics */
	if (likely(tx_desc->ctrl.srcrb_flags != CTRL_FLAGS)) {
		priv->port_stats.tx_chksum_offload++;
		ring->tx_csum++;
	}

	/* check for VLAN tag */
	if (mb->m_flags & M_VLANTAG) {
		tx_desc->ctrl.vlan_tag = cpu_to_be16(mb->m_pkthdr.ether_vtag);
		tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN;
	} else {
		tx_desc->ctrl.vlan_tag = 0;
		tx_desc->ctrl.ins_vlan = 0;
	}

	/* clear immediate field */
	tx_desc->ctrl.imm = 0;

	/* Handle LSO (TSO) packets */
	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
		u32 payload_len;
		u32 mss = mb->m_pkthdr.tso_segsz;
		u32 num_pkts;

		opcode = cpu_to_be32(MLX4_OPCODE_LSO | MLX4_WQE_CTRL_RR) |
		    owner_bit;
		ihs = mlx4_en_get_header_size(mb);
		if (unlikely(ihs > MAX_INLINE)) {
			ring->oversized_packets++;
			err = EINVAL;
			goto tx_drop;
		}
		tx_desc->lso.mss_hdr_size = cpu_to_be32((mss << 16) | ihs);
		payload_len = mb->m_pkthdr.len - ihs;
		if (unlikely(payload_len == 0))
			num_pkts = 1;
		else
			num_pkts = DIV_ROUND_UP(payload_len, mss);
		ring->bytes += payload_len + (num_pkts * ihs);
		ring->packets += num_pkts;
		priv->port_stats.tso_packets++;
		/* store pointer to inline header */
		dseg_inline = dseg;
		/* copy data inline */
		dseg = mlx4_en_store_inline_lso_data(dseg,
		    mb, ihs, owner_bit);
	} else {
		opcode = cpu_to_be32(MLX4_OPCODE_SEND) |
		    owner_bit;
		ihs = mlx4_en_get_inline_hdr_size(ring, mb);
		ring->bytes += max_t(unsigned int,
		    mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
		ring->packets++;
		/* store pointer to inline header */
		dseg_inline = dseg;
		/* copy data inline */
		dseg = mlx4_en_store_inline_data(dseg,
		    mb, ihs, owner_bit);
	}
	m_adj(mb, ihs);

	/* trim off empty mbufs */
	while (mb->m_len == 0) {
		mb = m_free(mb);
		/* check if all data has been inlined */
		if (mb == NULL) {
			nr_segs = 0;
			goto skip_dma;
		}
	}

	err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map,
	    mb, segs, &nr_segs, BUS_DMA_NOWAIT);
	if (unlikely(err == EFBIG)) {
		/* Too many mbuf fragments */
		m = m_defrag(mb, M_NOWAIT);
		if (m == NULL) {
			ring->oversized_packets++;
			goto tx_drop;
		}
		mb = m;
		/* Try again */
		err = bus_dmamap_load_mbuf_sg(ring->dma_tag, tx_info->dma_map,
		    mb, segs, &nr_segs, BUS_DMA_NOWAIT);
	}
	/* catch errors */
	if (unlikely(err != 0)) {
		ring->oversized_packets++;
		goto tx_drop;
	}
	/* make sure all mbuf data is written to RAM */
	bus_dmamap_sync(ring->dma_tag, tx_info->dma_map,
	    BUS_DMASYNC_PREWRITE);

skip_dma:
	/* compute number of DS needed */
	ds_cnt = (dseg - ((volatile struct mlx4_wqe_data_seg *)tx_desc)) + nr_segs;

	/*
	 * Check if the next request can wrap around and fill the end
	 * of the current request with zero immediate data:
	 */
	pad = DIV_ROUND_UP(ds_cnt, DS_FACT);
	pad = (~(ring->prod + pad)) & ring->size_mask;

	if (unlikely(pad < (MLX4_EN_TX_WQE_MAX_WQEBBS - 1))) {
		/*
		 * Compute the least number of DS blocks we need to
		 * pad in order to achieve a TX ring wraparound:
		 */
		pad = (DS_FACT * (pad + 1));
	} else {
		/*
		 * The hardware will automatically jump to the next
		 * TXBB. No need for padding.
		 */
		pad = 0;
	}

	/* compute total number of DS blocks */
	ds_cnt += pad;

	/*
	 * When modifying this code, please ensure that the following
	 * computation is always less than or equal to 0x3F:
	 *
	 * ((MLX4_EN_TX_WQE_MAX_WQEBBS - 1) * DS_FACT) +
	 * (MLX4_EN_TX_WQE_MAX_WQEBBS * DS_FACT)
	 *
	 * Else the "ds_cnt" variable can become too big.
	 */
	tx_desc->ctrl.fence_size = (ds_cnt & 0x3f);

	/* store pointer to mbuf */
	tx_info->mb = mb;
	tx_info->nr_txbb = DIV_ROUND_UP(ds_cnt, DS_FACT);
	bf_size = ds_cnt * DS_SIZE_ALIGNMENT;
	bf_prod = ring->prod;

	/* compute end of "dseg" array */
	dseg += nr_segs + pad;

	/* pad using zero immediate dseg */
	while (pad--) {
		dseg--;
		dseg->addr = 0;
		dseg->lkey = 0;
		wmb();
		dseg->byte_count = SET_BYTE_COUNT((1 << 31)|0);
	}

	/* fill segment list */
	while (nr_segs--) {
		if (unlikely(segs[nr_segs].ds_len == 0)) {
			dseg--;
			dseg->addr = 0;
			dseg->lkey = 0;
			wmb();
			dseg->byte_count = SET_BYTE_COUNT((1 << 31)|0);
		} else {
			dseg--;
			dseg->addr = cpu_to_be64((uint64_t)segs[nr_segs].ds_addr);
			dseg->lkey = cpu_to_be32(priv->mdev->mr.key);
			wmb();
			dseg->byte_count = SET_BYTE_COUNT((uint32_t)segs[nr_segs].ds_len);
		}
	}

	wmb();

	/* write owner bits in reverse order */
	if ((opcode & cpu_to_be32(0x1F)) == cpu_to_be32(MLX4_OPCODE_LSO))
		mlx4_en_store_inline_lso_header(dseg_inline, ihs, owner_bit);
	else
		mlx4_en_store_inline_header(dseg_inline, ihs, owner_bit);

	if (unlikely(priv->validate_loopback)) {
		/* Copy dst mac address to wqe */
		struct ether_header *ethh;
		u64 mac;
		u32 mac_l, mac_h;

		ethh = mtod(mb, struct ether_header *);
		mac = mlx4_en_mac_to_u64(ethh->ether_dhost);
		if (mac) {
			mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
			mac_l = (u32) (mac & 0xffffffff);
			tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
			tx_desc->ctrl.imm = cpu_to_be32(mac_l);
		}
	}

	/* update producer counter */
	ring->prod += tx_info->nr_txbb;

	if (ring->bf_enabled && bf_size <= MAX_BF &&
	    (tx_desc->ctrl.ins_vlan != MLX4_WQE_CTRL_INS_VLAN)) {

		/* store doorbell number */
		*(volatile __be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);

		/* or in producer number for this WQE */
		opcode |= cpu_to_be32((bf_prod & 0xffff) << 8);

		/*
		 * Ensure the new descriptor hits memory before
		 * setting ownership of this descriptor to HW:
		 */
		wmb();
		tx_desc->ctrl.owner_opcode = opcode;
		wmb();
		mlx4_bf_copy(((u8 *)ring->bf.reg) + ring->bf.offset,
		    (volatile unsigned long *) &tx_desc->ctrl, bf_size);
		wmb();
		ring->bf.offset ^= ring->bf.buf_size;
	} else {
		/*
		 * Ensure the new descriptor hits memory before
		 * setting ownership of this descriptor to HW:
		 */
		wmb();
		tx_desc->ctrl.owner_opcode = opcode;
		wmb();
		writel(cpu_to_be32(ring->doorbell_qpn),
		    ((u8 *)ring->bf.uar->map) + MLX4_SEND_DOORBELL);
	}

	return (0);

tx_drop:
	*mbp = NULL;
	m_freem(mb);
	return (err);
}

static int
mlx4_en_transmit_locked(struct ifnet *dev, int tx_ind, struct mbuf *m)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *ring;
	struct mbuf *next;
	int enqueued, err = 0;

	ring = priv->tx_ring[tx_ind];
	if ((dev->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || priv->port_up == 0) {
		if (m != NULL)
			err = drbr_enqueue(dev, ring->br, m);
		return (err);
	}

	enqueued = 0;
	if (m != NULL)
		/*
		 * If we can't insert mbuf into drbr, try to xmit anyway.
		 * We keep the error we got so we could return that after xmit.
		 */
		err = drbr_enqueue(dev, ring->br, m);

	/* Process the queue */
	while ((next = drbr_peek(dev, ring->br)) != NULL) {
		if (mlx4_en_xmit(priv, tx_ind, &next) != 0) {
			if (next == NULL) {
				drbr_advance(dev, ring->br);
			} else {
				drbr_putback(dev, ring->br, next);
			}
			break;
		}
		drbr_advance(dev, ring->br);
		enqueued++;
		if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
	}

	if (enqueued > 0)
		ring->watchdog_time = ticks;

	return (err);
}

void
mlx4_en_tx_que(void *context, int pending)
{
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_priv *priv;
	struct net_device *dev;
	struct mlx4_en_cq *cq;
	int tx_ind;

	cq = context;
	dev = cq->dev;
	priv = dev->if_softc;
	tx_ind = cq->ring;
	ring = priv->tx_ring[tx_ind];

	if (dev->if_drv_flags & IFF_DRV_RUNNING) {
		mlx4_en_xmit_poll(priv, tx_ind);
		spin_lock(&ring->tx_lock);
		if (!drbr_empty(dev, ring->br))
			mlx4_en_transmit_locked(dev, tx_ind, NULL);
		spin_unlock(&ring->tx_lock);
	}
}

int
mlx4_en_transmit(struct ifnet *dev, struct mbuf *m)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_cq *cq;
	int i, err = 0;

	/* Compute which queue to use */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
		i = m->m_pkthdr.flowid % priv->tx_ring_num;
	} else {
		i = mlx4_en_select_queue(dev, m);
	}

	ring = priv->tx_ring[i];
	if (spin_trylock(&ring->tx_lock)) {
		err = mlx4_en_transmit_locked(dev, i, m);
		spin_unlock(&ring->tx_lock);
		/* Poll CQ here */
		mlx4_en_xmit_poll(priv, i);
	} else {
		err = drbr_enqueue(dev, ring->br, m);
		cq = priv->tx_cq[i];
		taskqueue_enqueue(cq->tq, &cq->cq_task);
	}

	return (err);
}

/*
 * Flush ring buffers.
 */
void
mlx4_en_qflush(struct ifnet *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_tx_ring *ring;
	struct mbuf *m;

	for (int i = 0; i < priv->tx_ring_num; i++) {
		ring = priv->tx_ring[i];
		spin_lock(&ring->tx_lock);
		while ((m = buf_ring_dequeue_sc(ring->br)) != NULL)
			m_freem(m);
		spin_unlock(&ring->tx_lock);
	}
	if_qflush(dev);
}